
HDDS-1653. Add option to "ozone scmcli printTopology" to order the output according to topology layer. Contributed by Xiaoyu Yao. (#1067)

* HDDS-1653. Add option to "ozone scmcli printTopology" to order the output according to topology layer. Contributed by Xiaoyu Yao.

* use ip/hostname instead of network name for -o output and add smoke test
Xiaoyu Yao 5 years ago
parent
commit 4e66cb9333
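
For context, the two invocation forms touched by this change, as exercised by the smoke test added below (a usage sketch, not part of the commit itself):

ozone scmcli printTopology        # flat "ipAddress(hostName)    networkLocation" listing per node state
ozone scmcli printTopology -o     # nodes grouped and ordered by network location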

+ 52 - 8
hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java

@@ -19,9 +19,11 @@
 package org.apache.hadoop.hdds.scm.cli;
 
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import picocli.CommandLine;
+
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONED;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONING;
@@ -29,7 +31,11 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
 
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.TreeSet;
 import java.util.concurrent.Callable;
 
 /**
@@ -55,6 +61,10 @@ public class TopologySubcommand implements Callable<Void> {
     stateArray.add(DECOMMISSIONED);
   }
 
+  @CommandLine.Option(names = {"-o", "--order"},
+      description = "Print Topology ordered by network location")
+  private boolean order;
+
   @Override
   public Void call() throws Exception {
     try (ScmClient scmClient = parent.createScmClient()) {
@@ -64,17 +74,51 @@ public class TopologySubcommand implements Callable<Void> {
         if (nodes != null && nodes.size() > 0) {
           // show node state
           System.out.println("State = " + state.toString());
-          // format "hostname/ipAddress    networkLocation"
-          nodes.forEach(node -> {
-            System.out.print(node.getNodeID().getHostName() + "/" +
-                node.getNodeID().getIpAddress());
-            System.out.println("    " +
-                (node.getNodeID().getNetworkLocation() != null ?
-                    node.getNodeID().getNetworkLocation() : "NA"));
-          });
+          if (order) {
+            printOrderedByLocation(nodes);
+          } else {
+            printNodesWithLocation(nodes);
+          }
         }
       }
       return null;
     }
   }
+
+  // Format
+  // Location: rack1
+  //  ipAddress(hostName)
+  private void printOrderedByLocation(List<HddsProtos.Node> nodes) {
+    HashMap<String, TreeSet<DatanodeDetails>> tree =
+        new HashMap<>();
+    for (HddsProtos.Node node : nodes) {
+      String location = node.getNodeID().getNetworkLocation();
+      if (location != null && !tree.containsKey(location)) {
+        tree.put(location, new TreeSet<>());
+      }
+      tree.get(location).add(DatanodeDetails.getFromProtoBuf(node.getNodeID()));
+    }
+    ArrayList<String> locations = new ArrayList<>(tree.keySet());
+    Collections.sort(locations);
+
+    locations.forEach(location -> {
+      System.out.println("Location: " + location);
+      tree.get(location).forEach(node -> {
+        System.out.println(" " + node.getIpAddress() + "(" + node.getHostName()
+            + ")");
+      });
+    });
+  }
+
+
+  // Format "ipAddress(hostName)    networkLocation"
+  private void printNodesWithLocation(Collection<HddsProtos.Node> nodes) {
+    nodes.forEach(node -> {
+      System.out.print(" " + node.getNodeID().getIpAddress() + "(" +
+          node.getNodeID().getHostName() + ")");
+      System.out.println("    " +
+          (node.getNodeID().getNetworkLocation() != null ?
+              node.getNodeID().getNetworkLocation() : "NA"));
+    });
+  }
 }
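
The core of the new -o path is a group-by-location pass followed by a sort of the locations. Below is a minimal, dependency-free sketch of that logic (plain strings stand in for DatanodeDetails and HddsProtos.Node, and the class name and sample values are illustrative only):

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.TreeSet;

public class TopologyOrderSketch {
  public static void main(String[] args) {
    // Each entry: {"ipAddress(hostName)", "networkLocation"}; values are made up.
    List<String[]> nodes = List.of(
        new String[] {"10.5.0.7(datanode_4)", "/rack2"},
        new String[] {"10.5.0.4(datanode_1)", "/rack1"},
        new String[] {"10.5.0.5(datanode_2)", "/rack1"});

    // Group nodes by network location; a TreeSet keeps each group sorted.
    HashMap<String, TreeSet<String>> tree = new HashMap<>();
    for (String[] node : nodes) {
      tree.computeIfAbsent(node[1], k -> new TreeSet<>()).add(node[0]);
    }

    // Print locations in sorted order, then the nodes under each location,
    // matching the "Location: <loc>" / " ip(host)" format used above.
    ArrayList<String> locations = new ArrayList<>(tree.keySet());
    Collections.sort(locations);
    for (String location : locations) {
      System.out.println("Location: " + location);
      tree.get(location).forEach(n -> System.out.println(" " + n));
    }
  }
}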

+ 2 - 0
hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh

@@ -30,6 +30,8 @@ execute_robot_test om auditparser
 
 execute_robot_test scm basic/basic.robot
 
+execute_robot_test scm topology/scmcli.robot
+
 stop_docker_env
 
 generate_report

+ 32 - 0
hadoop-ozone/dist/src/main/smoketest/topology/scmcli.robot

@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Smoketest for ozone scmcli printTopology
+Library             OperatingSystem
+Library             BuiltIn
+Resource            ../commonlib.robot
+
+*** Variables ***
+
+
+*** Test Cases ***
+Run printTopology
+    ${output} =         Execute          ozone scmcli printTopology
+                        Should contain   ${output}         10.5.0.7(ozone-topology_datanode_4_1.ozone-topology_net)    /rack2
+Run printTopology -o
+    ${output} =         Execute          ozone scmcli printTopology -o
+                        Should contain   ${output}         Location: /rack2
+                        Should contain   ${output}         10.5.0.7(ozone-topology_datanode_4_1.ozone-topology_net)
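
For reference, against the ozone-topology compose cluster the ordered output checked above is expected to look roughly as follows. Only the "Location: /rack2" and 10.5.0.7 lines are actually asserted; the surrounding lines follow the format printed by TopologySubcommand, and the exact hosts and racks depend on the compose environment:

$ ozone scmcli printTopology -o
State = HEALTHY
Location: /rack2
 10.5.0.7(ozone-topology_datanode_4_1.ozone-topology_net)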