View source

HDFS-17735. [ARR] LocalResolver#getDatanodesSubcluster adapts to async rpc. (#7422). Contributed by hfutatzhanghb.

Reviewed-by: Jian Zhang <keepromise@apache.org>
hfutatzhanghb — 1 month ago
parent commit f910b11614

+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MultipleDestinationMountTableResolver.java

@@ -118,4 +118,9 @@ public class MultipleDestinationMountTableResolver extends MountTableResolver {
   public void addResolver(DestinationOrder order, OrderedResolver resolver) {
     orderedResolvers.put(order, resolver);
   }
-}
+
+  @VisibleForTesting
+  public OrderedResolver getOrderedResolver(DestinationOrder order) {
+    return orderedResolvers.get(order);
+  }
+}

+ 13 - 5
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java

@@ -45,6 +45,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort;
 
+import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
 
 /**
  * The local subcluster (where the writer is) should be tried first. The writer
@@ -124,9 +125,9 @@ public class LocalResolver extends RouterResolver<String, String> {
    * needs to be done as a privileged action to use the user for the Router and
    * not the one from the client in the RPC call.
    *
-   * @return DN IP -> Subcluster.
+   * @return DN IP -&gt; Subcluster.
    */
-  private Map<String, String> getDatanodesSubcluster() {
+  public Map<String, String> getDatanodesSubcluster() {
 
     final RouterRpcServer rpcServer = getRpcServer();
     if (rpcServer == null) {
@@ -143,9 +144,16 @@ public class LocalResolver extends RouterResolver<String, String> {
             @Override
             public Map<String, DatanodeStorageReport[]> run() {
               try {
-                return rpcServer.getDatanodeStorageReportMap(
-                    DatanodeReportType.ALL);
-              } catch (IOException e) {
+                Map<String, DatanodeStorageReport[]> result;
+                if (rpcServer.isAsync()) {
+                  rpcServer.getDatanodeStorageReportMapAsync(DatanodeReportType.ALL);
+                  result = syncReturn(Map.class);
+                } else {
+                  result = rpcServer.getDatanodeStorageReportMap(
+                      DatanodeReportType.ALL);
+                }
+                return result;
+              } catch (Exception e) {
                 LOG.error("Cannot get the datanodes from the RPC server", e);
                 return null;
               }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/AsyncUtil.java

@@ -127,7 +127,7 @@ public final class AsyncUtil {
     try {
       return (R) completableFuture.get();
     } catch (ExecutionException e) {
-      throw (Exception)e.getCause();
+      throw (Exception) e.getCause();
     }
   }
 

+ 52 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncRPCMultipleDestinationMountTableResolver.java

@@ -25,16 +25,22 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
+import org.apache.hadoop.hdfs.server.federation.resolver.order.LocalResolver;
 import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod;
+import org.apache.hadoop.hdfs.server.federation.router.RouterClient;
 import org.apache.hadoop.hdfs.server.federation.router.RouterClientProtocol;
 import org.apache.hadoop.hdfs.server.federation.router.RouterQuotaUsage;
 import org.apache.hadoop.hdfs.server.federation.router.TestRouterRPCMultipleDestinationMountTableResolver;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.HashMap;
@@ -52,14 +58,21 @@ import static org.junit.Assert.assertTrue;
 public class TestRouterAsyncRPCMultipleDestinationMountTableResolver extends
     TestRouterRPCMultipleDestinationMountTableResolver {
 
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestRouterAsyncRPCMultipleDestinationMountTableResolver.class);
+
   @BeforeClass
   public static void setUp() throws Exception {
 
     // Build and start a federated cluster.
     cluster = new StateStoreDFSCluster(false, 3,
         MultipleDestinationMountTableResolver.class);
-    Configuration routerConf =
-        new RouterConfigBuilder().stateStore().admin().quota().rpc().build();
+    Configuration routerConf = new RouterConfigBuilder()
+        .stateStore()
+        .admin()
+        .quota()
+        .rpc()
+        .build();
     routerConf.setBoolean(DFS_ROUTER_ASYNC_RPC_ENABLE_KEY, true);
 
     Configuration hdfsConf = new Configuration(false);
@@ -84,6 +97,43 @@ public class TestRouterAsyncRPCMultipleDestinationMountTableResolver extends
     rpcServer =routerContext.getRouter().getRpcServer();
   }
 
+  @Test
+  public void testLocalResolverGetDatanodesSubcluster() throws IOException {
+    String testPath = "/testLocalResolverGetDatanodesSubcluster";
+    Path path = new Path(testPath);
+    Map<String, String> destMap = new HashMap<>();
+    destMap.put("ns0", testPath);
+    destMap.put("ns1", testPath);
+    nnFs0.mkdirs(path);
+    nnFs1.mkdirs(path);
+    MountTable addEntry =
+        MountTable.newInstance(testPath, destMap);
+    addEntry.setQuota(new RouterQuotaUsage.Builder().build());
+    addEntry.setDestOrder(DestinationOrder.LOCAL);
+    assertTrue(addMountTable(addEntry));
+
+    Map<String, String> datanodesSubcluster = null;
+    try {
+      MultipleDestinationMountTableResolver resolver =
+          (MultipleDestinationMountTableResolver) routerContext.getRouter().getSubclusterResolver();
+      LocalResolver localResolver =
+          (LocalResolver) resolver.getOrderedResolver(DestinationOrder.LOCAL);
+      datanodesSubcluster = localResolver.getDatanodesSubcluster();
+    } catch (Exception e) {
+      LOG.info("Exception occurs when testLocalResolverGetDatanodesSubcluster.", e);
+    } finally {
+      RouterClient client = routerContext.getAdminClient();
+      MountTableManager mountTableManager = client.getMountTableManager();
+      RemoveMountTableEntryRequest req2 =
+          RemoveMountTableEntryRequest.newInstance(testPath);
+      mountTableManager.removeMountTableEntry(req2);
+      nnFs0.delete(new Path(testPath), true);
+      nnFs1.delete(new Path(testPath), true);
+    }
+    assertNotNull(datanodesSubcluster);
+    assertFalse(datanodesSubcluster.isEmpty());
+  }
+
   @Override
   @Test
   public void testInvokeAtAvailableNs() throws IOException {