HDFS-17043. HttpFS implementation for getAllErasureCodingPolicies (#5734). Contributed by Hualong Zhang.

Reviewed-by: Shilun Fan <slfan1989@apache.org>
Signed-off-by: Ayush Saxena <ayushsaxena@apache.org>
zhtttylz 1 year ago
parent
commit
9a7d1b49e2

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java

@@ -23,6 +23,7 @@ import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
 
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.type.MapType;
@@ -284,6 +285,7 @@ public class HttpFSFileSystem extends FileSystem
         HTTP_POST), SATISFYSTORAGEPOLICY(HTTP_PUT), GETSNAPSHOTDIFFLISTING(HTTP_GET),
     GETFILELINKSTATUS(HTTP_GET),
     GETSTATUS(HTTP_GET),
+    GETECPOLICIES(HTTP_GET),
     GET_BLOCK_LOCATIONS(HTTP_GET);
 
     private String httpMethod;
@@ -1773,6 +1775,17 @@ public class HttpFSFileSystem extends FileSystem
     return JsonUtilClient.toFsStatus(json);
   }
 
+  public Collection<ErasureCodingPolicyInfo> getAllErasureCodingPolicies() throws IOException {
+    Map<String, String> params = new HashMap<>();
+    params.put(OP_PARAM, Operation.GETECPOLICIES.toString());
+    Path path = new Path(getUri().toString(), "/");
+    HttpURLConnection conn =
+        getConnection(Operation.GETECPOLICIES.getMethod(), params, path, false);
+    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return JsonUtilClient.getAllErasureCodingPolicies(json);
+  }
+
   @VisibleForTesting
   static BlockLocation[] toBlockLocations(JSONObject json) throws IOException {
     ObjectMapper mapper = new ObjectMapper();

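A minimal caller-side sketch of the new client API (the HttpFS endpoint and the fs.webhdfs.impl override are illustrative assumptions; the HttpFS tests wire the scheme up the same way):

import java.net.URI;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;

public class ListEcPolicies {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Route the webhdfs:// scheme to HttpFSFileSystem (as BaseTestHttpFSWith does).
    conf.set("fs.webhdfs.impl", HttpFSFileSystem.class.getName());
    // Hypothetical HttpFS endpoint; 14000 is the HttpFS default port.
    try (FileSystem fs = FileSystem.get(URI.create("webhdfs://httpfs-host:14000"), conf)) {
      Collection<ErasureCodingPolicyInfo> policies =
          ((HttpFSFileSystem) fs).getAllErasureCodingPolicies();
      for (ErasureCodingPolicyInfo info : policies) {
        System.out.println(info.getPolicy().getName() + " : " + info.getState());
      }
    }
  }
}
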
+ 27 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -2342,4 +2343,30 @@ public final class FSOperations {
       return toJson(fsStatus);
     }
   }
+
+  /**
+   * Executor that performs a getAllErasureCodingPolicies operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSGetErasureCodingPolicies
+      implements FileSystemAccess.FileSystemExecutor<String> {
+
+    public FSGetErasureCodingPolicies() {
+    }
+
+    @Override
+    public String execute(FileSystem fs) throws IOException {
+      Collection<ErasureCodingPolicyInfo> ecPolicyInfos = null;
+      if (fs instanceof DistributedFileSystem) {
+        DistributedFileSystem dfs = (DistributedFileSystem) fs;
+        ecPolicyInfos = dfs.getAllErasureCodingPolicies();
+      } else {
+        throw new UnsupportedOperationException("getAllErasureCodingPolicies is " +
+            "not supported for HttpFS on " + fs.getClass() +
+            ". Please check your fs.defaultFS configuration");
+      }
+      HttpFSServerWebApp.get().getMetrics().incrOpsAllECPolicies();
+      return JsonUtil.toJsonString(ecPolicyInfos.toArray(new ErasureCodingPolicyInfo[0]));
+    }
+  }
 }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java

@@ -130,6 +130,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
     PARAMS_DEF.put(Operation.SATISFYSTORAGEPOLICY, new Class[] {});
     PARAMS_DEF.put(Operation.GETFILELINKSTATUS, new Class[]{});
     PARAMS_DEF.put(Operation.GETSTATUS, new Class[]{});
+    PARAMS_DEF.put(Operation.GETECPOLICIES, new Class[]{});
     PARAMS_DEF.put(Operation.GET_BLOCK_LOCATIONS, new Class[] {OffsetParam.class, LenParam.class});
   }
 

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java

@@ -526,6 +526,14 @@ public class HttpFSServer {
       response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
       break;
     }
+    case GETECPOLICIES: {
+      FSOperations.FSGetErasureCodingPolicies command =
+          new FSOperations.FSGetErasureCodingPolicies();
+      String js = fsExecute(user, command);
+      AUDIT_LOG.info("[{}]", path);
+      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+      break;
+    }
     case GET_BLOCK_LOCATIONS: {
       long offset = 0;
       long len = Long.MAX_VALUE;

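On the wire this is a plain HttpFS REST GET against the root path, handled by the GETECPOLICIES case above. A hypothetical invocation (host, user name, and the default HttpFS port 14000 are assumptions):

curl -i "http://httpfs-host:14000/webhdfs/v1/?op=GETECPOLICIES&user.name=hdfs"

The server answers 200 with the JSON built by FSOperations.FSGetErasureCodingPolicies, which HttpFSFileSystem parses back through JsonUtilClient.getAllErasureCodingPolicies.
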
+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java

@@ -65,6 +65,7 @@ public class HttpFSServerMetrics {
   private @Metric MutableCounterLong opsStat;
   private @Metric MutableCounterLong opsCheckAccess;
   private @Metric MutableCounterLong opsStatus;
+  private @Metric MutableCounterLong opsAllECPolicies;
 
   private final MetricsRegistry registry = new MetricsRegistry("httpfsserver");
   private final String name;
@@ -165,4 +166,8 @@ public class HttpFSServerMetrics {
   public void incrOpsStatus() {
     opsStatus.incr();
   }
+
+  public void incrOpsAllECPolicies() {
+    opsAllECPolicies.incr();
+  }
 }

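The counter follows the standard metrics2 @Metric pattern, so a test can assert it the same way the other HttpFS op counters are asserted. A hypothetical sketch using Hadoop's MetricsAsserts helpers (the registered source name "HttpFSServerMetrics" is an assumption; check how HttpFSServerWebApp registers the metrics source):

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;

public class EcPoliciesMetricCheck {
  // A @Metric MutableCounterLong field named opsAllECPolicies surfaces as "OpsAllECPolicies".
  static void assertEcPoliciesCounted(long before) {
    MetricsRecordBuilder rb = getMetrics("HttpFSServerMetrics"); // source name assumed
    assertCounter("OpsAllECPolicies", before + 1, rb);
  }
}
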
+ 39 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java

@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -1217,7 +1218,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST,
     GET_SNAPSHOT_LIST, GET_SERVERDEFAULTS, CHECKACCESS, SETECPOLICY,
     SATISFYSTORAGEPOLICY, GET_SNAPSHOT_DIFF_LISTING, GETFILEBLOCKLOCATIONS,
-    GETFILELINKSTATUS, GETSTATUS
+    GETFILELINKSTATUS, GETSTATUS, GETECPOLICIES
   }
 
   private void operation(Operation op) throws Exception {
@@ -1366,8 +1367,10 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     case GETSTATUS:
       testGetStatus();
       break;
+    case GETECPOLICIES:
+      testGetAllECPolicies();
+      break;
     }
-
   }
 
   @Parameterized.Parameters
@@ -2111,6 +2114,40 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     }
   }
 
+  private void testGetAllECPolicies() throws Exception {
+    if (isLocalFS()) {
+      // do not test getAllECPolicies for the local FS.
+      return;
+    }
+    final Path path = new Path("/foo");
+    FileSystem fs = FileSystem.get(path.toUri(), this.getProxiedFSConf());
+    if (fs instanceof DistributedFileSystem) {
+      DistributedFileSystem dfs = (DistributedFileSystem) fs;
+      FileSystem httpFs = this.getHttpFSFileSystem();
+
+      Collection<ErasureCodingPolicyInfo> dfsAllErasureCodingPolicies =
+          dfs.getAllErasureCodingPolicies();
+      Collection<ErasureCodingPolicyInfo> httpFsErasureCodingPolicies = null;
+
+      if (httpFs instanceof HttpFSFileSystem) {
+        HttpFSFileSystem httpFS = (HttpFSFileSystem) httpFs;
+        httpFsErasureCodingPolicies = httpFS.getAllErasureCodingPolicies();
+      } else if (httpFs instanceof WebHdfsFileSystem) {
+        WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) httpFs;
+        httpFsErasureCodingPolicies = webHdfsFileSystem.getAllErasureCodingPolicies();
+      } else {
+        Assert.fail(httpFs.getClass().getSimpleName() +
+            " is not of type HttpFSFileSystem or WebHdfsFileSystem");
+      }
+
+      // Validate that the erasure coding policies match those from DistributedFileSystem.
+      assertEquals(dfsAllErasureCodingPolicies.size(), httpFsErasureCodingPolicies.size());
+      assertTrue(dfsAllErasureCodingPolicies.containsAll(httpFsErasureCodingPolicies));
+    } else {
+      Assert.fail(fs.getClass().getSimpleName() + " is not of type DistributedFileSystem.");
+    }
+  }
+
   private void assertHttpFsReportListingWithDfsClient(SnapshotDiffReportListing diffReportListing,
       SnapshotDiffReportListing dfsDiffReportListing) {
     Assert.assertEquals(diffReportListing.getCreateList().size(),