
HDFS-16131. Show storage type for failed volumes on namenode web (#3211). Contributed by tomscut.

Signed-off-by: He Xiaoqiao <hexiaoqiao@apache.org>
litao committed 3 years ago
Commit 5d765497c5
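
This change switches the failed-volume strings that the DataNode reports to the NameNode from the bare normalized URI to StorageLocation#toString(), which keeps the storage type prefix, so the NameNode web UI can show the type of each failed volume. A minimal sketch of the difference, assuming StorageLocation#toString() renders as "[TYPE]URI" (the path is hypothetical):

    import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

    public class FailedVolumeStringSketch {
      public static void main(String[] args) throws Exception {
        StorageLocation loc = StorageLocation.parse("[DISK]/data/dn1");
        // Old reported form: bare normalized URI, storage type lost.
        System.out.println(loc.getNormalizedUri()); // e.g. file:/data/dn1
        // New reported form: toString() keeps the storage type prefix.
        System.out.println(loc);                    // e.g. [DISK]file:/data/dn1
      }
    }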

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -723,7 +723,7 @@ public class DataNode extends ReconfigurableBase
         for (Iterator<StorageLocation> newLocationItr =
              results.newLocations.iterator(); newLocationItr.hasNext();) {
           StorageLocation newLocation = newLocationItr.next();
-          if (newLocation.getNormalizedUri().toString().equals(
+          if (newLocation.toString().equals(
               failedStorageLocation)) {
             // The failed storage is being re-added. DataNode#refreshVolumes()
             // will take care of re-assessing it.
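
Since FsDatasetImpl (below) now reports failed locations in the type-prefixed form, this re-add check must compare StorageLocation#toString() on both sides; the old bare-URI form would never equal a "[TYPE]URI" string, and a re-added failed volume would not be recognized. A small illustration, with a hypothetical path:

    import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

    public class ReAddCheckSketch {
      public static void main(String[] args) throws Exception {
        // The form FsDatasetImpl reports after this change (hypothetical path).
        String failedStorageLocation = "[DISK]file:/data/dn1";
        StorageLocation newLocation = StorageLocation.parse("[DISK]/data/dn1");
        // Old comparison: bare URI vs. type-prefixed string -> no match.
        System.out.println(newLocation.getNormalizedUri().toString()
            .equals(failedStorageLocation)); // false
        // New comparison: both sides carry the "[TYPE]" prefix -> match.
        System.out.println(newLocation.toString()
            .equals(failedStorageLocation)); // true
      }
    }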

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -728,7 +728,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         infos.length);
     for (VolumeFailureInfo info: infos) {
       failedStorageLocations.add(
-          info.getFailedStorageLocation().getNormalizedUri().toString());
+          info.getFailedStorageLocation().toString());
     }
     return failedStorageLocations.toArray(
         new String[failedStorageLocations.size()]);
@@ -767,7 +767,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     long estimatedCapacityLostTotal = 0;
     for (VolumeFailureInfo info: infos) {
       failedStorageLocations.add(
-          info.getFailedStorageLocation().getNormalizedUri().toString());
+          info.getFailedStorageLocation().toString());
       long failureDate = info.getFailureDate();
       if (failureDate > lastVolumeFailureDate) {
         lastVolumeFailureDate = failureDate;
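
Both FsDatasetImpl call sites feed the same data: getFailedStorageLocations() and the VolumeFailureSummary sent in heartbeats, which is what the NameNode web page renders. A minimal sketch of the resulting array shape, with hypothetical volumes of two storage types:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

    public class FailedLocationsShapeSketch {
      public static void main(String[] args) throws Exception {
        List<String> failedStorageLocations = new ArrayList<>();
        failedStorageLocations.add(
            StorageLocation.parse("[DISK]/data/dn1").toString());
        failedStorageLocations.add(
            StorageLocation.parse("[ARCHIVE]/data/dn2").toString());
        // e.g. [[DISK]file:/data/dn1, [ARCHIVE]file:/data/dn2]
        System.out.println(failedStorageLocations);
      }
    }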

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

@@ -215,6 +215,10 @@ public class TestDataNodeVolumeFailure {
     BlockManagerTestUtil.checkHeartbeat(bm);
     // NN now should have latest volume failure
     assertEquals(1, cluster.getNamesystem().getVolumeFailuresTotal());
+    // assert that the failed storage location includes the storage type
+    assertTrue(dn.getFSDataset().getVolumeFailureSummary()
+        .getFailedStorageLocations()[0]
+        .contains("[DISK]"));
 
     // verify number of blocks and files...
     verify(filename, filesize);
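
The assertion relies on the failed volume being of type DISK (the MiniDFSCluster default), so the single reported entry starts with "[DISK]". Outside a test, the same strings can be read through the test-visible getFSDataset(); a hedged sketch, assuming a running DataNode dn:

    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;

    public class PrintFailedVolumes {
      // Prints each failed storage location, now of the form "[TYPE]URI".
      static void printFailedVolumes(DataNode dn) {
        VolumeFailureSummary summary =
            dn.getFSDataset().getVolumeFailureSummary();
        if (summary == null) {
          return; // no volume failures recorded
        }
        for (String location : summary.getFailedStorageLocations()) {
          System.out.println(location); // e.g. [DISK]file:/data/dn1
        }
      }
    }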

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java

@@ -721,7 +721,10 @@ public class TestDataNodeVolumeFailureReporting {
     String[] absolutePaths = new String[locations.length];
     for (int count = 0; count < locations.length; count++) {
       try {
-        absolutePaths[count] = new File(new URI(locations[count]))
+        String location = locations[count];
+        location = location.contains("]")
+            ? location.substring(location.indexOf("]") + 1) : location;
+        absolutePaths[count] = new File(new URI(location))
             .getAbsolutePath();
       } catch (URISyntaxException e) {
        //if the provided location is not a URI,
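
Because "[DISK]file:/data/dn1" is not itself a valid URI, the helper above strips the "[TYPE]" prefix before passing the remainder to java.net.URI. The same logic as a standalone method (stripStorageType is a hypothetical name, not HDFS API):

    public class StorageTypePrefixSketch {
      // Drops a leading "[TYPE]" tag, if present, and returns the rest.
      static String stripStorageType(String location) {
        int end = location.indexOf(']');
        return end >= 0 ? location.substring(end + 1) : location;
      }

      public static void main(String[] args) {
        System.out.println(stripStorageType("[DISK]file:/data/dn1")); // file:/data/dn1
        System.out.println(stripStorageType("file:/data/dn1"));       // unchanged
      }
    }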