Browse Source

HDFS-9038. DFS reserved space is erroneously counted towards non-DFS used. (Brahma Reddy Battula)
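The accounting change in brief: the NameNode previously derived non-DFS used as capacity - dfsUsed - remaining. Since remaining already has the dfs.datanode.du.reserved bytes subtracted, the reserve itself surfaced as non-DFS usage. With this patch each volume measures non-DFS usage directly from df and reports it explicitly in storage reports. A standalone sketch of the arithmetic (not part of the patch; class name hypothetical, reservedForRbw taken as zero), using the same numbers as the new TestFsVolumeList case:

  public class NonDfsUsedSketch {
    public static void main(String[] args) {
      long diskCapacity = 1000L; // raw disk size (df capacity)
      long reserved = 100L;      // dfs.datanode.du.reserved
      long dfsUsed = 200L;       // HDFS block data on the volume
      long otherUsed = 300L;     // non-HDFS data on the same disk

      long diskUsed = dfsUsed + otherUsed;        // 500, df "used"
      long dfAvailable = diskCapacity - diskUsed; // 500, df "available"
      long capacity = diskCapacity - reserved;    // 900, advertised to the NameNode

      // Legacy derivation: remaining subtracts the full reserve from
      // df-available, so the reserve shows up as non-DFS used.
      long remaining = Math.min(capacity - dfsUsed, dfAvailable - reserved); // 400
      long legacyNonDfsUsed = capacity - dfsUsed - remaining;                // 300

      // This patch: measure non-DFS usage directly and exclude the reserve.
      long actualNonDfsUsed = diskUsed - dfsUsed;                  // 300
      long nonDfsUsed = Math.max(0L, actualNonDfsUsed - reserved); // 200

      System.out.println("legacy = " + legacyNonDfsUsed + ", fixed = " + nonDfsUsed);
    }
  }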

Arpit Agarwal 8 years ago
parent
commit
607801b2ff
18 changed files with 218 additions and 43 deletions
  1. + 29 - 5  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
  2. + 27 - 15  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  3. + 3 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
  4. + 7 - 3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
  5. + 4 - 3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
  6. + 2 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
  7. + 34 - 2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
  8. + 9 - 3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java
  9. + 2 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
  10. + 22 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
  11. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
  12. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
  13. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
  14. + 39 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
  15. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
  16. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
  17. + 31 - 4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
  18. + 3 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

+ 29 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -44,6 +44,7 @@ import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
 public class DatanodeInfo extends DatanodeID implements Node {
   private long capacity;
   private long dfsUsed;
+  private long nonDfsUsed;
   private long remaining;
   private long blockPoolUsed;
   private long cacheCapacity;
@@ -87,6 +88,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     super(from);
     this.capacity = from.getCapacity();
     this.dfsUsed = from.getDfsUsed();
+    this.nonDfsUsed = from.getNonDfsUsed();
     this.remaining = from.getRemaining();
     this.blockPoolUsed = from.getBlockPoolUsed();
     this.cacheCapacity = from.getCacheCapacity();
@@ -102,6 +104,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     super(nodeID);
     this.capacity = 0L;
     this.dfsUsed = 0L;
+    this.nonDfsUsed = 0L;
     this.remaining = 0L;
     this.blockPoolUsed = 0L;
     this.cacheCapacity = 0L;
@@ -138,10 +141,25 @@ public class DatanodeInfo extends DatanodeID implements Node {
       final long lastUpdate, final long lastUpdateMonotonic,
       final int xceiverCount, final String networkLocation,
       final AdminStates adminState) {
-    super(ipAddr, hostName, datanodeUuid, xferPort, infoPort,
-            infoSecurePort, ipcPort);
+    this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
+        ipcPort, capacity, dfsUsed, 0L, remaining, blockPoolUsed, cacheCapacity,
+        cacheUsed, lastUpdate, lastUpdateMonotonic, xceiverCount,
+        networkLocation, adminState);
+  }
+  /** Constructor. */
+  public DatanodeInfo(final String ipAddr, final String hostName,
+     final String datanodeUuid, final int xferPort, final int infoPort,
+     final int infoSecurePort, final int ipcPort, final long capacity,
+     final long dfsUsed, final long nonDfsUsed, final long remaining,
+     final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
+     final long lastUpdate, final long lastUpdateMonotonic,
+     final int xceiverCount, final String networkLocation,
+     final AdminStates adminState) {
+      super(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
+                ipcPort);
     this.capacity = capacity;
     this.dfsUsed = dfsUsed;
+    this.nonDfsUsed = nonDfsUsed;
     this.remaining = remaining;
     this.blockPoolUsed = blockPoolUsed;
     this.cacheCapacity = cacheCapacity;
@@ -169,9 +187,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
   public long getBlockPoolUsed() { return blockPoolUsed; }
 
 /** The non-DFS used space by the data node. */
-  public long getNonDfsUsed() { 
-    long nonDFSUsed = capacity - dfsUsed - remaining;
-    return nonDFSUsed < 0 ? 0 : nonDFSUsed;
+  public long getNonDfsUsed() {
+    return nonDfsUsed;
   }
 
   /** The used space by the data node as percentage of present capacity */
@@ -261,6 +278,13 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.dfsUsed = dfsUsed;
   }
 
+  /**
+   * Sets the non-DFS used space for the datanode.
+   */
+  public void setNonDfsUsed(long nonDfsUsed) {
+    this.nonDfsUsed = nonDfsUsed;
+  }
+
   /** Sets raw free space. */
   public void setRemaining(long remaining) { 
     this.remaining = remaining; 
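DatanodeInfo now stores nonDfsUsed as an explicit field populated from storage reports, rather than deriving it (clamped at zero) from capacity, dfsUsed and remaining on each call; the old derivation survives only as a compatibility fallback in PBHelper below.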

+ 27 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -640,14 +640,23 @@ public class PBHelper {
   }
   
   static public DatanodeInfo convert(DatanodeInfoProto di) {
-    if (di == null) return null;
-    return new DatanodeInfo(
-        PBHelper.convert(di.getId()),
-        di.hasLocation() ? di.getLocation() : null , 
-        di.getCapacity(),  di.getDfsUsed(),  di.getRemaining(),
-        di.getBlockPoolUsed(), di.getCacheCapacity(), di.getCacheUsed(),
-        di.getLastUpdate(), di.getLastUpdateMonotonic(),
-        di.getXceiverCount(), PBHelper.convert(di.getAdminState()));
+    if (di == null) {
+      return null;
+    }
+    DatanodeInfo dinfo = new DatanodeInfo(PBHelper.convert(di.getId()),
+        di.hasLocation() ? di.getLocation() : null, di.getCapacity(),
+        di.getDfsUsed(), di.getRemaining(), di.getBlockPoolUsed(),
+        di.getCacheCapacity(), di.getCacheUsed(), di.getLastUpdate(),
+        di.getLastUpdateMonotonic(), di.getXceiverCount(),
+        PBHelper.convert(di.getAdminState()));
+    if (di.hasNonDfsUsed()) {
+      dinfo.setNonDfsUsed(di.getNonDfsUsed());
+    } else {
+      // use the legacy way for older datanodes
+      long nonDFSUsed = di.getCapacity() - di.getDfsUsed() - di.getRemaining();
+      dinfo.setNonDfsUsed(nonDFSUsed < 0 ? 0 : nonDFSUsed);
+    }
+    return dinfo;
   }
   
   static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
@@ -703,6 +712,7 @@ public class PBHelper {
         .setId(PBHelper.convert((DatanodeID)info))
         .setCapacity(info.getCapacity())
         .setDfsUsed(info.getDfsUsed())
+        .setNonDfsUsed(info.getNonDfsUsed())
         .setRemaining(info.getRemaining())
         .setBlockPoolUsed(info.getBlockPoolUsed())
         .setCacheCapacity(info.getCacheCapacity())
@@ -1909,17 +1919,19 @@ public class PBHelper {
         .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
         .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
         .setStorageUuid(r.getStorage().getStorageID())
-        .setStorage(convert(r.getStorage()));
+        .setStorage(convert(r.getStorage()))
+        .setNonDfsUsed(r.getNonDfsUsed());
     return builder.build();
   }
 
   public static StorageReport convert(StorageReportProto p) {
-    return new StorageReport(
-        p.hasStorage() ?
-            convert(p.getStorage()) :
-            new DatanodeStorage(p.getStorageUuid()),
-        p.getFailed(), p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
-        p.getBlockPoolUsed());
+    long nonDfsUsed = p.hasNonDfsUsed() ?
+        p.getNonDfsUsed() :
+        p.getCapacity() - p.getDfsUsed() - p.getRemaining();
+    return new StorageReport(p.hasStorage() ?
+        convert(p.getStorage()) :
+        new DatanodeStorage(p.getStorageUuid()), p.getFailed(), p.getCapacity(),
+        p.getDfsUsed(), p.getRemaining(), p.getBlockPoolUsed(), nonDfsUsed);
   }
 
   public static StorageReport[] convertStorageReports(
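Compatibility note: a report from an older DataNode does not carry the new optional field, so hasNonDfsUsed() returns false and the converter falls back to the legacy capacity - dfsUsed - remaining derivation, clamped at zero, instead of silently reporting zero non-DFS usage.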

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -416,6 +416,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
     long totalRemaining = 0;
     long totalBlockPoolUsed = 0;
     long totalDfsUsed = 0;
+    long totalNonDfsUsed = 0;
     Set<DatanodeStorageInfo> failedStorageInfos = null;
 
     // Decide if we should check for any missing StorageReport and mark it as
@@ -472,6 +473,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
       totalRemaining += report.getRemaining();
       totalBlockPoolUsed += report.getBlockPoolUsed();
       totalDfsUsed += report.getDfsUsed();
+      totalNonDfsUsed += report.getNonDfsUsed();
     }
     rollBlocksScheduled(getLastUpdateMonotonic());
 
@@ -480,6 +482,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
     setRemaining(totalRemaining);
     setBlockPoolUsed(totalBlockPoolUsed);
     setDfsUsed(totalDfsUsed);
+    setNonDfsUsed(totalNonDfsUsed);
     if (checkFailedStorages) {
       updateFailedStorage(failedStorageInfos);
     }

+ 7 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java

@@ -109,6 +109,7 @@ public class DatanodeStorageInfo {
 
   private long capacity;
   private long dfsUsed;
+  private long nonDfsUsed;
   private volatile long remaining;
   private long blockPoolUsed;
 
@@ -218,6 +219,9 @@ public class DatanodeStorageInfo {
     return dfsUsed;
   }
 
+  long getNonDfsUsed() {
+    return nonDfsUsed;
+  }
   long getRemaining() {
     return remaining;
   }
@@ -291,6 +295,7 @@ public class DatanodeStorageInfo {
   void updateState(StorageReport r) {
     capacity = r.getCapacity();
     dfsUsed = r.getDfsUsed();
+    nonDfsUsed = r.getNonDfsUsed();
     remaining = r.getRemaining();
     blockPoolUsed = r.getBlockPoolUsed();
   }
@@ -328,9 +333,8 @@ public class DatanodeStorageInfo {
   }
   
   StorageReport toStorageReport() {
-    return new StorageReport(
-        new DatanodeStorage(storageID, state, storageType),
-        false, capacity, dfsUsed, remaining, blockPoolUsed);
+    return new StorageReport(new DatanodeStorage(storageID, state, storageType),
+        false, capacity, dfsUsed, remaining, blockPoolUsed, nonDfsUsed);
   }
 
   static Iterable<StorageType> toStorageTypes(

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java

@@ -141,9 +141,7 @@ class HeartbeatManager implements DatanodeStatistics {
 
   @Override
   public synchronized long getCapacityUsedNonDFS() {
-    final long nonDFSUsed = stats.capacityTotal
-        - stats.capacityRemaining - stats.capacityUsed;
-    return nonDFSUsed < 0L? 0L : nonDFSUsed;
+    return stats.capacityUsedNonDfs;
   }
 
   @Override
@@ -394,6 +392,7 @@ class HeartbeatManager implements DatanodeStatistics {
   private static class Stats {
     private long capacityTotal = 0L;
     private long capacityUsed = 0L;
+    private long capacityUsedNonDfs = 0L;
     private long capacityRemaining = 0L;
     private long blockPoolUsed = 0L;
     private int xceiverCount = 0;
@@ -407,6 +406,7 @@ class HeartbeatManager implements DatanodeStatistics {
 
     private void add(final DatanodeDescriptor node) {
       capacityUsed += node.getDfsUsed();
+      capacityUsedNonDfs += node.getNonDfsUsed();
       blockPoolUsed += node.getBlockPoolUsed();
       xceiverCount += node.getXceiverCount();
       if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
@@ -423,6 +423,7 @@ class HeartbeatManager implements DatanodeStatistics {
 
     private void subtract(final DatanodeDescriptor node) {
       capacityUsed -= node.getDfsUsed();
+      capacityUsedNonDfs -= node.getNonDfsUsed();
       blockPoolUsed -= node.getBlockPoolUsed();
       xceiverCount -= node.getXceiverCount();
       if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
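With this, getCapacityUsedNonDFS() returns a running sum maintained per node in add()/subtract(), instead of a value derived from the capacity totals, keeping the cluster-wide metric consistent with the new per-volume accounting.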

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -160,7 +160,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
               volume.getCapacity(),
               volume.getDfsUsed(),
               volume.getAvailable(),
-              volume.getBlockPoolUsed(bpid));
+              volume.getBlockPoolUsed(bpid),
+              volume.getNonDfsUsed());
           reports.add(sr);
         } catch (ClosedChannelException e) {
           continue;

+ 34 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java

@@ -345,14 +345,46 @@ public class FsVolumeImpl implements FsVolumeSpi {
    */
   @Override
   public long getAvailable() throws IOException {
-    long remaining = getCapacity() - getDfsUsed() - reservedForRbw.get();
-    long available = usage.getAvailable() - reserved - reservedForRbw.get();
+    long remaining = getCapacity() - getDfsUsed() - getReservedForRbw();
+    long available =
+        usage.getAvailable() - getRemainingReserved() - getReservedForRbw();
     if (remaining > available) {
       remaining = available;
     }
     return (remaining > 0) ? remaining : 0;
   }
 
+  long getActualNonDfsUsed() throws IOException {
+    return usage.getUsed() - getDfsUsed();
+  }
+
+  private long getRemainingReserved() throws IOException {
+    long actualNonDfsUsed = getActualNonDfsUsed();
+    if (actualNonDfsUsed < reserved) {
+      return reserved - actualNonDfsUsed;
+    }
+    return 0L;
+  }
+
+  /**
+   * Unplanned non-DFS usage, i.e. extra usage beyond the configured reserve.
+   *
+   * @return the non-DFS used space in bytes beyond the reserve, never negative
+   * @throws IOException if the volume's disk usage cannot be queried
+   */
+  public long getNonDfsUsed() throws IOException {
+    long actualNonDfsUsed = getActualNonDfsUsed();
+    if (actualNonDfsUsed < reserved) {
+      return 0L;
+    }
+    return actualNonDfsUsed - reserved;
+  }
+
+  @VisibleForTesting
+  long getDfAvailable() {
+    return usage.getAvailable();
+  }
+
   @VisibleForTesting
   public long getReservedForRbw() {
     return reservedForRbw.get();
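The split between the two new getters is the crux of the fix: while non-DFS data still fits within the reserve, the untouched remainder of the reserve is withheld from getAvailable() and nothing is reported as non-DFS used; once the reserve is exceeded, only the excess counts. A minimal sketch (hypothetical class, mirroring the two methods above):

  public class ReserveSplitSketch {
    // The untouched part of the reserve, still withheld from available space.
    static long remainingReserved(long actualNonDfsUsed, long reserved) {
      return actualNonDfsUsed < reserved ? reserved - actualNonDfsUsed : 0L;
    }
    // Only usage beyond the reserve is reported as non-DFS used.
    static long nonDfsUsed(long actualNonDfsUsed, long reserved) {
      return actualNonDfsUsed < reserved ? 0L : actualNonDfsUsed - reserved;
    }
    public static void main(String[] args) {
      System.out.println(remainingReserved(40L, 100L) + " " + nonDfsUsed(40L, 100L));   // 60 0
      System.out.println(remainingReserved(300L, 100L) + " " + nonDfsUsed(300L, 100L)); // 0 200
    }
  }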

+ 9 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java

@@ -25,17 +25,19 @@ public class StorageReport {
   private final boolean failed;
   private final long capacity;
   private final long dfsUsed;
+  private final long nonDfsUsed;
   private final long remaining;
   private final long blockPoolUsed;
 
   public static final StorageReport[] EMPTY_ARRAY = {};
-  
-  public StorageReport(DatanodeStorage storage, boolean failed,
-      long capacity, long dfsUsed, long remaining, long bpUsed) {
+
+  public StorageReport(DatanodeStorage storage, boolean failed, long capacity,
+      long dfsUsed, long remaining, long bpUsed, long nonDfsUsed) {
     this.storage = storage;
     this.failed = failed;
     this.capacity = capacity;
     this.dfsUsed = dfsUsed;
+    this.nonDfsUsed = nonDfsUsed;
     this.remaining = remaining;
     this.blockPoolUsed = bpUsed;
   }
@@ -56,6 +58,10 @@ public class StorageReport {
     return dfsUsed;
   }
 
+  public long getNonDfsUsed() {
+    return nonDfsUsed;
+  }
+
   public long getRemaining() {
     return remaining;
   }
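Because nonDfsUsed is a new constructor parameter rather than a setter, every StorageReport construction site has to supply it, which is why the test callers further down all pass an extra 0 or 0L.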

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto

@@ -88,6 +88,7 @@ message DatanodeInfoProto {
   optional uint64 lastUpdate = 6 [default = 0];
   optional uint32 xceiverCount = 7 [default = 0];
   optional string location = 8;
+  optional uint64 nonDfsUsed = 9;
   enum AdminState {
     NORMAL = 0;
     DECOMMISSION_INPROGRESS = 1;
@@ -122,6 +123,7 @@ message StorageReportProto {
   optional uint64 remaining = 5 [ default = 0 ];
   optional uint64 blockPoolUsed = 6 [ default = 0 ];
   optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
+  optional uint64 nonDfsUsed = 8;
 }
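Wire-compatibility note: both additions are optional fields on previously unused tag numbers (9 in DatanodeInfoProto, 8 in StorageReportProto), so old and new nodes interoperate; an absent field is exactly what the hasNonDfsUsed() checks in PBHelper detect.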
 
 /**

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java

@@ -638,4 +638,26 @@ public class TestPBHelper {
         .build();
     Assert.assertEquals(s, PBHelper.convert(PBHelper.convert(s)));
   }
+
+  @Test
+  public void testDataNodeInfoPBHelper() {
+    DatanodeID id = DFSTestUtil.getLocalDatanodeID();
+    DatanodeInfo dnInfos0 = new DatanodeInfo(id);
+    dnInfos0.setCapacity(3500L);
+    dnInfos0.setDfsUsed(1000L);
+    dnInfos0.setNonDfsUsed(2000L);
+    dnInfos0.setRemaining(500L);
+    HdfsProtos.DatanodeInfoProto dnproto = PBHelper.convert(dnInfos0);
+    DatanodeInfo dnInfos1 = PBHelper.convert(dnproto);
+    compare(dnInfos0, dnInfos1);
+    assertEquals(dnInfos0.getNonDfsUsed(), dnInfos1.getNonDfsUsed());
+
+    // Test conversion without the nonDfsUsed field (as reported by an older datanode)
+    HdfsProtos.DatanodeInfoProto.Builder b =
+        HdfsProtos.DatanodeInfoProto.newBuilder();
+    b.setId(PBHelper.convert(id)).setCapacity(3500L).setDfsUsed(1000L)
+        .setRemaining(500L);
+    DatanodeInfo dnInfos3 = PBHelper.convert(b.build());
+    assertEquals(dnInfos0.getNonDfsUsed(), dnInfos3.getNonDfsUsed());
+  }
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java

@@ -291,7 +291,7 @@ public class BlockManagerTestUtil {
       StorageReport report = new StorageReport(
           dns ,false, storage.getCapacity(),
           storage.getDfsUsed(), storage.getRemaining(),
-          storage.getBlockPoolUsed());
+          storage.getBlockPoolUsed(), 0L);
       reports.add(report);
     }
     return reports.toArray(StorageReport.EMPTY_ARRAY);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java

@@ -427,7 +427,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     synchronized StorageReport getStorageReport(String bpid) {
       return new StorageReport(dnStorage,
           false, getCapacity(), getUsed(), getFree(),
-          map.get(bpid).getUsed());
+          map.get(bpid).getUsed(), 0L);
     }
   }
   

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java

@@ -72,7 +72,7 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   @Override
   public StorageReport[] getStorageReports(String bpid) throws IOException {
     StorageReport[] result = new StorageReport[1];
-    result[0] = new StorageReport(storage, false, 0, 0, 0, 0);
+    result[0] = new StorageReport(storage, false, 0, 0, 0, 0, 0);
     return result;
   }
 

+ 39 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosing
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 import java.io.File;
 import java.io.IOException;
@@ -34,6 +35,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
@@ -120,4 +122,41 @@ public class TestFsVolumeList {
     } catch (IllegalStateException e) {
     }
   }
+
+  @Test
+  public void testNonDfsUsedMetricForVolume() throws Exception {
+    File volDir = new File(baseDir, "volume-0");
+    volDir.mkdirs();
+    /*
+     * Consider the following example:
+     * Capacity - 1000
+     * Reserved - 100
+     * DfsUsed  - 200
+     * Actual Non-DfsUsed - 300 -->(expected)
+     * ReservedForReplicas - 50
+     */
+    long diskCapacity = 1000L;
+    long duReserved = 100L;
+    long dfsUsage = 200L;
+    long actualNonDfsUsage = 300L;
+    long reservedForReplicas = 50L;
+    conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY, duReserved);
+    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir, conf,
+        StorageType.DEFAULT);
+    FsVolumeImpl spyVolume = Mockito.spy(volume);
+    // Set Capacity for testing
+    long testCapacity = diskCapacity - duReserved;
+    spyVolume.setCapacityForTesting(testCapacity);
+    // Mock volume.getDfAvailable()
+    long dfAvailable = diskCapacity - dfsUsage - actualNonDfsUsage;
+    Mockito.doReturn(dfAvailable).when(spyVolume).getDfAvailable();
+    // Mock dfsUsage
+    Mockito.doReturn(dfsUsage).when(spyVolume).getDfsUsed();
+    // Mock reservedForReplicas
+    Mockito.doReturn(reservedForReplicas).when(spyVolume).getReservedForRbw();
+    Mockito.doReturn(actualNonDfsUsage).when(spyVolume).getActualNonDfsUsed();
+    long expectedNonDfsUsage = actualNonDfsUsage - duReserved;
+    assertEquals(expectedNonDfsUsage, spyVolume.getNonDfsUsed());
+  }
+
 }
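The mocked values are self-consistent with the comment block above: df used is 200 + 300 = 500, so dfAvailable is 1000 - 500 = 500, and the expected non-DFS used is actualNonDfsUsage - duReserved = 300 - 100 = 200, matching the sketch after the commit message.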

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java

@@ -953,7 +953,7 @@ public class NNThroughputBenchmark implements Tool {
       // register datanode
       // TODO:FEDERATION currently a single block pool is supported
       StorageReport[] rep = { new StorageReport(storage, false,
-          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
+          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0L) };
       DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, rep,
           0L, 0L, 0, 0, 0, null).getCommands();
       if(cmds != null) {
@@ -1002,7 +1002,7 @@ public class NNThroughputBenchmark implements Tool {
     int replicateBlocks() throws IOException {
       // register datanode
       StorageReport[] rep = { new StorageReport(storage,
-          false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
+          false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0L) };
       DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
           rep, 0L, 0L, 0, 0, 0, null).getCommands();
       if (cmds != null) {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java

@@ -121,7 +121,7 @@ public class TestDeadDatanode {
     // that asks datanode to register again
     StorageReport[] rep = { new StorageReport(
         new DatanodeStorage(reg.getDatanodeUuid()),
-        false, 0, 0, 0, 0) };
+        false, 0, 0, 0, 0, 0) };
     DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null)
         .getCommands();
     assertEquals(1, cmd.length);

+ 31 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java

@@ -100,8 +100,9 @@ public class TestNamenodeCapacityReport {
             + " used " + used + " non DFS used " + nonDFSUsed 
             + " remaining " + remaining + " perentUsed " + percentUsed
             + " percentRemaining " + percentRemaining);
-        
-        assertTrue(configCapacity == (used + remaining + nonDFSUsed));
+        // There is 5% space reserved in the ext filesystem which is not
+        // accounted for here.
+        assertTrue(configCapacity >= (used + remaining + nonDFSUsed));
         assertTrue(percentUsed == DFSUtil.getPercentUsed(used, configCapacity));
         assertTrue(percentRemaining == DFSUtil.getPercentRemaining(remaining,
             configCapacity));
@@ -148,7 +149,9 @@ public class TestNamenodeCapacityReport {
       assertTrue(configCapacity == diskCapacity - reserved);
       
       // Ensure new total capacity reported excludes the reserved space
-      assertTrue(configCapacity == (used + remaining + nonDFSUsed));
+      // There is 5% space reserved in the ext filesystem which is not
+      // accounted for here.
+      assertTrue(configCapacity >= (used + remaining + nonDFSUsed));
 
       // Ensure percent used is calculated based on used and present capacity
       assertTrue(percentUsed == DFSUtil.getPercentUsed(used, configCapacity));
@@ -158,9 +161,33 @@ public class TestNamenodeCapacityReport {
 
      // Ensure percent remaining is calculated based on remaining and present capacity
       assertTrue(percentRemaining == ((float)remaining * 100.0f)/(float)configCapacity);
+
+      // Test case for non-DFS used where reserved replica space must
+      // also be taken into account.
+      final int fileCount = 5;
+      final DistributedFileSystem fs = cluster.getFileSystem();
+      // create streams and hsync to force datastreamers to start
+      DFSOutputStream[] streams = new DFSOutputStream[fileCount];
+      for (int i=0; i < fileCount; i++) {
+        streams[i] = (DFSOutputStream)fs.create(new Path("/f"+i))
+            .getWrappedStream();
+        streams[i].write("1".getBytes());
+        streams[i].hsync();
+      }
+      triggerHeartbeats(cluster.getDataNodes());
+      assertTrue(configCapacity > (namesystem.getCapacityUsed() + namesystem
+          .getCapacityRemaining() + namesystem.getNonDfsUsedSpace()));
+      // There is a chance that non-DFS usage might have grown slightly due to
+      // test logs, so assume 1MB of other files used within this gap.
+      assertTrue(
+          (namesystem.getCapacityUsed() + namesystem.getCapacityRemaining()
+              + namesystem.getNonDfsUsedSpace() + fileCount * fs
+              .getDefaultBlockSize()) - configCapacity < 1 * 1024);
     }
     finally {
-      if (cluster != null) {cluster.shutdown();}
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
   

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

@@ -155,7 +155,9 @@ public class TestNameNodeMetrics {
         MetricsAsserts.getLongGauge("CapacityRemaining", rb);
     long capacityUsedNonDFS =
         MetricsAsserts.getLongGauge("CapacityUsedNonDFS", rb);
-    assert(capacityUsed + capacityRemaining + capacityUsedNonDFS ==
+    // There is 5% space reserved in the ext filesystem which is not
+    // accounted for here.
+    assert(capacityUsed + capacityRemaining + capacityUsedNonDFS <=
         capacityTotal);
   }