
HDFS-15046. Backport HDFS-7060 to branch-2.10. Contributed by Lisheng Sun.

Wei-Chiu Chuang committed 5 years ago
commit 1d0e2ba335
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (16 additions, 31 deletions)

@@ -159,22 +159,18 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   public StorageReport[] getStorageReports(String bpid)
       throws IOException {
     List<StorageReport> reports;
-    synchronized (statsLock) {
-      List<FsVolumeImpl> curVolumes = volumes.getVolumes();
-      reports = new ArrayList<>(curVolumes.size());
-      for (FsVolumeImpl volume : curVolumes) {
-        try (FsVolumeReference ref = volume.obtainReference()) {
-          StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
-              false,
-              volume.getCapacity(),
-              volume.getDfsUsed(),
-              volume.getAvailable(),
-              volume.getBlockPoolUsed(bpid),
-              volume.getNonDfsUsed());
-          reports.add(sr);
-        } catch (ClosedChannelException e) {
-          continue;
-        }
+    // Volumes are references obtained from a copy-on-write snapshot, so
+    // accessing the volume metrics does not require an additional lock.
+    List<FsVolumeImpl> curVolumes = volumes.getVolumes();
+    reports = new ArrayList<>(curVolumes.size());
+    for (FsVolumeImpl volume : curVolumes) {
+      try (FsVolumeReference ref = volume.obtainReference()) {
+        StorageReport sr = new StorageReport(volume.toDatanodeStorage(), false,
+            volume.getCapacity(), volume.getDfsUsed(), volume.getAvailable(),
+            volume.getBlockPoolUsed(bpid), volume.getNonDfsUsed());
+        reports.add(sr);
+      } catch (ClosedChannelException e) {
+        continue;
       }
     }
 
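For context on why dropping statsLock here is safe: each volume is pinned through obtainReference() while its metrics are read, and a volume closed concurrently surfaces as a ClosedChannelException and is simply skipped. Below is a minimal sketch of that reference-counting guard; the names are illustrative, not the real FsVolumeImpl internals.

import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicInteger;

// Narrowed AutoCloseable so try-with-resources needs no checked close().
interface VolumeRef extends AutoCloseable {
  @Override void close();
}

class RefCountedVolume {
  // A negative count marks the volume as closed.
  private final AtomicInteger refs = new AtomicInteger();

  VolumeRef obtainReference() throws ClosedChannelException {
    for (;;) {
      int n = refs.get();
      if (n < 0) {
        throw new ClosedChannelException(); // volume gone; caller skips it
      }
      if (refs.compareAndSet(n, n + 1)) {
        return () -> refs.decrementAndGet(); // released when the try block exits
      }
    }
  }
}

The try-with-resources in getStorageReports above follows this shape: obtain, read the metrics, release, and treat ClosedChannelException as "volume gone, move on".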
@@ -292,9 +288,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
   private final int smallBufferSize;
 
-  // Used for synchronizing access to usage stats
-  private final Object statsLock = new Object();
-
   final LocalFileSystem localFS;
 
   private boolean blockPinningEnabled;
@@ -643,9 +636,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getDfsUsed() throws IOException {
-    synchronized(statsLock) {
-      return volumes.getDfsUsed();
-    }
+    return volumes.getDfsUsed();
   }
 
   /**
@@ -653,9 +644,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getBlockPoolUsed(String bpid) throws IOException {
-    synchronized(statsLock) {
-      return volumes.getBlockPoolUsed(bpid);
-    }
+    return volumes.getBlockPoolUsed(bpid);
   }
 
   /**
@@ -677,9 +666,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getCapacity() {
-    synchronized(statsLock) {
-      return volumes.getCapacity();
-    }
+    return volumes.getCapacity();
   }
 
   /**
@@ -687,9 +674,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getRemaining() throws IOException {
-    synchronized(statsLock) {
-      return volumes.getRemaining();
-    }
+    return volumes.getRemaining();
   }
 
   /**

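The getter changes above all lean on the same property: volumes.getVolumes() hands back a copy-on-write snapshot, so aggregating capacity or usage races with nothing and needs no statsLock. A minimal sketch of that pattern, assuming a container like java.util.concurrent.CopyOnWriteArrayList (class names are illustrative, not the actual FsVolumeList internals):

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicLong;

class VolumeList {
  // add()/remove() copy the backing array; iteration sees a stable snapshot.
  private final List<Volume> volumes = new CopyOnWriteArrayList<>();

  long getDfsUsed() {
    long used = 0;
    for (Volume v : volumes) { // iterates the snapshot taken at loop start
      used += v.getDfsUsed();
    }
    return used;
  }

  void addVolume(Volume v) { volumes.add(v); }
  void removeVolume(Volume v) { volumes.remove(v); }
}

class Volume {
  private final AtomicLong dfsUsed = new AtomicLong();
  long getDfsUsed() { return dfsUsed.get(); }
  void incDfsUsed(long delta) { dfsUsed.addAndGet(delta); }
}

The trade-off is that a reader may total up a slightly stale view if a volume is added or removed mid-iteration, which is acceptable for usage reporting.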
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java (17 additions, 23 deletions)

@@ -49,7 +49,6 @@ import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -323,43 +322,38 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   private void decDfsUsedAndNumBlocks(String bpid, long value,
                                       boolean blockFileDeleted) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      BlockPoolSlice bp = bpSlices.get(bpid);
-      if (bp != null) {
-        bp.decDfsUsed(value);
-        if (blockFileDeleted) {
-          bp.decrNumBlocks();
-        }
+    // The BlockPoolSlice map is thread-safe, and updates to the space
+    // used and the number of blocks are atomic operations, so holding
+    // the dataset lock is not required.
+    BlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.decDfsUsed(value);
+      if (blockFileDeleted) {
+        bp.decrNumBlocks();
       }
     }
   }
 
   void incDfsUsedAndNumBlocks(String bpid, long value) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      BlockPoolSlice bp = bpSlices.get(bpid);
-      if (bp != null) {
-        bp.incDfsUsed(value);
-        bp.incrNumBlocks();
-      }
+    BlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.incDfsUsed(value);
+      bp.incrNumBlocks();
     }
   }
 
   void incDfsUsed(String bpid, long value) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      BlockPoolSlice bp = bpSlices.get(bpid);
-      if (bp != null) {
-        bp.incDfsUsed(value);
-      }
+    BlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.incDfsUsed(value);
     }
   }
 
   @VisibleForTesting
   public long getDfsUsed() throws IOException {
     long dfsUsed = 0;
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      for(BlockPoolSlice s : bpSlices.values()) {
-        dfsUsed += s.getDfsUsed();
-      }
+    for(BlockPoolSlice s : bpSlices.values()) {
+      dfsUsed += s.getDfsUsed();
     }
     return dfsUsed;
   }
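
The removals in FsVolumeImpl rest on the claim in the new comment: the bpSlices map is thread-safe and the per-slice counters are updated atomically. A minimal sketch of that arrangement, assuming a ConcurrentHashMap and AtomicLong counters (illustrative names, not the actual BlockPoolSlice fields):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

class BlockPoolCounters {
  private final AtomicLong dfsUsed = new AtomicLong();
  private final AtomicLong numBlocks = new AtomicLong();

  void incDfsUsed(long value) { dfsUsed.addAndGet(value); }
  void decDfsUsed(long value) { dfsUsed.addAndGet(-value); }
  void incrNumBlocks() { numBlocks.incrementAndGet(); }
  void decrNumBlocks() { numBlocks.decrementAndGet(); }
  long getDfsUsed() { return dfsUsed.get(); }
}

class VolumeCounters {
  // get() is safe to call concurrently with put()/remove(); no dataset lock.
  private final Map<String, BlockPoolCounters> bpSlices =
      new ConcurrentHashMap<>();

  void incDfsUsedAndNumBlocks(String bpid, long value) {
    BlockPoolCounters bp = bpSlices.get(bpid);
    if (bp != null) {
      bp.incDfsUsed(value); // atomic add
      bp.incrNumBlocks();   // atomic increment
    }
  }

  long getDfsUsed() {
    long used = 0;
    for (BlockPoolCounters bp : bpSlices.values()) {
      used += bp.getDfsUsed();
    }
    return used;
  }
}

Note that the pair of updates in incDfsUsedAndNumBlocks is not atomic as a unit; each counter is individually atomic, and a reader may briefly observe one updated without the other, which the change treats as acceptable for usage accounting.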