فهرست منبع

HDFS-17691. [FGL] Move FSNamesystemLockMode to org.apache.hadoop.hdfs.util package (#7232)

ZanderXu 6 ماه پیش
والد
کامیت
b289f9abd3
۸۷ فایل تغییر یافته به همراه ۷۶۷ افزوده شده و ۷۶۸ حذف شده
  1. 3 3
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
  2. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
  3. 57 57
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  4. 16 16
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
  5. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
  6. 19 19
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java
  7. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
  8. 6 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
  9. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
  10. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
  11. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
  12. 17 17
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
  13. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
  14. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
  15. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
  16. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
  17. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
  18. 8 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
  19. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
  20. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
  21. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
  22. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
  23. 8 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
  24. 8 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  25. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  26. 123 123
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  27. 8 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
  28. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
  29. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
  30. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
  31. 13 13
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  32. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
  33. 6 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
  34. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
  35. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  36. 17 15
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/FSNLockManager.java
  37. 62 61
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/FineGrainedFSNamesystemLock.java
  38. 16 15
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/GlobalFSNamesystemLock.java
  39. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
  40. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDeletionGc.java
  41. 18 20
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java
  42. 6 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLockMode.java
  43. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
  44. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
  45. 11 11
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
  46. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
  47. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
  48. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
  49. 13 13
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
  50. 13 13
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
  51. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
  52. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
  53. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
  54. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
  55. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
  56. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
  57. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
  58. 8 8
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
  59. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
  60. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java
  61. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java
  62. 9 16
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
  63. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
  64. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
  65. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
  66. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
  67. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
  68. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
  69. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
  70. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
  71. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
  72. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
  73. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
  74. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
  75. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
  76. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
  77. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
  78. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
  79. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
  80. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
  81. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
  82. 17 16
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/fgl/TestFineGrainedFSNamesystemLock.java
  83. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
  84. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
  85. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
  86. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
  87. 7 7
      hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java

@@ -121,13 +121,13 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
@@ -1699,10 +1699,10 @@ public class TestRouterRpc {
       // mark a replica as corrupt
       LocatedBlock block = NameNodeAdapter
           .getBlockLocations(nameNode, testFile, 0, 1024).get(0);
-      namesystem.writeLock(FSNamesystemLockMode.BM);
+      namesystem.writeLock(RwLockMode.BM);
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
           "STORAGE_ID", "TEST");
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "findAndMarkBlockAsCorrupt");
+      namesystem.writeUnlock(RwLockMode.BM, "findAndMarkBlockAsCorrupt");
       BlockManagerTestUtil.updateState(bm);
       DFSTestUtil.waitCorruptReplicas(fileSystem, namesystem,
           new Path(testFile), block.getBlock(), 1);

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java

@@ -27,7 +27,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map.Entry;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Lists;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -41,6 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -373,7 +373,7 @@ public class DelegationTokenSecretManager
       // closes the edit log files. Doing this inside the
       // fsn lock will prevent being interrupted when stopping
       // the secret manager.
-      namesystem.readLockInterruptibly(FSNamesystemLockMode.FS);
+      namesystem.readLockInterruptibly(RwLockMode.FS);
       try {
         // this monitor isn't necessary if stopped while holding write lock
         // but for safety, guard against a stop with read lock.
@@ -384,7 +384,7 @@ public class DelegationTokenSecretManager
           namesystem.logUpdateMasterKey(key);
         }
       } finally {
-        namesystem.readUnlock(FSNamesystemLockMode.FS, "logUpdateMasterKey");
+        namesystem.readUnlock(RwLockMode.FS, "logUpdateMasterKey");
       }
     } catch (InterruptedException ie) {
       // AbstractDelegationTokenManager may crash if an exception is thrown.
@@ -402,7 +402,7 @@ public class DelegationTokenSecretManager
       // closes the edit log files. Doing this inside the
       // fsn lock will prevent being interrupted when stopping
       // the secret manager.
-      namesystem.readLockInterruptibly(FSNamesystemLockMode.FS);
+      namesystem.readLockInterruptibly(RwLockMode.FS);
       try {
         // this monitor isn't necessary if stopped while holding write lock
         // but for safety, guard against a stop with read lock.
@@ -413,7 +413,7 @@ public class DelegationTokenSecretManager
           namesystem.logExpireDelegationToken(dtId);
         }
       } finally {
-        namesystem.readUnlock(FSNamesystemLockMode.FS, "logExpireToken");
+        namesystem.readUnlock(RwLockMode.FS, "logExpireToken");
       }
     } catch (InterruptedException ie) {
       // AbstractDelegationTokenManager may crash if an exception is thrown.

+ 57 - 57
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -97,7 +97,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
@@ -121,6 +120,7 @@ import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
 
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -861,7 +861,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   /** Dump meta data to out. */
   public void metaSave(PrintWriter out) {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasReadLock(RwLockMode.BM);
     final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
     final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
     datanodeManager.fetchDatanodes(live, dead, false);
@@ -1584,7 +1584,7 @@ public class BlockManager implements BlockStatsMXBean {
       final boolean inSnapshot, FileEncryptionInfo feInfo,
       ErasureCodingPolicy ecPolicy)
       throws IOException {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasReadLock(RwLockMode.BM);
     if (blocks == null) {
       return null;
     } else if (blocks.length == 0) {
@@ -1830,7 +1830,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   /** Remove the blocks associated to the given DatanodeStorageInfo. */
   void removeBlocksAssociatedTo(final DatanodeStorageInfo storageInfo) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     final Iterator<BlockInfo> it = storageInfo.getBlockIterator();
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     while(it.hasNext()) {
@@ -1901,7 +1901,7 @@ public class BlockManager implements BlockStatsMXBean {
    */
   public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk,
       final DatanodeInfo dn, String storageID, String reason) throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     final Block reportedBlock = blk.getLocalBlock();
     final BlockInfo storedBlock = getStoredBlock(reportedBlock);
     if (storedBlock == null) {
@@ -2107,9 +2107,9 @@ public class BlockManager implements BlockStatsMXBean {
    */
   int computeBlockReconstructionWork(int blocksToProcess) {
     List<List<BlockInfo>> blocksToReconstruct = null;
-    // TODO: Change it to readLock(FSNamesystemLockMode.BM)
+    // TODO: Change it to readLock(RwLockMode.BM)
     //  since chooseLowRedundancyBlocks is thread safe.
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       boolean reset = false;
       if (replQueueResetToHeadThreshold > 0) {
@@ -2124,7 +2124,7 @@ public class BlockManager implements BlockStatsMXBean {
       blocksToReconstruct = neededReconstruction
           .chooseLowRedundancyBlocks(blocksToProcess, reset);
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "computeBlockReconstructionWork");
+      namesystem.writeUnlock(RwLockMode.BM, "computeBlockReconstructionWork");
     }
     return computeReconstructionWorkForBlocks(blocksToReconstruct);
   }
@@ -2143,9 +2143,9 @@ public class BlockManager implements BlockStatsMXBean {
     List<BlockReconstructionWork> reconWork = new ArrayList<>();
 
     // Step 1: categorize at-risk blocks into replication and EC tasks
-    // TODO: Change to readLock(FSNamesystemLockMode.GLOBAL)
+    // TODO: Change to readLock(RwLockMode.GLOBAL)
     //  since neededReconstruction is thread safe.
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     try {
       synchronized (neededReconstruction) {
         for (int priority = 0; priority < blocksToReconstruct
@@ -2160,7 +2160,7 @@ public class BlockManager implements BlockStatsMXBean {
         }
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "computeReconstructionWorkForBlocks");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "computeReconstructionWorkForBlocks");
     }
 
     // Step 2: choose target nodes for each reconstruction task
@@ -2185,9 +2185,9 @@ public class BlockManager implements BlockStatsMXBean {
     }
 
     // Step 3: add tasks to the DN
-    // TODO: Change to readLock(FSNamesystemLockMode.BM)
+    // TODO: Change to readLock(RwLockMode.BM)
     //  since pendingReconstruction and neededReconstruction are thread safe.
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       for (BlockReconstructionWork rw : reconWork) {
         final DatanodeStorageInfo[] targets = rw.getTargets();
@@ -2203,7 +2203,7 @@ public class BlockManager implements BlockStatsMXBean {
         }
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "computeReconstructionWorkForBlocks");
+      namesystem.writeUnlock(RwLockMode.BM, "computeReconstructionWorkForBlocks");
     }
 
     if (blockLog.isDebugEnabled()) {
@@ -2694,9 +2694,9 @@ public class BlockManager implements BlockStatsMXBean {
   void processPendingReconstructions() {
     BlockInfo[] timedOutItems = pendingReconstruction.getTimedOutBlocks();
     if (timedOutItems != null) {
-      // TODO: Change to readLock(FSNamesystemLockMode.BM)
+      // TODO: Change to readLock(RwLockMode.BM)
       //  since neededReconstruction is thread safe.
-      namesystem.writeLock(FSNamesystemLockMode.BM);
+      namesystem.writeLock(RwLockMode.BM);
       try {
         for (int i = 0; i < timedOutItems.length; i++) {
           /*
@@ -2715,7 +2715,7 @@ public class BlockManager implements BlockStatsMXBean {
           }
         }
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.BM, "processPendingReconstructions");
+        namesystem.writeUnlock(RwLockMode.BM, "processPendingReconstructions");
       }
       /* If we know the target datanodes where the replication timedout,
        * we could invoke decBlocksScheduled() on it. Its ok for now.
@@ -2724,7 +2724,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasReadLock(RwLockMode.BM);
     DatanodeDescriptor node = null;
     try {
       node = datanodeManager.getDatanode(nodeReg);
@@ -2795,7 +2795,7 @@ public class BlockManager implements BlockStatsMXBean {
    *               list of blocks that need to be removed from blocksMap
    */
   public void removeBlocksAndUpdateSafemodeTotal(BlocksMapUpdateInfo blocks) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     // In the case that we are a Standby tailing edits from the
     // active while in safe-mode, we need to track the total number
    // of blocks and safe blocks in the system.
@@ -2910,7 +2910,7 @@ public class BlockManager implements BlockStatsMXBean {
       final DatanodeStorage storage,
       final BlockListAsLongs newReport,
       BlockReportContext context) throws IOException {
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     final long startTime = Time.monotonicNow(); //after acquiring write lock
     final long endTime;
     DatanodeDescriptor node;
@@ -2968,7 +2968,7 @@ public class BlockManager implements BlockStatsMXBean {
       storageInfo.receivedBlockReport();
     } finally {
       endTime = Time.monotonicNow();
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processReport");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "processReport");
     }
 
     if (blockLog.isDebugEnabled()) {
@@ -3012,7 +3012,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   public void removeBRLeaseIfNeeded(final DatanodeID nodeID,
       final BlockReportContext context) throws IOException {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     DatanodeDescriptor node;
     try {
       node = datanodeManager.getDatanode(nodeID);
@@ -3030,7 +3030,7 @@ public class BlockManager implements BlockStatsMXBean {
         }
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeBRLeaseIfNeeded");
+      namesystem.writeUnlock(RwLockMode.BM, "removeBRLeaseIfNeeded");
     }
   }
 
@@ -3041,7 +3041,7 @@ public class BlockManager implements BlockStatsMXBean {
     if (getPostponedMisreplicatedBlocksCount() == 0) {
       return;
     }
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     long startTime = Time.monotonicNow();
     long startSize = postponedMisreplicatedBlocks.size();
     try {
@@ -3070,7 +3070,7 @@ public class BlockManager implements BlockStatsMXBean {
       postponedMisreplicatedBlocks.addAll(rescannedMisreplicatedBlocks);
       rescannedMisreplicatedBlocks.clear();
       long endSize = postponedMisreplicatedBlocks.size();
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL,
+      namesystem.writeUnlock(RwLockMode.GLOBAL,
           "rescanPostponedMisreplicatedBlocks");
           "rescanPostponedMisreplicatedBlocks");
       LOG.info("Rescan of postponedMisreplicatedBlocks completed in {}" +
       LOG.info("Rescan of postponedMisreplicatedBlocks completed in {}" +
           " msecs. {} blocks are left. {} blocks were removed.",
           " msecs. {} blocks are left. {} blocks were removed.",
@@ -3114,7 +3114,7 @@ public class BlockManager implements BlockStatsMXBean {
       return;
       return;
     }
     }
     // TODO: Change to readLock(FSNamesysteLockMode.BM) since invalidateBlocks is thread safe.
     // TODO: Change to readLock(FSNamesysteLockMode.BM) since invalidateBlocks is thread safe.
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     long now = Time.monotonicNow();
     long now = Time.monotonicNow();
     int processed = 0;
     int processed = 0;
     try {
     try {
@@ -3168,7 +3168,7 @@ public class BlockManager implements BlockStatsMXBean {
         }
         }
       }
       }
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "processTimedOutExcessBlocks");
+      namesystem.writeUnlock(RwLockMode.BM, "processTimedOutExcessBlocks");
       LOG.info("processTimedOutExcessBlocks {} msecs.", (Time.monotonicNow() - now));
       LOG.info("processTimedOutExcessBlocks {} msecs.", (Time.monotonicNow() - now));
     }
     }
   }
   }
@@ -3224,7 +3224,7 @@ public class BlockManager implements BlockStatsMXBean {
       BlockInfo block,
       BlockInfo block,
       long oldGenerationStamp, long oldNumBytes, 
       long oldGenerationStamp, long oldNumBytes, 
       DatanodeStorageInfo[] newStorages) throws IOException {
       DatanodeStorageInfo[] newStorages) throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     BlockToMarkCorrupt b = null;
     BlockToMarkCorrupt b = null;
     if (block.getGenerationStamp() != oldGenerationStamp) {
     if (block.getGenerationStamp() != oldGenerationStamp) {
       b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
       b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
@@ -3274,7 +3274,7 @@ public class BlockManager implements BlockStatsMXBean {
       final DatanodeStorageInfo storageInfo,
       final DatanodeStorageInfo storageInfo,
       final BlockListAsLongs report) throws IOException {
       final BlockListAsLongs report) throws IOException {
     if (report == null) return;
     if (report == null) return;
-    assert (namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL));
+    assert (namesystem.hasWriteLock(RwLockMode.GLOBAL));
     assert (storageInfo.getBlockReportCount() == 0);
     assert (storageInfo.getBlockReportCount() == 0);
 
 
     for (BlockReportReplica iblk : report) {
     for (BlockReportReplica iblk : report) {
@@ -3742,7 +3742,7 @@ public class BlockManager implements BlockStatsMXBean {
   private void addStoredBlockImmediate(BlockInfo storedBlock, Block reported,
   private void addStoredBlockImmediate(BlockInfo storedBlock, Block reported,
       DatanodeStorageInfo storageInfo)
       DatanodeStorageInfo storageInfo)
   throws IOException {
   throws IOException {
-    assert (storedBlock != null && namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL));
+    assert (storedBlock != null && namesystem.hasWriteLock(RwLockMode.GLOBAL));
     if (!namesystem.isInStartupSafeMode()
     if (!namesystem.isInStartupSafeMode()
         || isPopulatingReplQueues()) {
         || isPopulatingReplQueues()) {
       addStoredBlock(storedBlock, reported, storageInfo, null, false);
       addStoredBlock(storedBlock, reported, storageInfo, null, false);
@@ -3777,7 +3777,7 @@ public class BlockManager implements BlockStatsMXBean {
                                DatanodeDescriptor delNodeHint,
                                DatanodeDescriptor delNodeHint,
                                boolean logEveryBlock)
                                boolean logEveryBlock)
   throws IOException {
   throws IOException {
-    assert block != null && namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert block != null && namesystem.hasWriteLock(RwLockMode.GLOBAL);
     BlockInfo storedBlock;
     BlockInfo storedBlock;
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     if (!block.isComplete()) {
     if (!block.isComplete()) {
@@ -3954,7 +3954,7 @@ public class BlockManager implements BlockStatsMXBean {
    * extra or low redundancy. Place it into the respective queue.
    * extra or low redundancy. Place it into the respective queue.
    */
    */
   public void processMisReplicatedBlocks() {
   public void processMisReplicatedBlocks() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     stopReconstructionInitializer();
     stopReconstructionInitializer();
     neededReconstruction.clear();
     neededReconstruction.clear();
     reconstructionQueuesInitializer = new Daemon() {
     reconstructionQueuesInitializer = new Daemon() {
@@ -4013,7 +4013,7 @@ public class BlockManager implements BlockStatsMXBean {
 
 
     while (namesystem.isRunning() && !Thread.currentThread().isInterrupted()) {
     while (namesystem.isRunning() && !Thread.currentThread().isInterrupted()) {
       int processed = 0;
       int processed = 0;
-      namesystem.writeLockInterruptibly(FSNamesystemLockMode.GLOBAL);
+      namesystem.writeLockInterruptibly(RwLockMode.GLOBAL);
       try {
       try {
         while (processed < numBlocksPerIteration && blocksItr.hasNext()) {
         while (processed < numBlocksPerIteration && blocksItr.hasNext()) {
           BlockInfo block = blocksItr.next();
           BlockInfo block = blocksItr.next();
@@ -4072,7 +4072,7 @@ public class BlockManager implements BlockStatsMXBean {
           break;
           break;
         }
         }
       } finally {
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processMisReplicatesAsync");
+        namesystem.writeUnlock(RwLockMode.GLOBAL, "processMisReplicatesAsync");
         LOG.info("Reconstruction queues initialisation progress: {}, total number of blocks " +
         LOG.info("Reconstruction queues initialisation progress: {}, total number of blocks " +
             "processed: {}/{}", reconstructionQueuesInitProgress, totalProcessed, totalBlocks);
             "processed: {}/{}", reconstructionQueuesInitProgress, totalProcessed, totalBlocks);
         // Make sure it is out of the write lock for sufficiently long time.
         // Make sure it is out of the write lock for sufficiently long time.
@@ -4119,7 +4119,7 @@ public class BlockManager implements BlockStatsMXBean {
               && !Thread.currentThread().isInterrupted()
               && !Thread.currentThread().isInterrupted()
               && iter.hasNext()) {
               && iter.hasNext()) {
         int limit = processed + numBlocksPerIteration;
         int limit = processed + numBlocksPerIteration;
-        namesystem.writeLockInterruptibly(FSNamesystemLockMode.GLOBAL);
+        namesystem.writeLockInterruptibly(RwLockMode.GLOBAL);
         try {
         try {
           while (iter.hasNext() && processed < limit) {
           while (iter.hasNext() && processed < limit) {
             BlockInfo blk = iter.next();
             BlockInfo blk = iter.next();
@@ -4129,7 +4129,7 @@ public class BlockManager implements BlockStatsMXBean {
                 blk, r);
                 blk, r);
           }
           }
         } finally {
         } finally {
-          namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processMisReplicatedBlocks");
+          namesystem.writeUnlock(RwLockMode.GLOBAL, "processMisReplicatedBlocks");
         }
         }
       }
       }
     } catch (InterruptedException ex) {
     } catch (InterruptedException ex) {
@@ -4225,7 +4225,7 @@ public class BlockManager implements BlockStatsMXBean {
   private boolean processExtraRedundancyBlockWithoutPostpone(final BlockInfo block,
   private boolean processExtraRedundancyBlockWithoutPostpone(final BlockInfo block,
       final short replication, final DatanodeDescriptor addedNode,
       final short replication, final DatanodeDescriptor addedNode,
       DatanodeDescriptor delNodeHint) {
       DatanodeDescriptor delNodeHint) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert namesystem.hasWriteLock(RwLockMode.GLOBAL);
     if (addedNode == delNodeHint) {
     if (addedNode == delNodeHint) {
       delNodeHint = null;
       delNodeHint = null;
     }
     }
@@ -4270,9 +4270,9 @@ public class BlockManager implements BlockStatsMXBean {
       DatanodeDescriptor addedNode,
       DatanodeDescriptor addedNode,
       DatanodeDescriptor delNodeHint) {
       DatanodeDescriptor delNodeHint) {
     // bc.getStoragePolicyID() needs FSReadLock.
     // bc.getStoragePolicyID() needs FSReadLock.
-    // TODO: Change to hasReadLock(FSNamesystemLockMode.GLOBAL)
+    // TODO: Change to hasReadLock(RwLockMode.GLOBAL)
     //  since chooseExcessRedundancyContiguous is thread safe.
     //  since chooseExcessRedundancyContiguous is thread safe.
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert namesystem.hasWriteLock(RwLockMode.GLOBAL);
     // first form a rack to datanodes map and
     // first form a rack to datanodes map and
     BlockCollection bc = getBlockCollection(storedBlock);
     BlockCollection bc = getBlockCollection(storedBlock);
     if (storedBlock.isStriped()) {
     if (storedBlock.isStriped()) {
@@ -4447,7 +4447,7 @@ public class BlockManager implements BlockStatsMXBean {
    */
    */
   public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
   public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
     blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
     blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
-    assert (namesystem.hasWriteLock(FSNamesystemLockMode.BM));
+    assert (namesystem.hasWriteLock(RwLockMode.BM));
     {
     {
       if (storedBlock == null || !blocksMap.removeNode(storedBlock, node)) {
       if (storedBlock == null || !blocksMap.removeNode(storedBlock, node)) {
         blockLog.debug("BLOCK* removeStoredBlock: {} has already been removed from node {}",
         blockLog.debug("BLOCK* removeStoredBlock: {} has already been removed from node {}",
@@ -4641,7 +4641,7 @@ public class BlockManager implements BlockStatsMXBean {
    */
    */
   public void processIncrementalBlockReport(final DatanodeID nodeID,
   public void processIncrementalBlockReport(final DatanodeID nodeID,
       final StorageReceivedDeletedBlocks srdb) throws IOException {
       final StorageReceivedDeletedBlocks srdb) throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert namesystem.hasWriteLock(RwLockMode.GLOBAL);
     final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
     final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
     if (node == null || !node.isRegistered()) {
     if (node == null || !node.isRegistered()) {
       blockLog.warn("BLOCK* processIncrementalBlockReport"
       blockLog.warn("BLOCK* processIncrementalBlockReport"
@@ -4892,15 +4892,15 @@ public class BlockManager implements BlockStatsMXBean {
       // When called by tests like TestDefaultBlockPlacementPolicy.
       // When called by tests like TestDefaultBlockPlacementPolicy.
       // testPlacementWithLocalRackNodesDecommissioned, it is not protected by
       // testPlacementWithLocalRackNodesDecommissioned, it is not protected by
       // lock, only when called by DatanodeManager.refreshNodes have writeLock
       // lock, only when called by DatanodeManager.refreshNodes have writeLock
-      if (namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL)) {
-        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL,
+      if (namesystem.hasWriteLock(RwLockMode.GLOBAL)) {
+        namesystem.writeUnlock(RwLockMode.GLOBAL,
             "processExtraRedundancyBlocksOnInService");
             "processExtraRedundancyBlocksOnInService");
         try {
         try {
           Thread.sleep(1);
           Thread.sleep(1);
         } catch (InterruptedException e) {
         } catch (InterruptedException e) {
           Thread.currentThread().interrupt();
           Thread.currentThread().interrupt();
         }
         }
-        namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+        namesystem.writeLock(RwLockMode.GLOBAL);
       }
       }
     }
     }
     LOG.info("Invalidated {} extra redundancy blocks on {} after "
     LOG.info("Invalidated {} extra redundancy blocks on {} after "
@@ -4964,7 +4964,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
   }
 
 
   public void removeBlock(BlockInfo block) {
   public void removeBlock(BlockInfo block) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     // No need to ACK blocks that are being removed entirely
     // No need to ACK blocks that are being removed entirely
     // from the namespace, since the removal of the associated
     // from the namespace, since the removal of the associated
     // file already removes them from the block map below.
     // file already removes them from the block map below.
@@ -5007,9 +5007,9 @@ public class BlockManager implements BlockStatsMXBean {
   /** updates a block in needed reconstruction queue. */
   /** updates a block in needed reconstruction queue. */
   private void updateNeededReconstructions(final BlockInfo block,
   private void updateNeededReconstructions(final BlockInfo block,
       final int curReplicasDelta, int expectedReplicasDelta) {
       final int curReplicasDelta, int expectedReplicasDelta) {
-    // TODO: Change to readLock(FSNamesystemLockMode.BM)
+    // TODO: Change to readLock(RwLockMode.BM)
     //  since pendingReconstruction and neededReconstruction are thread safe.
     //  since pendingReconstruction and neededReconstruction are thread safe.
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       if (!isPopulatingReplQueues() || !block.isComplete()) {
       if (!isPopulatingReplQueues() || !block.isComplete()) {
         return;
         return;
@@ -5028,7 +5028,7 @@ public class BlockManager implements BlockStatsMXBean {
             repl.outOfServiceReplicas(), oldExpectedReplicas);
             repl.outOfServiceReplicas(), oldExpectedReplicas);
       }
       }
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "updateNeededReconstructions");
+      namesystem.writeUnlock(RwLockMode.BM, "updateNeededReconstructions");
     }
     }
   }
   }
 
 
@@ -5061,8 +5061,8 @@ public class BlockManager implements BlockStatsMXBean {
   private int invalidateWorkForOneNode(DatanodeInfo dn) {
   private int invalidateWorkForOneNode(DatanodeInfo dn) {
     final List<Block> toInvalidate;
     final List<Block> toInvalidate;
 
 
-    // TODO: Change to readLock(FSNamesystemLockMode.BM) since invalidateBlocks is thread safe.
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    // TODO: Change to readLock(RwLockMode.BM) since invalidateBlocks is thread safe.
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       // blocks should not be replicated or removed if safe mode is on
       // blocks should not be replicated or removed if safe mode is on
       if (namesystem.isInSafeMode()) {
       if (namesystem.isInSafeMode()) {
@@ -5086,7 +5086,7 @@ public class BlockManager implements BlockStatsMXBean {
         return 0;
         return 0;
       }
       }
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "invalidateWorkForOneNode");
+      namesystem.writeUnlock(RwLockMode.BM, "invalidateWorkForOneNode");
     }
     }
     if (blockLog.isDebugEnabled()) {
     if (blockLog.isDebugEnabled()) {
       blockLog.debug("BLOCK* {}: ask {} to delete {}",
       blockLog.debug("BLOCK* {}: ask {} to delete {}",
@@ -5314,7 +5314,7 @@ public class BlockManager implements BlockStatsMXBean {
 
 
     private void remove(long time) {
     private void remove(long time) {
       if (checkToDeleteIterator()) {
       if (checkToDeleteIterator()) {
-        namesystem.writeLock(FSNamesystemLockMode.BM);
+        namesystem.writeLock(RwLockMode.BM);
         try {
         try {
           while (toDeleteIterator.hasNext()) {
           while (toDeleteIterator.hasNext()) {
             removeBlock(toDeleteIterator.next());
             removeBlock(toDeleteIterator.next());
@@ -5325,7 +5325,7 @@ public class BlockManager implements BlockStatsMXBean {
             }
             }
           }
           }
         } finally {
         } finally {
-          namesystem.writeUnlock(FSNamesystemLockMode.BM, "markedDeleteBlockScrubberThread");
+          namesystem.writeUnlock(RwLockMode.BM, "markedDeleteBlockScrubberThread");
         }
         }
       }
       }
     }
     }
@@ -5440,12 +5440,12 @@ public class BlockManager implements BlockStatsMXBean {
 
 
     // Update counters
     // Update counters
     // TODO: Make corruptReplicas thread safe to remove this lock.
     // TODO: Make corruptReplicas thread safe to remove this lock.
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       this.updateState();
       this.updateState();
       this.scheduledReplicationBlocksCount = workFound;
       this.scheduledReplicationBlocksCount = workFound;
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "computeDatanodeWork");
+      namesystem.writeUnlock(RwLockMode.BM, "computeDatanodeWork");
     }
     }
     workFound += this.computeInvalidateWork(nodesToProcess);
     workFound += this.computeInvalidateWork(nodesToProcess);
     return workFound;
     return workFound;
@@ -5672,7 +5672,7 @@ public class BlockManager implements BlockStatsMXBean {
           // batch as many operations in the write lock until the queue
           // batch as many operations in the write lock until the queue
           // runs dry, or the max lock hold is reached.
           // runs dry, or the max lock hold is reached.
           int processed = 0;
           int processed = 0;
-          namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+          namesystem.writeLock(RwLockMode.GLOBAL);
           metrics.setBlockOpsQueued(queue.size() + 1);
           metrics.setBlockOpsQueued(queue.size() + 1);
           try {
           try {
             long start = Time.monotonicNow();
             long start = Time.monotonicNow();
@@ -5685,7 +5685,7 @@ public class BlockManager implements BlockStatsMXBean {
               action = queue.poll();
               action = queue.poll();
             } while (action != null);
             } while (action != null);
           } finally {
           } finally {
-            namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processQueue");
+            namesystem.writeUnlock(RwLockMode.GLOBAL, "processQueue");
             metrics.addBlockOpsBatched(processed - 1);
             metrics.addBlockOpsBatched(processed - 1);
           }
           }
         } catch (InterruptedException e) {
         } catch (InterruptedException e) {

+ 16 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java

@@ -26,13 +26,13 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeSt
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Status;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Status;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Daemon;
 
 
@@ -170,7 +170,7 @@ class BlockManagerSafeMode {
    * @param total initial total blocks
    * @param total initial total blocks
    */
    */
   void activate(long total) {
   void activate(long total) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     assert status == BMSafeModeStatus.OFF;
     assert status == BMSafeModeStatus.OFF;
 
 
     startTime = monotonicNow();
     startTime = monotonicNow();
@@ -204,7 +204,7 @@ class BlockManagerSafeMode {
    * If safe mode is not currently on, this is a no-op.
    * If safe mode is not currently on, this is a no-op.
    */
    */
   void checkSafeMode() {
   void checkSafeMode() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     if (namesystem.inTransitionToActive()) {
     if (namesystem.inTransitionToActive()) {
       return;
       return;
     }
     }
@@ -246,7 +246,7 @@ class BlockManagerSafeMode {
    * @param deltaTotal the change in number of total blocks expected
    * @param deltaTotal the change in number of total blocks expected
    */
    */
   void adjustBlockTotals(int deltaSafe, int deltaTotal) {
   void adjustBlockTotals(int deltaSafe, int deltaTotal) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     if (!isSafeModeTrackingBlocks()) {
     if (!isSafeModeTrackingBlocks()) {
       return;
       return;
     }
     }
@@ -280,7 +280,7 @@ class BlockManagerSafeMode {
    * set after the image has been loaded.
    * set after the image has been loaded.
    */
    */
   boolean isSafeModeTrackingBlocks() {
   boolean isSafeModeTrackingBlocks() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     return haEnabled && status != BMSafeModeStatus.OFF;
     return haEnabled && status != BMSafeModeStatus.OFF;
   }
   }
 
 
@@ -288,7 +288,7 @@ class BlockManagerSafeMode {
    * Set total number of blocks.
    * Set total number of blocks.
    */
    */
   void setBlockTotal(long total) {
   void setBlockTotal(long total) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     synchronized (this) {
     synchronized (this) {
       this.blockTotal = total;
       this.blockTotal = total;
       this.blockThreshold = (long) (total * threshold);
       this.blockThreshold = (long) (total * threshold);
@@ -374,7 +374,7 @@ class BlockManagerSafeMode {
    * @return true if it leaves safe mode successfully else false
    * @return true if it leaves safe mode successfully else false
    */
    */
   boolean leaveSafeMode(boolean force) {
   boolean leaveSafeMode(boolean force) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM) : "Leaving safe mode needs write lock!";
+    assert namesystem.hasWriteLock(RwLockMode.BM) : "Leaving safe mode needs write lock!";
 
 
     final long bytesInFuture = getBytesInFuture();
     final long bytesInFuture = getBytesInFuture();
     if (bytesInFuture > 0) {
     if (bytesInFuture > 0) {
@@ -445,7 +445,7 @@ class BlockManagerSafeMode {
    */
    */
   synchronized void incrementSafeBlockCount(int storageNum,
   synchronized void incrementSafeBlockCount(int storageNum,
       BlockInfo storedBlock) {
       BlockInfo storedBlock) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     if (status == BMSafeModeStatus.OFF) {
     if (status == BMSafeModeStatus.OFF) {
       return;
       return;
     }
     }
@@ -477,7 +477,7 @@ class BlockManagerSafeMode {
    * If safe mode is not currently on, this is a no-op.
    * If safe mode is not currently on, this is a no-op.
    */
    */
   synchronized void decrementSafeBlockCount(BlockInfo b) {
   synchronized void decrementSafeBlockCount(BlockInfo b) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     if (status == BMSafeModeStatus.OFF) {
     if (status == BMSafeModeStatus.OFF) {
       return;
       return;
     }
     }
@@ -500,7 +500,7 @@ class BlockManagerSafeMode {
    * @param brr block report replica which belongs to no file in BlockManager
    * @param brr block report replica which belongs to no file in BlockManager
    */
    */
   void checkBlocksWithFutureGS(BlockReportReplica brr) {
   void checkBlocksWithFutureGS(BlockReportReplica brr) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     if (status == BMSafeModeStatus.OFF) {
     if (status == BMSafeModeStatus.OFF) {
       return;
       return;
     }
     }
@@ -534,7 +534,7 @@ class BlockManagerSafeMode {
   }
   }
 
 
   void close() {
   void close() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL)
+    assert namesystem.hasWriteLock(RwLockMode.GLOBAL)
         : "Closing bmSafeMode needs write lock!";
         : "Closing bmSafeMode needs write lock!";
     try {
     try {
       smmthread.interrupt();
       smmthread.interrupt();
@@ -569,7 +569,7 @@ class BlockManagerSafeMode {
 
 
   /** Check if we are ready to initialize replication queues. */
   /** Check if we are ready to initialize replication queues. */
   private void initializeReplQueuesIfNecessary() {
   private void initializeReplQueuesIfNecessary() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     // Whether it has reached the threshold for initializing replication queues.
     // Whether it has reached the threshold for initializing replication queues.
     boolean canInitializeReplQueues = blockManager.shouldPopulateReplQueues() &&
     boolean canInitializeReplQueues = blockManager.shouldPopulateReplQueues() &&
         blockSafe >= blockReplQueueThreshold;
         blockSafe >= blockReplQueueThreshold;
@@ -584,7 +584,7 @@ class BlockManagerSafeMode {
    * @return true if both block and datanode threshold are met else false.
    * @return true if both block and datanode threshold are met else false.
    */
    */
   private boolean areThresholdsMet() {
   private boolean areThresholdsMet() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     // Calculating the number of live datanodes is time-consuming
     // Calculating the number of live datanodes is time-consuming
     // in large clusters. Skip it when datanodeThreshold is zero.
     // in large clusters. Skip it when datanodeThreshold is zero.
     // We need to evaluate getNumLiveDataNodes only when
     // We need to evaluate getNumLiveDataNodes only when
@@ -629,7 +629,7 @@ class BlockManagerSafeMode {
    * Print status every 20 seconds.
    * Print status every 20 seconds.
    */
    */
   private void reportStatus(String msg, boolean rightNow) {
   private void reportStatus(String msg, boolean rightNow) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     long curTime = monotonicNow();
     long curTime = monotonicNow();
     if(!rightNow && (curTime - lastStatusReport < 20 * 1000)) {
     if(!rightNow && (curTime - lastStatusReport < 20 * 1000)) {
       return;
       return;
@@ -663,7 +663,7 @@ class BlockManagerSafeMode {
     public void run() {
     public void run() {
       while (namesystem.isRunning()) {
       while (namesystem.isRunning()) {
         try {
         try {
-          namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+          namesystem.writeLock(RwLockMode.GLOBAL);
           if (status == BMSafeModeStatus.OFF) { // Not in safe mode.
           if (status == BMSafeModeStatus.OFF) { // Not in safe mode.
             break;
             break;
           }
           }
@@ -673,7 +673,7 @@ class BlockManagerSafeMode {
             break;
             break;
           }
           }
         } finally {
         } finally {
-          namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "leaveSafeMode");
+          namesystem.writeUnlock(RwLockMode.GLOBAL, "leaveSafeMode");
         }
         }
 
 
         try {
         try {

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java

@@ -48,9 +48,9 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
@@ -224,7 +224,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
    * after are not atomic.
    * after are not atomic.
    */
    */
   public void waitForRescanIfNeeded() {
   public void waitForRescanIfNeeded() {
-    Preconditions.checkArgument(!namesystem.hasWriteLock(FSNamesystemLockMode.FS),
+    Preconditions.checkArgument(!namesystem.hasWriteLock(RwLockMode.FS),
         "Must not hold the FSN write lock when waiting for a rescan.");
         "Must not hold the FSN write lock when waiting for a rescan.");
     Preconditions.checkArgument(lock.isHeldByCurrentThread(),
     Preconditions.checkArgument(lock.isHeldByCurrentThread(),
         "Must hold the CRM lock when waiting for a rescan.");
         "Must hold the CRM lock when waiting for a rescan.");
@@ -269,7 +269,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
    */
    */
   @Override
   @Override
   public void close() throws IOException {
   public void close() throws IOException {
-    Preconditions.checkArgument(namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL));
+    Preconditions.checkArgument(namesystem.hasWriteLock(RwLockMode.GLOBAL));
     lock.lock();
     lock.lock();
     try {
     try {
       if (shutdown) return;
       if (shutdown) return;
@@ -292,7 +292,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     scannedBlocks = 0;
     scannedBlocks = 0;
     lastScanTimeMs = Time.monotonicNow();
     lastScanTimeMs = Time.monotonicNow();
     try {
     try {
-      namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+      namesystem.writeLock(RwLockMode.GLOBAL);
       try {
       try {
         lock.lock();
         lock.lock();
         if (shutdown) {
         if (shutdown) {
@@ -309,7 +309,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
       rescanCachedBlockMap();
       rescanCachedBlockMap();
       blockManager.getDatanodeManager().resetLastCachingDirectiveSentTime();
       blockManager.getDatanodeManager().resetLastCachingDirectiveSentTime();
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "cacheReplicationMonitorRescan");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "cacheReplicationMonitorRescan");
     }
     }
   }
   }
 
 
@@ -326,11 +326,11 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     long now = Time.monotonicNow();
     long now = Time.monotonicNow();
     if (now - last > cacheManager.getMaxLockTimeMs()) {
     if (now - last > cacheManager.getMaxLockTimeMs()) {
       try {
       try {
-        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "cacheReplicationMonitorRescan");
+        namesystem.writeUnlock(RwLockMode.GLOBAL, "cacheReplicationMonitorRescan");
         Thread.sleep(cacheManager.getSleepTimeMs());
         Thread.sleep(cacheManager.getSleepTimeMs());
       } catch (InterruptedException e) {
       } catch (InterruptedException e) {
       } finally {
       } finally {
-        namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+        namesystem.writeLock(RwLockMode.GLOBAL);
       }
       }
     }
     }
   }
   }

+ 19 - 19
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java

@@ -17,7 +17,6 @@
  */
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode;
@@ -25,6 +24,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
@@ -171,7 +171,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
     numBlocksChecked = 0;
     numBlocksChecked = 0;
     // Check decommission or maintenance progress.
     // Check decommission or maintenance progress.
     try {
     try {
-      namesystem.writeLock(FSNamesystemLockMode.BM);
+      namesystem.writeLock(RwLockMode.BM);
       try {
       try {
         /**
         /**
          * Other threads can modify the pendingNode list and the cancelled
          * Other threads can modify the pendingNode list and the cancelled
@@ -209,7 +209,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
 
 
         processPendingNodes();
         processPendingNodes();
       } finally {
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.BM, "DatanodeAdminMonitorV2Thread");
+        namesystem.writeUnlock(RwLockMode.BM, "DatanodeAdminMonitorV2Thread");
       }
       }
       // After processing the above, various parts of the check() method will
       // After processing the above, various parts of the check() method will
       // take and drop the read / write lock as needed. Aside from the
       // take and drop the read / write lock as needed. Aside from the
@@ -327,7 +327,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
    */
    */
   private void processMaintenanceNodes() {
   private void processMaintenanceNodes() {
     // Check for any maintenance state nodes which need to be expired
     // Check for any maintenance state nodes which need to be expired
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     try {
     try {
       for (DatanodeDescriptor dn : outOfServiceNodeBlocks.keySet()) {
       for (DatanodeDescriptor dn : outOfServiceNodeBlocks.keySet()) {
         if (dn.isMaintenance() && dn.maintenanceExpired()) {
         if (dn.isMaintenance() && dn.maintenanceExpired()) {
@@ -339,12 +339,12 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
           // which added the node to the cancelled list. Therefore expired
           // which added the node to the cancelled list. Therefore expired
           // maintenance nodes do not need to be added to the toRemove list.
           // maintenance nodes do not need to be added to the toRemove list.
           dnAdmin.stopMaintenance(dn);
           dnAdmin.stopMaintenance(dn);
-          namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processMaintenanceNodes");
-          namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+          namesystem.writeUnlock(RwLockMode.GLOBAL, "processMaintenanceNodes");
+          namesystem.writeLock(RwLockMode.GLOBAL);
         }
         }
       }
       }
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processMaintenanceNodes");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "processMaintenanceNodes");
     }
     }
   }
   }
 
 
@@ -361,7 +361,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
       // taking the write lock at all.
       // taking the write lock at all.
       return;
       return;
     }
     }
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       for (DatanodeDescriptor dn : toRemove) {
       for (DatanodeDescriptor dn : toRemove) {
         final boolean isHealthy =
         final boolean isHealthy =
@@ -403,7 +403,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
         }
         }
       }
       }
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "processCompletedNodes");
+      namesystem.writeUnlock(RwLockMode.BM, "processCompletedNodes");
     }
     }
   }
   }
 
 
@@ -487,7 +487,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
       return;
       return;
     }
     }
 
 
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     try {
     try {
       long repQueueSize = blockManager.getLowRedundancyBlocksCount();
       long repQueueSize = blockManager.getLowRedundancyBlocksCount();
 
 
@@ -525,8 +525,8 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
           // replication
           // replication
           if (blocksProcessed >= blocksPerLock) {
           if (blocksProcessed >= blocksPerLock) {
             blocksProcessed = 0;
             blocksProcessed = 0;
-            namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "moveBlocksToPending");
-            namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+            namesystem.writeUnlock(RwLockMode.GLOBAL, "moveBlocksToPending");
+            namesystem.writeLock(RwLockMode.GLOBAL);
           }
           }
           blocksProcessed++;
           blocksProcessed++;
           if (nextBlockAddedToPending(blockIt, dn)) {
           if (nextBlockAddedToPending(blockIt, dn)) {
@@ -547,7 +547,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
         }
         }
       }
       }
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "moveBlocksToPending");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "moveBlocksToPending");
     }
     }
     LOG.debug("{} blocks are now pending replication", pendingCount);
     LOG.debug("{} blocks are now pending replication", pendingCount);
   }
   }
@@ -627,16 +627,16 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
     }
     }
 
 
     DatanodeStorageInfo[] storage;
     DatanodeStorageInfo[] storage;
-    namesystem.readLock(FSNamesystemLockMode.BM);
+    namesystem.readLock(RwLockMode.BM);
     try {
     try {
       storage = dn.getStorageInfos();
       storage = dn.getStorageInfos();
     } finally {
     } finally {
-      namesystem.readUnlock(FSNamesystemLockMode.BM, "scanDatanodeStorage");
+      namesystem.readUnlock(RwLockMode.BM, "scanDatanodeStorage");
     }
     }
 
 
     for (DatanodeStorageInfo s : storage) {
     for (DatanodeStorageInfo s : storage) {
       // isBlockReplicatedOk involves FS.
       // isBlockReplicatedOk involves FS.
-      namesystem.readLock(FSNamesystemLockMode.GLOBAL);
+      namesystem.readLock(RwLockMode.GLOBAL);
       try {
       try {
         // As the lock is dropped and re-taken between each storage, we need
         // As the lock is dropped and re-taken between each storage, we need
         // to check the storage is still present before processing it, as it
         // to check the storage is still present before processing it, as it
@@ -662,7 +662,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
           numBlocksChecked++;
           numBlocksChecked++;
         }
         }
       } finally {
       } finally {
-        namesystem.readUnlock(FSNamesystemLockMode.GLOBAL, "scanDatanodeStorage");
+        namesystem.readUnlock(RwLockMode.GLOBAL, "scanDatanodeStorage");
       }
       }
     }
     }
   }
   }
@@ -685,7 +685,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
    * namenode write lock while it runs.
    * namenode write lock while it runs.
    */
    */
   private void processPendingReplication() {
   private void processPendingReplication() {
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     try {
     try {
       for (Iterator<Map.Entry<DatanodeDescriptor, List<BlockInfo>>>
       for (Iterator<Map.Entry<DatanodeDescriptor, List<BlockInfo>>>
            entIt = pendingRep.entrySet().iterator(); entIt.hasNext();) {
            entIt = pendingRep.entrySet().iterator(); entIt.hasNext();) {
@@ -717,7 +717,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
             suspectBlocks.getOutOfServiceBlockCount());
             suspectBlocks.getOutOfServiceBlockCount());
       }
       }
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processPendingReplication");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "processPendingReplication");
     }
     }
   }
   }
 
 

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java

@@ -17,7 +17,6 @@
  */
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -27,6 +26,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
@@ -185,7 +185,7 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
     // Check decommission or maintenance progress.
     // Check decommission or maintenance progress.
     // dnAdmin.stopMaintenance(dn) needs FSReadLock
     // dnAdmin.stopMaintenance(dn) needs FSReadLock
     // since processExtraRedundancyBlock involves storage policy and isSufficient involves bc.
     // since processExtraRedundancyBlock involves storage policy and isSufficient involves bc.
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     try {
     try {
       processCancelledNodes();
       processCancelledNodes();
       processPendingNodes();
       processPendingNodes();
@@ -194,7 +194,7 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
       LOG.warn("DatanodeAdminMonitor caught exception when processing node.",
       LOG.warn("DatanodeAdminMonitor caught exception when processing node.",
           e);
           e);
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "DatanodeAdminMonitorThread");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "DatanodeAdminMonitorThread");
     }
     }
     if (numBlocksChecked + numNodesChecked > 0) {
     if (numBlocksChecked + numNodesChecked > 0) {
       LOG.info("Checked {} blocks and {} nodes this tick. {} nodes are now " +
       LOG.info("Checked {} blocks and {} nodes this tick. {} nodes are now " +
@@ -429,7 +429,7 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
         // lock.
         // lock.
         // Yielding is required in case of block number is greater than the
         // Yielding is required in case of block number is greater than the
         // configured per-iteration-limit.
         // configured per-iteration-limit.
-        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processBlocksInternal");
+        namesystem.writeUnlock(RwLockMode.GLOBAL, "processBlocksInternal");
         try {
         try {
           LOG.debug("Yielded lock during decommission/maintenance check");
           LOG.debug("Yielded lock during decommission/maintenance check");
           Thread.sleep(0, 500);
           Thread.sleep(0, 500);
@@ -438,7 +438,7 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
         }
         }
         // reset
         // reset
         numBlocksCheckedPerLock = 0;
         numBlocksCheckedPerLock = 0;
-        namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+        namesystem.writeLock(RwLockMode.GLOBAL);
       }
       }
       numBlocksChecked++;
       numBlocksChecked++;
       numBlocksCheckedPerLock++;
       numBlocksCheckedPerLock++;

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -22,7 +22,6 @@ import static org.apache.hadoop.util.Time.monotonicNow;
 
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.Preconditions;
 
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -51,6 +50,7 @@ import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.*;
 import org.apache.hadoop.net.*;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
@@ -864,7 +864,7 @@ public class DatanodeManager {
    */
    */
   private void removeDatanode(DatanodeDescriptor nodeInfo,
   private void removeDatanode(DatanodeDescriptor nodeInfo,
       boolean removeBlocksFromBlocksMap) {
       boolean removeBlocksFromBlocksMap) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     heartbeatManager.removeDatanode(nodeInfo);
     heartbeatManager.removeDatanode(nodeInfo);
     if (removeBlocksFromBlocksMap) {
     if (removeBlocksFromBlocksMap) {
       blockManager.removeBlocksAssociatedTo(nodeInfo);
       blockManager.removeBlocksAssociatedTo(nodeInfo);
@@ -883,7 +883,7 @@ public class DatanodeManager {
    */
    */
   public void removeDatanode(final DatanodeID node)
   public void removeDatanode(final DatanodeID node)
       throws UnregisteredNodeException {
       throws UnregisteredNodeException {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       final DatanodeDescriptor descriptor = getDatanode(node);
       final DatanodeDescriptor descriptor = getDatanode(node);
       if (descriptor != null) {
       if (descriptor != null) {
@@ -893,7 +893,7 @@ public class DatanodeManager {
                                      + node + " does not exist");
                                      + node + " does not exist");
       }
       }
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeDatanode");
+      namesystem.writeUnlock(RwLockMode.BM, "removeDatanode");
     }
     }
   }
   }
 
 
@@ -1344,12 +1344,12 @@ public class DatanodeManager {
   public void refreshNodes(final Configuration conf) throws IOException {
   public void refreshNodes(final Configuration conf) throws IOException {
     refreshHostsReader(conf);
     refreshHostsReader(conf);
     // processExtraRedundancyBlocksOnInService involves FS in stopMaintenance and stopDecommission.
     // processExtraRedundancyBlocksOnInService involves FS in stopMaintenance and stopDecommission.
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     try {
     try {
       refreshDatanodes();
       refreshDatanodes();
       countSoftwareVersions();
       countSoftwareVersions();
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "refreshNodes");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "refreshNodes");
     }
     }
   }
   }
 
 

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java

@@ -28,9 +28,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
@@ -515,20 +515,20 @@ class HeartbeatManager implements DatanodeStatistics {
 
 
       for (DatanodeDescriptor dead : deadDatanodes) {
       for (DatanodeDescriptor dead : deadDatanodes) {
         // acquire the fsnamesystem lock, and then remove the dead node.
         // acquire the fsnamesystem lock, and then remove the dead node.
-        namesystem.writeLock(FSNamesystemLockMode.BM);
+        namesystem.writeLock(RwLockMode.BM);
         try {
         try {
           dm.removeDeadDatanode(dead, !dead.isMaintenance());
           dm.removeDeadDatanode(dead, !dead.isMaintenance());
         } finally {
         } finally {
-          namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeDeadDatanode");
+          namesystem.writeUnlock(RwLockMode.BM, "removeDeadDatanode");
         }
         }
       }
       }
       for (DatanodeStorageInfo failedStorage : failedStorages) {
       for (DatanodeStorageInfo failedStorage : failedStorages) {
         // acquire the fsnamesystem lock, and remove blocks on the storage.
         // acquire the fsnamesystem lock, and remove blocks on the storage.
-        namesystem.writeLock(FSNamesystemLockMode.BM);
+        namesystem.writeLock(RwLockMode.BM);
         try {
         try {
           blockManager.removeBlocksAssociatedTo(failedStorage);
           blockManager.removeBlocksAssociatedTo(failedStorage);
         } finally {
         } finally {
-          namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeBlocksAssociatedTo");
+          namesystem.writeUnlock(RwLockMode.BM, "removeBlocksAssociatedTo");
         }
         }
       }
       }
     }
     }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java

@@ -45,10 +45,10 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
 import org.apache.hadoop.hdfs.server.common.BlockAlias;
 import org.apache.hadoop.hdfs.server.common.BlockAlias;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.util.RwLock;
 import org.apache.hadoop.hdfs.util.RwLock;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 
 
 import org.slf4j.Logger;
 import org.slf4j.Logger;
@@ -145,7 +145,7 @@ public class ProvidedStorageMap {
 
 
   private void processProvidedStorageReport()
   private void processProvidedStorageReport()
       throws IOException {
       throws IOException {
-    assert lock.hasWriteLock(FSNamesystemLockMode.GLOBAL) : "Not holding write lock";
+    assert lock.hasWriteLock(RwLockMode.GLOBAL) : "Not holding write lock";
     if (providedStorageInfo.getBlockReportCount() == 0
     if (providedStorageInfo.getBlockReportCount() == 0
         || providedDescriptor.activeProvidedDatanodes() == 0) {
         || providedDescriptor.activeProvidedDatanodes() == 0) {
       LOG.info("Calling process first blk report from storage: "
       LOG.info("Calling process first blk report from storage: "
@@ -174,7 +174,7 @@ public class ProvidedStorageMap {
 
 
   public void removeDatanode(DatanodeDescriptor dnToRemove) {
   public void removeDatanode(DatanodeDescriptor dnToRemove) {
     if (providedEnabled) {
     if (providedEnabled) {
-      assert lock.hasWriteLock(FSNamesystemLockMode.BM) : "Not holding write lock";
+      assert lock.hasWriteLock(RwLockMode.BM) : "Not holding write lock";
       providedDescriptor.remove(dnToRemove);
       providedDescriptor.remove(dnToRemove);
       // if all datanodes fail, set the block report count to 0
       // if all datanodes fail, set the block report count to 0
       if (providedDescriptor.activeProvidedDatanodes() == 0) {
       if (providedDescriptor.activeProvidedDatanodes() == 0) {

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
 
 
@@ -219,11 +219,11 @@ public class BackupImage extends FSImage {
       }
       }
       lastAppliedTxId = logLoader.getLastAppliedTxId();
       lastAppliedTxId = logLoader.getLastAppliedTxId();
 
 
-      getNamesystem().writeLock(FSNamesystemLockMode.FS);
+      getNamesystem().writeLock(RwLockMode.FS);
       try {
       try {
         getNamesystem().dir.updateCountForQuota();
         getNamesystem().dir.updateCountForQuota();
       } finally {
       } finally {
-        getNamesystem().writeUnlock(FSNamesystemLockMode.FS, "applyEdits");
+        getNamesystem().writeUnlock(RwLockMode.FS, "applyEdits");
       }
       }
     } finally {
     } finally {
       backupInputStream.clear();
       backupInputStream.clear();

+ 17 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java

@@ -80,7 +80,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBl
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
@@ -89,6 +88,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Co
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.GSet;
@@ -318,7 +318,7 @@ public class CacheManager {
   }
   }
 
 
   public void clearDirectiveStats() {
   public void clearDirectiveStats() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     for (CacheDirective directive : directivesById.values()) {
     for (CacheDirective directive : directivesById.values()) {
       directive.resetStatistics();
       directive.resetStatistics();
     }
     }
@@ -328,7 +328,7 @@ public class CacheManager {
    * @return Unmodifiable view of the collection of CachePools.
    * @return Unmodifiable view of the collection of CachePools.
    */
    */
   public Collection<CachePool> getCachePools() {
   public Collection<CachePool> getCachePools() {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasReadLock(RwLockMode.FS);
     return Collections.unmodifiableCollection(cachePools.values());
     return Collections.unmodifiableCollection(cachePools.values());
   }
   }
 
 
@@ -336,18 +336,18 @@ public class CacheManager {
    * @return Unmodifiable view of the collection of CacheDirectives.
    * @return Unmodifiable view of the collection of CacheDirectives.
    */
    */
   public Collection<CacheDirective> getCacheDirectives() {
   public Collection<CacheDirective> getCacheDirectives() {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasReadLock(RwLockMode.FS);
     return Collections.unmodifiableCollection(directivesById.values());
     return Collections.unmodifiableCollection(directivesById.values());
   }
   }
   
   
   @VisibleForTesting
   @VisibleForTesting
   public GSet<CachedBlock, CachedBlock> getCachedBlocks() {
   public GSet<CachedBlock, CachedBlock> getCachedBlocks() {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasReadLock(RwLockMode.BM);
     return cachedBlocks;
     return cachedBlocks;
   }
   }
 
 
   private long getNextDirectiveId() throws IOException {
   private long getNextDirectiveId() throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     if (nextDirectiveId >= Long.MAX_VALUE - 1) {
     if (nextDirectiveId >= Long.MAX_VALUE - 1) {
       throw new IOException("No more available IDs.");
       throw new IOException("No more available IDs.");
     }
     }
@@ -575,7 +575,7 @@ public class CacheManager {
   public CacheDirectiveInfo addDirective(
   public CacheDirectiveInfo addDirective(
       CacheDirectiveInfo info, FSPermissionChecker pc, EnumSet<CacheFlag> flags)
       CacheDirectiveInfo info, FSPermissionChecker pc, EnumSet<CacheFlag> flags)
       throws IOException {
       throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     CacheDirective directive;
     CacheDirective directive;
     try {
     try {
       CachePool pool = getCachePool(validatePoolName(info));
       CachePool pool = getCachePool(validatePoolName(info));
@@ -653,7 +653,7 @@ public class CacheManager {
 
 
   public void modifyDirective(CacheDirectiveInfo info,
   public void modifyDirective(CacheDirectiveInfo info,
       FSPermissionChecker pc, EnumSet<CacheFlag> flags) throws IOException {
       FSPermissionChecker pc, EnumSet<CacheFlag> flags) throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     String idString =
     String idString =
         (info.getId() == null) ?
         (info.getId() == null) ?
             "(null)" : info.getId().toString();
             "(null)" : info.getId().toString();
@@ -704,7 +704,7 @@ public class CacheManager {
 
 
   private void removeInternal(CacheDirective directive)
   private void removeInternal(CacheDirective directive)
       throws InvalidRequestException {
       throws InvalidRequestException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     // Remove the corresponding entry in directivesByPath.
     // Remove the corresponding entry in directivesByPath.
     String path = directive.getPath();
     String path = directive.getPath();
     if (!directivesByPath.remove(path, directive)) {
     if (!directivesByPath.remove(path, directive)) {
@@ -725,7 +725,7 @@ public class CacheManager {
 
 
   public void removeDirective(long id, FSPermissionChecker pc)
   public void removeDirective(long id, FSPermissionChecker pc)
       throws IOException {
       throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     try {
     try {
       CacheDirective directive = getById(id);
       CacheDirective directive = getById(id);
       checkWritePermission(pc, directive.getPool());
       checkWritePermission(pc, directive.getPool());
@@ -741,7 +741,7 @@ public class CacheManager {
         listCacheDirectives(long prevId,
         listCacheDirectives(long prevId,
             CacheDirectiveInfo filter,
             CacheDirectiveInfo filter,
             FSPermissionChecker pc) throws IOException {
             FSPermissionChecker pc) throws IOException {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasReadLock(RwLockMode.FS);
     final int NUM_PRE_ALLOCATED_ENTRIES = 16;
     final int NUM_PRE_ALLOCATED_ENTRIES = 16;
     String filterPath = null;
     String filterPath = null;
     if (filter.getPath() != null) {
     if (filter.getPath() != null) {
@@ -816,7 +816,7 @@ public class CacheManager {
    */
    */
   public CachePoolInfo addCachePool(CachePoolInfo info)
   public CachePoolInfo addCachePool(CachePoolInfo info)
       throws IOException {
       throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     CachePool pool;
     CachePool pool;
     try {
     try {
       CachePoolInfo.validate(info);
       CachePoolInfo.validate(info);
@@ -846,7 +846,7 @@ public class CacheManager {
    */
    */
   public void modifyCachePool(CachePoolInfo info)
   public void modifyCachePool(CachePoolInfo info)
       throws IOException {
       throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     StringBuilder bld = new StringBuilder();
     StringBuilder bld = new StringBuilder();
     try {
     try {
       CachePoolInfo.validate(info);
       CachePoolInfo.validate(info);
@@ -916,7 +916,7 @@ public class CacheManager {
    */
    */
   public void removeCachePool(String poolName)
   public void removeCachePool(String poolName)
       throws IOException {
       throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     try {
     try {
       CachePoolInfo.validateName(poolName);
       CachePoolInfo.validateName(poolName);
       CachePool pool = cachePools.remove(poolName);
       CachePool pool = cachePools.remove(poolName);
@@ -942,7 +942,7 @@ public class CacheManager {
 
 
   public BatchedListEntries<CachePoolEntry>
   public BatchedListEntries<CachePoolEntry>
       listCachePools(FSPermissionChecker pc, String prevKey) {
       listCachePools(FSPermissionChecker pc, String prevKey) {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasReadLock(RwLockMode.FS);
     final int NUM_PRE_ALLOCATED_ENTRIES = 16;
     final int NUM_PRE_ALLOCATED_ENTRIES = 16;
     ArrayList<CachePoolEntry> results = 
     ArrayList<CachePoolEntry> results = 
         new ArrayList<CachePoolEntry>(NUM_PRE_ALLOCATED_ENTRIES);
         new ArrayList<CachePoolEntry>(NUM_PRE_ALLOCATED_ENTRIES);
@@ -1009,7 +1009,7 @@ public class CacheManager {
           datanodeID, DFS_NAMENODE_CACHING_ENABLED_KEY, blockIds.size());
           datanodeID, DFS_NAMENODE_CACHING_ENABLED_KEY, blockIds.size());
       return;
       return;
     }
     }
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     final long startTime = Time.monotonicNow();
     final long startTime = Time.monotonicNow();
     final long endTime;
     final long endTime;
     try {
     try {
@@ -1023,7 +1023,7 @@ public class CacheManager {
       processCacheReportImpl(datanode, blockIds);
       processCacheReportImpl(datanode, blockIds);
     } finally {
     } finally {
       endTime = Time.monotonicNow();
       endTime = Time.monotonicNow();
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "processCacheReport");
+      namesystem.writeUnlock(RwLockMode.BM, "processCacheReport");
     }
     }
 
 
     // Log the block report processing stats from Namenode perspective
     // Log the block report processing stats from Namenode perspective

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java

@@ -32,12 +32,12 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Lists;
@@ -245,14 +245,14 @@ class Checkpointer extends Daemon {
 
 
       if(needReloadImage) {
       if(needReloadImage) {
         LOG.info("Loading image with txid " + sig.mostRecentCheckpointTxId);
         LOG.info("Loading image with txid " + sig.mostRecentCheckpointTxId);
-        backupNode.namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+        backupNode.namesystem.writeLock(RwLockMode.GLOBAL);
         try {
         try {
           File file = bnStorage.findImageFile(NameNodeFile.IMAGE,
           File file = bnStorage.findImageFile(NameNodeFile.IMAGE,
               sig.mostRecentCheckpointTxId);
               sig.mostRecentCheckpointTxId);
           bnImage.reloadFromImageFile(file, backupNode.getNamesystem());
           bnImage.reloadFromImageFile(file, backupNode.getNamesystem());
         } finally {
         } finally {
           backupNode.namesystem.writeUnlock(
           backupNode.namesystem.writeUnlock(
-              FSNamesystemLockMode.GLOBAL, "doCheckpointByBackupNode");
+              RwLockMode.GLOBAL, "doCheckpointByBackupNode");
         }
         }
       }
       }
       rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem());
       rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem());
@@ -260,7 +260,7 @@ class Checkpointer extends Daemon {
     
     
     long txid = bnImage.getLastAppliedTxId();
     long txid = bnImage.getLastAppliedTxId();
     
     
-    backupNode.namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    backupNode.namesystem.writeLock(RwLockMode.GLOBAL);
     try {
     try {
       backupNode.namesystem.setImageLoaded();
       backupNode.namesystem.setImageLoaded();
       if(backupNode.namesystem.getBlocksTotal() > 0) {
       if(backupNode.namesystem.getBlocksTotal() > 0) {
@@ -274,7 +274,7 @@ class Checkpointer extends Daemon {
         bnImage.updateStorageVersion();
         bnImage.updateStorageVersion();
       }
       }
     } finally {
     } finally {
-      backupNode.namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "doCheckpoint");
+      backupNode.namesystem.writeUnlock(RwLockMode.GLOBAL, "doCheckpoint");
     }
     }
 
 
     if(cpCmd.needToReturnImage()) {
     if(cpCmd.needToReturnImage()) {

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java

@@ -17,12 +17,12 @@
  */
  */
 package org.apache.hadoop.hdfs.server.namenode;
 package org.apache.hadoop.hdfs.server.namenode;
 
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttr;
@@ -119,8 +119,8 @@ public class ContentSummaryComputationContext {
 
 
     boolean hadDirReadLock = dir.hasReadLock();
     boolean hadDirReadLock = dir.hasReadLock();
     boolean hadDirWriteLock = dir.hasWriteLock();
     boolean hadDirWriteLock = dir.hasWriteLock();
-    boolean hadFsnReadLock = fsn.hasReadLock(FSNamesystemLockMode.GLOBAL);
-    boolean hadFsnWriteLock = fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    boolean hadFsnReadLock = fsn.hasReadLock(RwLockMode.GLOBAL);
+    boolean hadFsnWriteLock = fsn.hasWriteLock(RwLockMode.GLOBAL);
 
 
     // sanity check.
     // sanity check.
     if (!hadDirReadLock || !hadFsnReadLock || hadDirWriteLock ||
     if (!hadDirReadLock || !hadFsnReadLock || hadDirWriteLock ||
@@ -131,14 +131,14 @@ public class ContentSummaryComputationContext {
 
 
     // unlock
     // unlock
     dir.readUnlock();
     dir.readUnlock();
-    fsn.readUnlock(FSNamesystemLockMode.GLOBAL, "contentSummary");
+    fsn.readUnlock(RwLockMode.GLOBAL, "contentSummary");
 
 
     try {
     try {
       Thread.sleep(sleepMilliSec, sleepNanoSec);
       Thread.sleep(sleepMilliSec, sleepNanoSec);
     } catch (InterruptedException ie) {
     } catch (InterruptedException ie) {
     } finally {
     } finally {
       // reacquire
       // reacquire
-      fsn.readLock(FSNamesystemLockMode.GLOBAL);
+      fsn.readLock(RwLockMode.GLOBAL);
       dir.readLock();
       dir.readLock();
     }
     }
     yieldCount++;
     yieldCount++;

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java

@@ -48,8 +48,8 @@ import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Lists;
 
 
@@ -189,12 +189,12 @@ public class EncryptionZoneManager {
       final int count) throws IOException {
       final int count) throws IOException {
     INodesInPath iip;
     INodesInPath iip;
     final FSPermissionChecker pc = dir.getPermissionChecker();
     final FSPermissionChecker pc = dir.getPermissionChecker();
-    dir.getFSNamesystem().readLock(FSNamesystemLockMode.FS);
+    dir.getFSNamesystem().readLock(RwLockMode.FS);
     try {
     try {
       iip = dir.resolvePath(pc, zone, DirOp.READ);
       iip = dir.resolvePath(pc, zone, DirOp.READ);
     } finally {
     } finally {
       dir.getFSNamesystem().readUnlock(
       dir.getFSNamesystem().readUnlock(
-          FSNamesystemLockMode.FS, "pauseForTestingAfterNthCheckpoint");
+          RwLockMode.FS, "pauseForTestingAfterNthCheckpoint");
     }
     }
     reencryptionHandler
     reencryptionHandler
         .pauseForTestingAfterNthCheckpoint(iip.getLastINode().getId(), count);
         .pauseForTestingAfterNthCheckpoint(iip.getLastINode().getId(), count);
@@ -215,7 +215,7 @@ public class EncryptionZoneManager {
       throws IOException {
       throws IOException {
     final FSPermissionChecker pc = dir.getPermissionChecker();
     final FSPermissionChecker pc = dir.getPermissionChecker();
     final INode inode;
     final INode inode;
-    dir.getFSNamesystem().readLock(FSNamesystemLockMode.FS);
+    dir.getFSNamesystem().readLock(RwLockMode.FS);
     dir.readLock();
     dir.readLock();
     try {
     try {
       final INodesInPath iip = dir.resolvePath(pc, zone, DirOp.READ);
       final INodesInPath iip = dir.resolvePath(pc, zone, DirOp.READ);
@@ -226,7 +226,7 @@ public class EncryptionZoneManager {
       return getReencryptionStatus().getZoneStatus(inode.getId());
       return getReencryptionStatus().getZoneStatus(inode.getId());
     } finally {
     } finally {
       dir.readUnlock();
       dir.readUnlock();
-      dir.getFSNamesystem().readUnlock(FSNamesystemLockMode.FS, "getZoneStatus");
+      dir.getFSNamesystem().readUnlock(RwLockMode.FS, "getZoneStatus");
     }
     }
   }
   }
 
 
@@ -283,11 +283,11 @@ public class EncryptionZoneManager {
     if (getProvider() == null || reencryptionHandler == null) {
     if (getProvider() == null || reencryptionHandler == null) {
       return;
       return;
     }
     }
-    dir.getFSNamesystem().writeLock(FSNamesystemLockMode.FS);
+    dir.getFSNamesystem().writeLock(RwLockMode.FS);
     try {
     try {
       reencryptionHandler.stopThreads();
       reencryptionHandler.stopThreads();
     } finally {
     } finally {
-      dir.getFSNamesystem().writeUnlock(FSNamesystemLockMode.FS, "stopReencryptThread");
+      dir.getFSNamesystem().writeUnlock(RwLockMode.FS, "stopReencryptThread");
     }
     }
     if (reencryptHandlerExecutor != null) {
     if (reencryptHandlerExecutor != null) {
       reencryptHandlerExecutor.shutdownNow();
       reencryptHandlerExecutor.shutdownNow();

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java

@@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.RetriableException;
 
 
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.Preconditions;
@@ -83,7 +83,7 @@ final class FSDirAppendOp {
       final String srcArg, final FSPermissionChecker pc, final String holder,
       final String srcArg, final FSPermissionChecker pc, final String holder,
       final String clientMachine, final boolean newBlock,
       final String clientMachine, final boolean newBlock,
       final boolean logRetryCache) throws IOException {
       final boolean logRetryCache) throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
 
 
     final LocatedBlock lb;
     final LocatedBlock lb;
     final FSDirectory fsd = fsn.getFSDirectory();
     final FSDirectory fsd = fsn.getFSDirectory();
@@ -181,7 +181,7 @@ final class FSDirAppendOp {
       final String clientMachine, final boolean newBlock,
       final String clientMachine, final boolean newBlock,
       final boolean writeToEditLog, final boolean logRetryCache)
       final boolean writeToEditLog, final boolean logRetryCache)
       throws IOException {
       throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
 
 
     final INodeFile file = iip.getLastINode().asFile();
     final INodeFile file = iip.getLastINode().asFile();
     final QuotaCounts delta = verifyQuotaForUCBlock(fsn, file, iip);
     final QuotaCounts delta = verifyQuotaForUCBlock(fsn, file, iip);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java

@@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext;
 import org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.ChunkedArrayList;
 
 
 import java.io.IOException;
 import java.io.IOException;
@@ -172,7 +172,7 @@ class FSDirDeleteOp {
       FSNamesystem fsn, INodesInPath iip, boolean logRetryCache)
       FSNamesystem fsn, INodesInPath iip, boolean logRetryCache)
       throws IOException {
       throws IOException {
     // Delete INode and modify BlockInfo
     // Delete INode and modify BlockInfo
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
     if (NameNode.stateChangeLog.isDebugEnabled()) {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + iip.getPath());
       NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + iip.getPath());
     }
     }

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java

@@ -50,7 +50,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.FileEdekInfo;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.FileEdekInfo;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
@@ -84,8 +84,8 @@ final class FSDirEncryptionZoneOp {
   private static EncryptedKeyVersion generateEncryptedDataEncryptionKey(
   private static EncryptedKeyVersion generateEncryptedDataEncryptionKey(
       final FSDirectory fsd, final String ezKeyName) throws IOException {
       final FSDirectory fsd, final String ezKeyName) throws IOException {
     // must not be holding lock during this operation
     // must not be holding lock during this operation
-    assert !fsd.getFSNamesystem().hasReadLock(FSNamesystemLockMode.FS);
-    assert !fsd.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
+    assert !fsd.getFSNamesystem().hasReadLock(RwLockMode.FS);
+    assert !fsd.getFSNamesystem().hasWriteLock(RwLockMode.FS);
     if (ezKeyName == null) {
     if (ezKeyName == null) {
       return null;
       return null;
     }
     }
@@ -383,7 +383,7 @@ final class FSDirEncryptionZoneOp {
    */
    */
   static void saveFileXAttrsForBatch(FSDirectory fsd,
   static void saveFileXAttrsForBatch(FSDirectory fsd,
       List<FileEdekInfo> batch) {
       List<FileEdekInfo> batch) {
-    assert fsd.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
+    assert fsd.getFSNamesystem().hasWriteLock(RwLockMode.FS);
     if (batch != null && !batch.isEmpty()) {
     if (batch != null && !batch.isEmpty()) {
       for (FileEdekInfo entry : batch) {
       for (FileEdekInfo entry : batch) {
         final INode inode = fsd.getInode(entry.getInodeId());
         final INode inode = fsd.getInode(entry.getInodeId());
@@ -657,13 +657,13 @@ final class FSDirEncryptionZoneOp {
     Preconditions.checkNotNull(ezKeyName);
     Preconditions.checkNotNull(ezKeyName);
 
 
     // Generate EDEK while not holding the fsn lock.
     // Generate EDEK while not holding the fsn lock.
-    fsn.writeUnlock(FSNamesystemLockMode.FS, "getEncryptionKeyInfo");
+    fsn.writeUnlock(RwLockMode.FS, "getEncryptionKeyInfo");
     try {
     try {
       EncryptionFaultInjector.getInstance().startFileBeforeGenerateKey();
       EncryptionFaultInjector.getInstance().startFileBeforeGenerateKey();
       return new EncryptionKeyInfo(protocolVersion, suite, ezKeyName,
       return new EncryptionKeyInfo(protocolVersion, suite, ezKeyName,
           generateEncryptedDataEncryptionKey(fsd, ezKeyName));
           generateEncryptedDataEncryptionKey(fsd, ezKeyName));
     } finally {
     } finally {
-      fsn.writeLock(FSNamesystemLockMode.FS);
+      fsn.writeLock(RwLockMode.FS);
       EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
       EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
     }
     }
   }
   }
@@ -728,13 +728,13 @@ final class FSDirEncryptionZoneOp {
       final FSPermissionChecker pc, final String zone) throws IOException {
       final FSPermissionChecker pc, final String zone) throws IOException {
     assert dir.getProvider() != null;
     assert dir.getProvider() != null;
     final INodesInPath iip;
     final INodesInPath iip;
-    dir.getFSNamesystem().readLock(FSNamesystemLockMode.FS);
+    dir.getFSNamesystem().readLock(RwLockMode.FS);
     try {
     try {
       iip = dir.resolvePath(pc, zone, DirOp.READ);
       iip = dir.resolvePath(pc, zone, DirOp.READ);
       dir.ezManager.checkEncryptionZoneRoot(iip.getLastINode(), zone);
       dir.ezManager.checkEncryptionZoneRoot(iip.getLastINode(), zone);
       return dir.ezManager.getKeyName(iip);
       return dir.ezManager.getKeyName(iip);
     } finally {
     } finally {
-      dir.getFSNamesystem().readUnlock(FSNamesystemLockMode.FS, "getKeyNameForZone");
+      dir.getFSNamesystem().readUnlock(RwLockMode.FS, "getKeyNameForZone");
     }
     }
   }
   }
 }
 }

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.NoECPolicySetException;
 import org.apache.hadoop.hdfs.protocol.NoECPolicySetException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.erasurecode.CodecRegistry;
 import org.apache.hadoop.io.erasurecode.CodecRegistry;
@@ -72,7 +72,7 @@ final class FSDirErasureCodingOp {
    */
    */
   static ErasureCodingPolicy getEnabledErasureCodingPolicyByName(
   static ErasureCodingPolicy getEnabledErasureCodingPolicyByName(
       final FSNamesystem fsn, final String ecPolicyName) throws IOException {
       final FSNamesystem fsn, final String ecPolicyName) throws IOException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsn.hasReadLock(RwLockMode.FS);
     ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
     ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
         .getEnabledPolicyByName(ecPolicyName);
         .getEnabledPolicyByName(ecPolicyName);
     if (ecPolicy == null) {
     if (ecPolicy == null) {
@@ -104,7 +104,7 @@ final class FSDirErasureCodingOp {
    */
    */
   static ErasureCodingPolicy getErasureCodingPolicyByName(
   static ErasureCodingPolicy getErasureCodingPolicyByName(
       final FSNamesystem fsn, final String ecPolicyName) throws IOException {
       final FSNamesystem fsn, final String ecPolicyName) throws IOException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsn.hasReadLock(RwLockMode.FS);
     ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
     ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
         .getErasureCodingPolicyByName(ecPolicyName);
         .getErasureCodingPolicyByName(ecPolicyName);
     if (ecPolicy == null) {
     if (ecPolicy == null) {
@@ -133,7 +133,7 @@ final class FSDirErasureCodingOp {
       final String srcArg, final String ecPolicyName,
       final String srcArg, final String ecPolicyName,
       final FSPermissionChecker pc, final boolean logRetryCache)
       final FSPermissionChecker pc, final boolean logRetryCache)
       throws IOException, AccessControlException {
       throws IOException, AccessControlException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.FS);
+    assert fsn.hasWriteLock(RwLockMode.FS);
 
 
     String src = srcArg;
     String src = srcArg;
     FSDirectory fsd = fsn.getFSDirectory();
     FSDirectory fsd = fsn.getFSDirectory();
@@ -210,7 +210,7 @@ final class FSDirErasureCodingOp {
   static FileStatus unsetErasureCodingPolicy(final FSNamesystem fsn,
   static FileStatus unsetErasureCodingPolicy(final FSNamesystem fsn,
       final String srcArg, final FSPermissionChecker pc,
       final String srcArg, final FSPermissionChecker pc,
       final boolean logRetryCache) throws IOException {
       final boolean logRetryCache) throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.FS);
+    assert fsn.hasWriteLock(RwLockMode.FS);
 
 
     String src = srcArg;
     String src = srcArg;
     FSDirectory fsd = fsn.getFSDirectory();
     FSDirectory fsd = fsn.getFSDirectory();
@@ -354,7 +354,7 @@ final class FSDirErasureCodingOp {
   static ErasureCodingPolicy getErasureCodingPolicy(final FSNamesystem fsn,
   static ErasureCodingPolicy getErasureCodingPolicy(final FSNamesystem fsn,
       final String src, FSPermissionChecker pc)
       final String src, FSPermissionChecker pc)
       throws IOException, AccessControlException {
       throws IOException, AccessControlException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsn.hasReadLock(RwLockMode.FS);
 
 
     if (FSDirectory.isExactReservedName(src)) {
     if (FSDirectory.isExactReservedName(src)) {
       return null;
       return null;
@@ -417,7 +417,7 @@ final class FSDirErasureCodingOp {
    */
    */
   static ErasureCodingPolicy unprotectedGetErasureCodingPolicy(
   static ErasureCodingPolicy unprotectedGetErasureCodingPolicy(
       final FSNamesystem fsn, final INodesInPath iip) throws IOException {
       final FSNamesystem fsn, final INodesInPath iip) throws IOException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsn.hasReadLock(RwLockMode.FS);
 
 
     return getErasureCodingPolicyForPath(fsn.getFSDirectory(), iip);
     return getErasureCodingPolicyForPath(fsn.getFSDirectory(), iip);
   }
   }
@@ -430,7 +430,7 @@ final class FSDirErasureCodingOp {
    */
    */
   static ErasureCodingPolicyInfo[] getErasureCodingPolicies(
   static ErasureCodingPolicyInfo[] getErasureCodingPolicies(
       final FSNamesystem fsn) throws IOException {
       final FSNamesystem fsn) throws IOException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsn.hasReadLock(RwLockMode.FS);
     return fsn.getErasureCodingPolicyManager().getPolicies();
     return fsn.getErasureCodingPolicyManager().getPolicies();
   }
   }
 
 
@@ -442,7 +442,7 @@ final class FSDirErasureCodingOp {
    */
    */
   static Map<String, String> getErasureCodingCodecs(final FSNamesystem fsn)
   static Map<String, String> getErasureCodingCodecs(final FSNamesystem fsn)
       throws IOException {
       throws IOException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsn.hasReadLock(RwLockMode.FS);
     return CodecRegistry.getInstance().getCodec2CoderCompactMap();
     return CodecRegistry.getInstance().getCodec2CoderCompactMap();
   }
   }
 
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java

@@ -31,8 +31,8 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Lists;
 
 
 /**
 /**
@@ -65,7 +65,7 @@ final class FSDirSatisfyStoragePolicyOp {
   static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
   static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src, boolean logRetryCache) throws IOException {
       String src, boolean logRetryCache) throws IOException {
 
 
-    assert fsd.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
+    assert fsd.getFSNamesystem().hasWriteLock(RwLockMode.FS);
     FSPermissionChecker pc = fsd.getPermissionChecker();
     FSPermissionChecker pc = fsd.getPermissionChecker();
     INodesInPath iip;
     INodesInPath iip;
     fsd.writeLock();
     fsd.writeLock();

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java

@@ -18,7 +18,6 @@
 
 
 package org.apache.hadoop.hdfs.server.namenode;
 package org.apache.hadoop.hdfs.server.namenode;
 
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.Preconditions;
 
 
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary;
@@ -42,6 +41,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 
 
 import java.io.FileNotFoundException;
 import java.io.FileNotFoundException;
@@ -447,7 +447,7 @@ class FSDirStatAndListingOp {
       }
       }
       // ComputeFileSize and needLocation need BM lock.
       // ComputeFileSize and needLocation need BM lock.
       if (needLocation) {
       if (needLocation) {
-        fsd.getFSNamesystem().readLock(FSNamesystemLockMode.BM);
+        fsd.getFSNamesystem().readLock(RwLockMode.BM);
         try {
         try {
           final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
           final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
           final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
           final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
@@ -460,7 +460,7 @@ class FSDirStatAndListingOp {
             loc = new LocatedBlocks();
             loc = new LocatedBlocks();
           }
           }
         } finally {
         } finally {
-          fsd.getFSNamesystem().readUnlock(FSNamesystemLockMode.BM, "createFileStatus");
+          fsd.getFSNamesystem().readUnlock(RwLockMode.BM, "createFileStatus");
         }
         }
       }
       }
     } else if (node.isDirectory()) {
     } else if (node.isDirectory()) {

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java

@@ -38,9 +38,9 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 
 
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 
 
 /**
 /**
  * Helper class to perform truncate operation.
  * Helper class to perform truncate operation.
@@ -72,7 +72,7 @@ final class FSDirTruncateOp {
       final String clientMachine, final long mtime,
       final String clientMachine, final long mtime,
       final BlocksMapUpdateInfo toRemoveBlocks, final FSPermissionChecker pc)
       final BlocksMapUpdateInfo toRemoveBlocks, final FSPermissionChecker pc)
       throws IOException, UnresolvedLinkException {
       throws IOException, UnresolvedLinkException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
 
 
     FSDirectory fsd = fsn.getFSDirectory();
     FSDirectory fsd = fsn.getFSDirectory();
     final String src;
     final String src;
@@ -176,7 +176,7 @@ final class FSDirTruncateOp {
       final long newLength, final long mtime, final Block truncateBlock)
       final long newLength, final long mtime, final Block truncateBlock)
       throws UnresolvedLinkException, QuotaExceededException,
       throws UnresolvedLinkException, QuotaExceededException,
       SnapshotAccessControlException, IOException {
       SnapshotAccessControlException, IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
 
 
     FSDirectory fsd = fsn.getFSDirectory();
     FSDirectory fsd = fsn.getFSDirectory();
     INodeFile file = iip.getLastINode().asFile();
     INodeFile file = iip.getLastINode().asFile();
@@ -220,7 +220,7 @@ final class FSDirTruncateOp {
   static Block prepareFileForTruncate(FSNamesystem fsn, INodesInPath iip,
   static Block prepareFileForTruncate(FSNamesystem fsn, INodesInPath iip,
       String leaseHolder, String clientMachine, long lastBlockDelta,
       String leaseHolder, String clientMachine, long lastBlockDelta,
       Block newBlock) throws IOException {
       Block newBlock) throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
 
 
     INodeFile file = iip.getLastINode().asFile();
     INodeFile file = iip.getLastINode().asFile();
     assert !file.isStriped();
     assert !file.isStriped();
@@ -304,7 +304,7 @@ final class FSDirTruncateOp {
   private static boolean unprotectedTruncate(FSNamesystem fsn,
   private static boolean unprotectedTruncate(FSNamesystem fsn,
       INodesInPath iip, long newLength, BlocksMapUpdateInfo collectedBlocks,
       INodesInPath iip, long newLength, BlocksMapUpdateInfo collectedBlocks,
       long mtime, QuotaCounts delta) throws IOException {
       long mtime, QuotaCounts delta) throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
 
 
     INodeFile file = iip.getLastINode().asFile();
     INodeFile file = iip.getLastINode().asFile();
     int latestSnapshot = iip.getLatestSnapshotId();
     int latestSnapshot = iip.getLatestSnapshotId();

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

@@ -17,7 +17,6 @@
  */
  */
 package org.apache.hadoop.hdfs.server.namenode;
 package org.apache.hadoop.hdfs.server.namenode;
 
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -53,6 +52,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.net.NodeBase;
@@ -105,7 +105,7 @@ class FSDirWriteFileOp {
    */
    */
   static void persistBlocks(
   static void persistBlocks(
       FSDirectory fsd, String path, INodeFile file, boolean logRetryCache) {
       FSDirectory fsd, String path, INodeFile file, boolean logRetryCache) {
-    assert fsd.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
+    assert fsd.getFSNamesystem().hasWriteLock(RwLockMode.FS);
     Preconditions.checkArgument(file.isUnderConstruction());
     Preconditions.checkArgument(file.isUnderConstruction());
     fsd.getEditLog().logUpdateBlocks(path, file, logRetryCache);
     fsd.getEditLog().logUpdateBlocks(path, file, logRetryCache);
     if(NameNode.stateChangeLog.isDebugEnabled()) {
     if(NameNode.stateChangeLog.isDebugEnabled()) {
@@ -364,7 +364,7 @@ class FSDirWriteFileOp {
       boolean shouldReplicate, String ecPolicyName, String storagePolicy,
       boolean shouldReplicate, String ecPolicyName, String storagePolicy,
       boolean logRetryEntry)
       boolean logRetryEntry)
       throws IOException {
       throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.FS);
+    assert fsn.hasWriteLock(RwLockMode.FS);
     boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
     boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
     boolean isLazyPersist = flag.contains(CreateFlag.LAZY_PERSIST);
     boolean isLazyPersist = flag.contains(CreateFlag.LAZY_PERSIST);
 
 
@@ -372,7 +372,7 @@ class FSDirWriteFileOp {
     FSDirectory fsd = fsn.getFSDirectory();
     FSDirectory fsd = fsn.getFSDirectory();
 
 
     if (iip.getLastINode() != null) {
     if (iip.getLastINode() != null) {
-      fsn.writeLock(FSNamesystemLockMode.BM);
+      fsn.writeLock(RwLockMode.BM);
       try {
       try {
         if (overwrite) {
         if (overwrite) {
           List<INode> toRemoveINodes = new ChunkedArrayList<>();
           List<INode> toRemoveINodes = new ChunkedArrayList<>();
@@ -392,7 +392,7 @@ class FSDirWriteFileOp {
               clientMachine + " already exists");
               clientMachine + " already exists");
         }
         }
       } finally {
       } finally {
-        fsn.writeUnlock(FSNamesystemLockMode.BM, "create");
+        fsn.writeUnlock(RwLockMode.BM, "create");
       }
       }
     }
     }
     fsn.checkFsObjectLimit();
     fsn.checkFsObjectLimit();
@@ -602,7 +602,7 @@ class FSDirWriteFileOp {
       FSNamesystem fsn, INodesInPath iip, long fileId, String clientName,
       FSNamesystem fsn, INodesInPath iip, long fileId, String clientName,
       ExtendedBlock previous, LocatedBlock[] onRetryBlock)
       ExtendedBlock previous, LocatedBlock[] onRetryBlock)
       throws IOException {
       throws IOException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasReadLock(RwLockMode.GLOBAL);
     String src = iip.getPath();
     String src = iip.getPath();
     checkBlock(fsn, previous);
     checkBlock(fsn, previous);
     onRetryBlock[0] = null;
     onRetryBlock[0] = null;
@@ -700,7 +700,7 @@ class FSDirWriteFileOp {
       FSNamesystem fsn, INodesInPath iip,
       FSNamesystem fsn, INodesInPath iip,
       String holder, Block last, long fileId)
       String holder, Block last, long fileId)
       throws IOException {
       throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
     final String src = iip.getPath();
     final String src = iip.getPath();
     final INodeFile pendingFile;
     final INodeFile pendingFile;
     INode inode = null;
     INode inode = null;
@@ -784,7 +784,7 @@ class FSDirWriteFileOp {
   static void saveAllocatedBlock(FSNamesystem fsn, String src,
   static void saveAllocatedBlock(FSNamesystem fsn, String src,
       INodesInPath inodesInPath, Block newBlock, DatanodeStorageInfo[] targets,
       INodesInPath inodesInPath, Block newBlock, DatanodeStorageInfo[] targets,
       BlockType blockType) throws IOException {
       BlockType blockType) throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
     BlockInfo b = addBlock(fsn.dir, src, inodesInPath, newBlock, targets,
     BlockInfo b = addBlock(fsn.dir, src, inodesInPath, newBlock, targets,
         blockType);
         blockType);
     logAllocatedBlock(src, b);
     logAllocatedBlock(src, b);

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -17,7 +17,6 @@
  */
  */
 package org.apache.hadoop.hdfs.server.namenode;
 package org.apache.hadoop.hdfs.server.namenode;
 
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
 
 
@@ -64,6 +63,7 @@ import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
 import org.apache.hadoop.hdfs.util.ByteArray;
 import org.apache.hadoop.hdfs.util.ByteArray;
 import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
@@ -263,31 +263,31 @@ public class FSDirectory implements Closeable {
    * remain as placeholders only
    * remain as placeholders only
    */
    */
   void readLock() {
   void readLock() {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.FS) :
+    assert namesystem.hasReadLock(RwLockMode.FS) :
         "Should hold read lock of namesystem FSLock";
         "Should hold read lock of namesystem FSLock";
   }
   }
 
 
   void readUnlock() {
   void readUnlock() {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.FS) :
+    assert namesystem.hasReadLock(RwLockMode.FS) :
         "Should hold read lock of namesystem FSLock";
         "Should hold read lock of namesystem FSLock";
   }
   }
 
 
   void writeLock() {
   void writeLock() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS) :
+    assert namesystem.hasWriteLock(RwLockMode.FS) :
         "Should hold write lock of namesystem FSLock";
         "Should hold write lock of namesystem FSLock";
   }
   }
 
 
   void writeUnlock() {
   void writeUnlock() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS) :
+    assert namesystem.hasWriteLock(RwLockMode.FS) :
         "Should hold write lock of namesystem FSLock";
         "Should hold write lock of namesystem FSLock";
   }
   }
 
 
   boolean hasWriteLock() {
   boolean hasWriteLock() {
-    return namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    return namesystem.hasWriteLock(RwLockMode.FS);
   }
   }
 
 
   boolean hasReadLock() {
   boolean hasReadLock() {
-    return namesystem.hasReadLock(FSNamesystemLockMode.FS);
+    return namesystem.hasReadLock(RwLockMode.FS);
   }
   }
 
 
   public int getListLimit() {
   public int getListLimit() {
@@ -1106,7 +1106,7 @@ public class FSDirectory implements Closeable {
    */
    */
   public void updateSpaceForCompleteBlock(BlockInfo completeBlk,
   public void updateSpaceForCompleteBlock(BlockInfo completeBlk,
       INodesInPath inodes) throws IOException {
       INodesInPath inodes) throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert namesystem.hasWriteLock(RwLockMode.GLOBAL);
     INodesInPath iip = inodes != null ? inodes :
     INodesInPath iip = inodes != null ? inodes :
         INodesInPath.fromINode(namesystem.getBlockCollection(completeBlk));
         INodesInPath.fromINode(namesystem.getBlockCollection(completeBlk));
     INodeFile fileINode = iip.getLastINode().asFile();
     INodeFile fileINode = iip.getLastINode().asFile();

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -27,7 +27,6 @@ import java.util.EnumMap;
 import java.util.EnumSet;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.List;
 
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -113,6 +112,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.hdfs.util.Holder;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.log.LogThrottlingHelper;
 import org.apache.hadoop.log.LogThrottlingHelper;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.Timer;
 import org.apache.hadoop.util.Timer;
@@ -172,7 +172,7 @@ public class FSEditLogLoader {
     StartupProgress prog = NameNode.getStartupProgress();
     StartupProgress prog = NameNode.getStartupProgress();
     Step step = createStartupProgressStep(edits);
     Step step = createStartupProgressStep(edits);
     prog.beginStep(Phase.LOADING_EDITS, step);
     prog.beginStep(Phase.LOADING_EDITS, step);
-    fsNamesys.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsNamesys.writeLock(RwLockMode.GLOBAL);
     try {
     try {
       long startTime = timer.monotonicNow();
       long startTime = timer.monotonicNow();
       LogAction preLogAction = LOAD_EDITS_LOG_HELPER.record("pre", startTime);
       LogAction preLogAction = LOAD_EDITS_LOG_HELPER.record("pre", startTime);
@@ -197,7 +197,7 @@ public class FSEditLogLoader {
       return numEdits;
       return numEdits;
     } finally {
     } finally {
       edits.close();
       edits.close();
-      fsNamesys.writeUnlock(FSNamesystemLockMode.GLOBAL, "loadFSEdits");
+      fsNamesys.writeUnlock(RwLockMode.GLOBAL, "loadFSEdits");
       prog.endStep(Phase.LOADING_EDITS, step);
       prog.endStep(Phase.LOADING_EDITS, step);
     }
     }
   }
   }
@@ -219,7 +219,7 @@ public class FSEditLogLoader {
       LOG.trace("Acquiring write lock to replay edit log");
       LOG.trace("Acquiring write lock to replay edit log");
     }
     }
 
 
-    fsNamesys.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsNamesys.writeLock(RwLockMode.GLOBAL);
     FSDirectory fsDir = fsNamesys.dir;
     FSDirectory fsDir = fsNamesys.dir;
     fsDir.writeLock();
     fsDir.writeLock();
 
 
@@ -343,7 +343,7 @@ public class FSEditLogLoader {
         in.close();
         in.close();
       }
       }
       fsDir.writeUnlock();
       fsDir.writeUnlock();
-      fsNamesys.writeUnlock(FSNamesystemLockMode.GLOBAL, "loadEditRecords");
+      fsNamesys.writeUnlock(RwLockMode.GLOBAL, "loadEditRecords");
 
 
       if (LOG.isTraceEnabled()) {
       if (LOG.isTraceEnabled()) {
         LOG.trace("replaying edit log finished");
         LOG.trace("replaying edit log finished");

تفاوت فایلی نمایش داده نمی شود زیرا این فایل بسیار بزرگ است
+ 123 - 123
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java


+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java

@@ -25,7 +25,6 @@ import java.util.Optional;
 import java.util.Stack;
 import java.util.Stack;
 import java.util.function.LongFunction;
 import java.util.function.LongFunction;
 
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
@@ -42,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AuthorizationContext;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AuthorizationContext;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 
@@ -50,7 +50,7 @@ import org.apache.hadoop.security.UserGroupInformation;
  * The state of this class need not be synchronized as it has data structures that
  * The state of this class need not be synchronized as it has data structures that
  * are read-only.
  * are read-only.
  * 
  * 
- * Some of the helper methods are guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}.
+ * Some of the helper methods are guarded by {@link FSNamesystem#readLock(RwLockMode)}.
  */
  */
 public class FSPermissionChecker implements AccessControlEnforcer {
 public class FSPermissionChecker implements AccessControlEnforcer {
   static final Logger LOG = LoggerFactory.getLogger(UserGroupInformation.class);
   static final Logger LOG = LoggerFactory.getLogger(UserGroupInformation.class);
@@ -342,7 +342,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
    * @param ignoreEmptyDir Ignore permission checking for empty directory?
    * @param ignoreEmptyDir Ignore permission checking for empty directory?
    * @throws AccessControlException
    * @throws AccessControlException
    * 
    * 
-   * Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}
+   * Guarded by {@link FSNamesystem#readLock(RwLockMode)}
    * Caller of this method must hold that lock.
    * Caller of this method must hold that lock.
    */
    */
   void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner,
   void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner,
@@ -555,7 +555,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     return inodeAttrs;
     return inodeAttrs;
   }
   }
 
 
-  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}. */
+  /** Guarded by {@link FSNamesystem#readLock(RwLockMode)}. */
   private void checkOwner(INodeAttributes[] inodes, byte[][] components, int i)
   private void checkOwner(INodeAttributes[] inodes, byte[][] components, int i)
       throws AccessControlException {
       throws AccessControlException {
     if (getUser().equals(inodes[i].getUserName())) {
     if (getUser().equals(inodes[i].getUserName())) {
@@ -566,7 +566,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
         " is not the owner of inode=" + getPath(components, 0, i));
         " is not the owner of inode=" + getPath(components, 0, i));
   }
   }
 
 
-  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}.
+  /** Guarded by {@link FSNamesystem#readLock(RwLockMode)}.
    * @throws AccessControlException
    * @throws AccessControlException
    * @throws ParentNotDirectoryException
    * @throws ParentNotDirectoryException
    * @throws UnresolvedPathException
    * @throws UnresolvedPathException
@@ -580,7 +580,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     }
     }
   }
   }
 
 
-  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}. */
+  /** Guarded by {@link FSNamesystem#readLock(RwLockMode)}. */
   private void checkSubAccess(byte[][] components, int pathIdx,
   private void checkSubAccess(byte[][] components, int pathIdx,
       INode inode, int snapshotId, FsAction access, boolean ignoreEmptyDir)
       INode inode, int snapshotId, FsAction access, boolean ignoreEmptyDir)
       throws AccessControlException {
       throws AccessControlException {
@@ -654,7 +654,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     }
     }
   }
   }
 
 
-  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}. */
+  /** Guarded by {@link FSNamesystem#readLock(RwLockMode)}. */
   private void check(INodeAttributes[] inodes, byte[][] components, int i,
   private void check(INodeAttributes[] inodes, byte[][] components, int i,
       FsAction access) throws AccessControlException {
       FsAction access) throws AccessControlException {
     INodeAttributes inode = (i >= 0) ? inodes[i] : null;
     INodeAttributes inode = (i >= 0) ? inodes[i] : null;
@@ -768,7 +768,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     return !foundMatch && mode.getOtherAction().implies(access);
     return !foundMatch && mode.getOtherAction().implies(access);
   }
   }
 
 
-  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}. */
+  /** Guarded by {@link FSNamesystem#readLock(RwLockMode)}. */
   private void checkStickyBit(INodeAttributes[] inodes, byte[][] components,
   private void checkStickyBit(INodeAttributes[] inodes, byte[][] components,
       int index) throws AccessControlException {
       int index) throws AccessControlException {
     INodeAttributes parent = inodes[index];
     INodeAttributes parent = inodes[index];

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java

@@ -28,9 +28,9 @@ import java.util.List;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Timer;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
@@ -128,7 +128,7 @@ public abstract class FSTreeTraverser {
       List<byte[]> startAfters, final TraverseInfo traverseInfo)
       List<byte[]> startAfters, final TraverseInfo traverseInfo)
       throws IOException, InterruptedException {
       throws IOException, InterruptedException {
     assert dir.hasReadLock();
     assert dir.hasReadLock();
-    assert dir.getFSNamesystem().hasReadLock(FSNamesystemLockMode.FS);
+    assert dir.getFSNamesystem().hasReadLock(RwLockMode.FS);
     long lockStartTime = timer.monotonicNow();
     long lockStartTime = timer.monotonicNow();
     Preconditions.checkNotNull(curr, "Current inode can't be null");
     Preconditions.checkNotNull(curr, "Current inode can't be null");
     checkINodeReady(startId);
     checkINodeReady(startId);
@@ -262,13 +262,13 @@ public abstract class FSTreeTraverser {
   }
   }
 
 
   protected void readLock() {
   protected void readLock() {
-    dir.getFSNamesystem().readLock(FSNamesystemLockMode.FS);
+    dir.getFSNamesystem().readLock(RwLockMode.FS);
     dir.readLock();
     dir.readLock();
   }
   }
 
 
   protected void readUnlock() {
   protected void readUnlock() {
     dir.readUnlock();
     dir.readUnlock();
-    dir.getFSNamesystem().readUnlock(FSNamesystemLockMode.FS, "FSTreeTraverser");
+    dir.getFSNamesystem().readUnlock(RwLockMode.FS, "FSTreeTraverser");
   }
   }
 
 
 
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java

@@ -26,13 +26,13 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
 import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
 import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor;
 import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor;
 import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor.Counts;
 import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor.Counts;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.Tool;
@@ -274,14 +274,14 @@ public class FsImageValidation {
 
 
       final FSImageFormat.LoaderDelegator loader
       final FSImageFormat.LoaderDelegator loader
           = FSImageFormat.newLoader(conf, namesystem);
           = FSImageFormat.newLoader(conf, namesystem);
-      namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+      namesystem.writeLock(RwLockMode.GLOBAL);
       namesystem.getFSDirectory().writeLock();
       namesystem.getFSDirectory().writeLock();
       try {
       try {
         loader.load(fsImageFile, false);
         loader.load(fsImageFile, false);
         fsImage.setLastAppliedTxId(loader);
         fsImage.setLastAppliedTxId(loader);
       } finally {
       } finally {
         namesystem.getFSDirectory().writeUnlock();
         namesystem.getFSDirectory().writeUnlock();
-        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "loadImage");
+        namesystem.writeUnlock(RwLockMode.GLOBAL, "loadImage");
       }
       }
     }
     }
     t.cancel();
     t.cancel();

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java

@@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
@@ -136,7 +136,7 @@ public class LeaseManager {
    * calling this method.
    * calling this method.
    */
    */
   synchronized long getNumUnderConstructionBlocks() {
   synchronized long getNumUnderConstructionBlocks() {
-    assert this.fsnamesystem.hasReadLock(FSNamesystemLockMode.GLOBAL) :
+    assert this.fsnamesystem.hasReadLock(RwLockMode.GLOBAL) :
         "The FSNamesystem read lock wasn't acquired before counting under construction blocks";
         "The FSNamesystem read lock wasn't acquired before counting under construction blocks";
     long numUCBlocks = 0;
     long numUCBlocks = 0;
     for (Long id : getINodeIdWithLeases()) {
     for (Long id : getINodeIdWithLeases()) {
@@ -208,7 +208,7 @@ public class LeaseManager {
    */
    */
   public Set<INodesInPath> getINodeWithLeases(final INodeDirectory
   public Set<INodesInPath> getINodeWithLeases(final INodeDirectory
       ancestorDir) throws IOException {
       ancestorDir) throws IOException {
-    assert fsnamesystem.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsnamesystem.hasReadLock(RwLockMode.FS);
     final long startTimeMs = Time.monotonicNow();
     final long startTimeMs = Time.monotonicNow();
     Set<INodesInPath> iipSet = new HashSet<>();
     Set<INodesInPath> iipSet = new HashSet<>();
     final INode[] inodes = getINodesWithLease();
     final INode[] inodes = getINodesWithLease();
@@ -285,7 +285,7 @@ public class LeaseManager {
    */
    */
   public BatchedListEntries<OpenFileEntry> getUnderConstructionFiles(
   public BatchedListEntries<OpenFileEntry> getUnderConstructionFiles(
       final long prevId, final String path) throws IOException {
       final long prevId, final String path) throws IOException {
-    assert fsnamesystem.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsnamesystem.hasReadLock(RwLockMode.FS);
     SortedMap<Long, Lease> remainingLeases;
     SortedMap<Long, Lease> remainingLeases;
     synchronized (this) {
     synchronized (this) {
       remainingLeases = leasesById.tailMap(prevId, false);
       remainingLeases = leasesById.tailMap(prevId, false);
@@ -543,13 +543,13 @@ public class LeaseManager {
             continue;
             continue;
           }
           }
 
 
-          fsnamesystem.writeLockInterruptibly(FSNamesystemLockMode.GLOBAL);
+          fsnamesystem.writeLockInterruptibly(RwLockMode.GLOBAL);
           try {
           try {
             if (!fsnamesystem.isInSafeMode()) {
             if (!fsnamesystem.isInSafeMode()) {
               needSync = checkLeases(candidates);
               needSync = checkLeases(candidates);
             }
             }
           } finally {
           } finally {
-            fsnamesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "leaseManager");
+            fsnamesystem.writeUnlock(RwLockMode.GLOBAL, "leaseManager");
             // lease reassignments should to be sync'ed.
             // lease reassignments should to be sync'ed.
             if (needSync) {
             if (needSync) {
               fsnamesystem.getEditLog().logSync();
               fsnamesystem.getEditLog().logSync();
@@ -574,7 +574,7 @@ public class LeaseManager {
 
 
   private synchronized boolean checkLeases(Collection<Lease> leasesToCheck) {
   private synchronized boolean checkLeases(Collection<Lease> leasesToCheck) {
     boolean needSync = false;
     boolean needSync = false;
-    assert fsnamesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsnamesystem.hasWriteLock(RwLockMode.GLOBAL);
 
 
     long start = monotonicNow();
     long start = monotonicNow();
     for (Lease leaseToCheck : leasesToCheck) {
     for (Lease leaseToCheck : leasesToCheck) {

+ 13 - 13
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.Preconditions;
@@ -73,6 +72,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.ipc.ExternalCall;
 import org.apache.hadoop.ipc.ExternalCall;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
@@ -2239,14 +2239,14 @@ public class NameNode extends ReconfigurableBase implements
     
     
     @Override
     @Override
     public void writeLock() {
     public void writeLock() {
-      namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+      namesystem.writeLock(RwLockMode.GLOBAL);
       namesystem.lockRetryCache();
       namesystem.lockRetryCache();
     }
     }
     
     
     @Override
     @Override
     public void writeUnlock() {
     public void writeUnlock() {
       namesystem.unlockRetryCache();
       namesystem.unlockRetryCache();
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "HAState");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "HAState");
     }
     }
     
     
     /** Check if an operation of given category is allowed */
     /** Check if an operation of given category is allowed */
@@ -2397,7 +2397,7 @@ public class NameNode extends ReconfigurableBase implements
       final String property) throws ReconfigurationException {
       final String property) throws ReconfigurationException {
     BlockManager bm = namesystem.getBlockManager();
     BlockManager bm = namesystem.getBlockManager();
     int newSetting;
     int newSetting;
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       if (property.equals(DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY)) {
       if (property.equals(DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY)) {
         bm.setMaxReplicationStreams(
         bm.setMaxReplicationStreams(
@@ -2435,7 +2435,7 @@ public class NameNode extends ReconfigurableBase implements
       throw new ReconfigurationException(property, newVal, getConf().get(
       throw new ReconfigurationException(property, newVal, getConf().get(
           property), e);
           property), e);
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfReplicationParameters");
+      namesystem.writeUnlock(RwLockMode.BM, "reconfReplicationParameters");
     }
     }
   }
   }
 
 
@@ -2455,7 +2455,7 @@ public class NameNode extends ReconfigurableBase implements
   private String reconfHeartbeatInterval(final DatanodeManager datanodeManager,
   private String reconfHeartbeatInterval(final DatanodeManager datanodeManager,
       final String property, final String newVal)
       final String property, final String newVal)
       throws ReconfigurationException {
       throws ReconfigurationException {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       if (newVal == null) {
       if (newVal == null) {
         // set to default
         // set to default
@@ -2472,7 +2472,7 @@ public class NameNode extends ReconfigurableBase implements
       throw new ReconfigurationException(property, newVal, getConf().get(
       throw new ReconfigurationException(property, newVal, getConf().get(
           property), nfe);
           property), nfe);
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfHeartbeatInterval");
+      namesystem.writeUnlock(RwLockMode.BM, "reconfHeartbeatInterval");
       LOG.info("RECONFIGURE* changed heartbeatInterval to "
       LOG.info("RECONFIGURE* changed heartbeatInterval to "
           + datanodeManager.getHeartbeatInterval());
           + datanodeManager.getHeartbeatInterval());
     }
     }
@@ -2481,7 +2481,7 @@ public class NameNode extends ReconfigurableBase implements
   private String reconfHeartbeatRecheckInterval(
   private String reconfHeartbeatRecheckInterval(
       final DatanodeManager datanodeManager, final String property,
       final DatanodeManager datanodeManager, final String property,
       final String newVal) throws ReconfigurationException {
       final String newVal) throws ReconfigurationException {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       if (newVal == null) {
       if (newVal == null) {
         // set to default
         // set to default
@@ -2496,7 +2496,7 @@ public class NameNode extends ReconfigurableBase implements
       throw new ReconfigurationException(property, newVal, getConf().get(
       throw new ReconfigurationException(property, newVal, getConf().get(
           property), nfe);
           property), nfe);
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfHeartbeatRecheckInterval");
+      namesystem.writeUnlock(RwLockMode.BM, "reconfHeartbeatRecheckInterval");
       LOG.info("RECONFIGURE* changed heartbeatRecheckInterval to "
       LOG.info("RECONFIGURE* changed heartbeatRecheckInterval to "
           + datanodeManager.getHeartbeatRecheckInterval());
           + datanodeManager.getHeartbeatRecheckInterval());
     }
     }
@@ -2621,7 +2621,7 @@ public class NameNode extends ReconfigurableBase implements
   String reconfigureSlowNodesParameters(final DatanodeManager datanodeManager,
   String reconfigureSlowNodesParameters(final DatanodeManager datanodeManager,
       final String property, final String newVal) throws ReconfigurationException {
       final String property, final String newVal) throws ReconfigurationException {
     BlockManager bm = namesystem.getBlockManager();
     BlockManager bm = namesystem.getBlockManager();
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     String result;
     String result;
     try {
     try {
       switch (property) {
       switch (property) {
@@ -2698,13 +2698,13 @@ public class NameNode extends ReconfigurableBase implements
       throw new ReconfigurationException(property, newVal, getConf().get(
       throw new ReconfigurationException(property, newVal, getConf().get(
           property), e);
           property), e);
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfigureSlowNodesParameters");
+      namesystem.writeUnlock(RwLockMode.BM, "reconfigureSlowNodesParameters");
     }
     }
   }
   }
 
 
   private String reconfigureBlockInvalidateLimit(final DatanodeManager datanodeManager,
   private String reconfigureBlockInvalidateLimit(final DatanodeManager datanodeManager,
       final String property, final String newVal) throws ReconfigurationException {
       final String property, final String newVal) throws ReconfigurationException {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       if (newVal == null) {
       if (newVal == null) {
         datanodeManager.setBlockInvalidateLimit(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
         datanodeManager.setBlockInvalidateLimit(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
@@ -2718,7 +2718,7 @@ public class NameNode extends ReconfigurableBase implements
     } catch (NumberFormatException e) {
     } catch (NumberFormatException e) {
       throw new ReconfigurationException(property, newVal, getConf().get(property), e);
       throw new ReconfigurationException(property, newVal, getConf().get(property), e);
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfigureBlockInvalidateLimit");
+      namesystem.writeUnlock(RwLockMode.BM, "reconfigureBlockInvalidateLimit");
     }
     }
   }
   }
 
 

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java

@@ -38,7 +38,6 @@ import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadLocalRandom;
 
 
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
@@ -80,6 +79,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.net.NodeBase;
@@ -291,7 +291,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     }
     }
 
 
     // TODO: Just hold the BM read lock.
     // TODO: Just hold the BM read lock.
-    namenode.getNamesystem().readLock(FSNamesystemLockMode.GLOBAL);
+    namenode.getNamesystem().readLock(RwLockMode.GLOBAL);
     try {
     try {
       //get blockInfo
       //get blockInfo
       Block block = new Block(Block.getBlockId(blockId));
       Block block = new Block(Block.getBlockId(blockId));
@@ -355,7 +355,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       out.print("\n\n" + errMsg);
       out.print("\n\n" + errMsg);
       LOG.warn("Error in looking up block", e);
       LOG.warn("Error in looking up block", e);
     } finally {
     } finally {
-      namenode.getNamesystem().readUnlock(FSNamesystemLockMode.GLOBAL, "fsck");
+      namenode.getNamesystem().readUnlock(RwLockMode.GLOBAL, "fsck");
     }
     }
   }
   }
 
 
@@ -587,7 +587,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     final String operationName = "fsckGetBlockLocations";
     final String operationName = "fsckGetBlockLocations";
     FSPermissionChecker.setOperationType(operationName);
     FSPermissionChecker.setOperationType(operationName);
     FSPermissionChecker pc = fsn.getPermissionChecker();
     FSPermissionChecker pc = fsn.getPermissionChecker();
-    fsn.readLock(FSNamesystemLockMode.GLOBAL);
+    fsn.readLock(RwLockMode.GLOBAL);
     try {
     try {
       blocks = FSDirStatAndListingOp.getBlockLocations(
       blocks = FSDirStatAndListingOp.getBlockLocations(
           fsn.getFSDirectory(), pc,
           fsn.getFSDirectory(), pc,
@@ -596,7 +596,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     } catch (FileNotFoundException fnfe) {
     } catch (FileNotFoundException fnfe) {
       blocks = null;
       blocks = null;
     } finally {
     } finally {
-      fsn.readUnlock(FSNamesystemLockMode.GLOBAL, operationName);
+      fsn.readUnlock(RwLockMode.GLOBAL, operationName);
     }
     }
     return blocks;
     return blocks;
   }
   }

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 package org.apache.hadoop.hdfs.server.namenode;
 
 
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 
@@ -35,6 +34,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser.TraverseInfo;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.FileEdekInfo;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.FileEdekInfo;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.ReencryptionTask;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.ReencryptionTask;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.ZoneSubmissionTracker;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.ZoneSubmissionTracker;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StopWatch;
@@ -339,7 +339,7 @@ public class ReencryptionHandler implements Runnable {
       }
       }
 
 
       final Long zoneId;
       final Long zoneId;
-      dir.getFSNamesystem().readLock(FSNamesystemLockMode.FS);
+      dir.getFSNamesystem().readLock(RwLockMode.FS);
       try {
       try {
         zoneId = getReencryptionStatus().getNextUnprocessedZone();
         zoneId = getReencryptionStatus().getNextUnprocessedZone();
         if (zoneId == null) {
         if (zoneId == null) {
@@ -351,7 +351,7 @@ public class ReencryptionHandler implements Runnable {
         getReencryptionStatus().markZoneStarted(zoneId);
         getReencryptionStatus().markZoneStarted(zoneId);
         resetSubmissionTracker(zoneId);
         resetSubmissionTracker(zoneId);
       } finally {
       } finally {
-        dir.getFSNamesystem().readUnlock(FSNamesystemLockMode.FS, "reEncryptThread");
+        dir.getFSNamesystem().readUnlock(RwLockMode.FS, "reEncryptThread");
       }
       }
 
 
       try {
       try {
@@ -443,7 +443,7 @@ public class ReencryptionHandler implements Runnable {
 
 
   List<XAttr> completeReencryption(final INode zoneNode) throws IOException {
   List<XAttr> completeReencryption(final INode zoneNode) throws IOException {
     assert dir.hasWriteLock();
     assert dir.hasWriteLock();
-    assert dir.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
+    assert dir.getFSNamesystem().hasWriteLock(RwLockMode.FS);
     final Long zoneId = zoneNode.getId();
     final Long zoneId = zoneNode.getId();
     ZoneReencryptionStatus zs = getReencryptionStatus().getZoneStatus(zoneId);
     ZoneReencryptionStatus zs = getReencryptionStatus().getZoneStatus(zoneId);
     assert zs != null;
     assert zs != null;
@@ -614,7 +614,7 @@ public class ReencryptionHandler implements Runnable {
     protected void checkPauseForTesting()
     protected void checkPauseForTesting()
         throws InterruptedException {
         throws InterruptedException {
       assert !dir.hasReadLock();
       assert !dir.hasReadLock();
-      assert !dir.getFSNamesystem().hasReadLock(FSNamesystemLockMode.FS);
+      assert !dir.getFSNamesystem().hasReadLock(RwLockMode.FS);
       while (shouldPauseForTesting) {
       while (shouldPauseForTesting) {
         LOG.info("Sleeping in the re-encrypt handler for unit test.");
         LOG.info("Sleeping in the re-encrypt handler for unit test.");
         synchronized (reencryptionHandler) {
         synchronized (reencryptionHandler) {
@@ -748,7 +748,7 @@ public class ReencryptionHandler implements Runnable {
     @Override
     @Override
     protected void throttle() throws InterruptedException {
     protected void throttle() throws InterruptedException {
       assert !dir.hasReadLock();
       assert !dir.hasReadLock();
-      assert !dir.getFSNamesystem().hasReadLock(FSNamesystemLockMode.FS);
+      assert !dir.getFSNamesystem().hasReadLock(RwLockMode.FS);
       final int numCores = Runtime.getRuntime().availableProcessors();
       final int numCores = Runtime.getRuntime().availableProcessors();
       if (taskQueue.size() >= numCores) {
       if (taskQueue.size() >= numCores) {
         LOG.debug("Re-encryption handler throttling because queue size {} is"
         LOG.debug("Re-encryption handler throttling because queue size {} is"

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java

@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionHandler.ReencryptionBatch;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionHandler.ReencryptionBatch;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StopWatch;
@@ -435,7 +435,7 @@ public final class ReencryptionUpdater implements Runnable {
 
 
     boolean shouldRetry;
     boolean shouldRetry;
     do {
     do {
-      dir.getFSNamesystem().writeLock(FSNamesystemLockMode.FS);
+      dir.getFSNamesystem().writeLock(RwLockMode.FS);
       try {
       try {
         throttleTimerLocked.start();
         throttleTimerLocked.start();
         processTask(task);
         processTask(task);
@@ -453,7 +453,7 @@ public final class ReencryptionUpdater implements Runnable {
         task.processed = true;
         task.processed = true;
         shouldRetry = false;
         shouldRetry = false;
       } finally {
       } finally {
-        dir.getFSNamesystem().writeUnlock(FSNamesystemLockMode.FS, "reencryptUpdater");
+        dir.getFSNamesystem().writeUnlock(RwLockMode.FS, "reencryptUpdater");
         throttleTimerLocked.stop();
         throttleTimerLocked.stop();
       }
       }
       // logSync regardless, to prevent edit log buffer overflow triggering
       // logSync regardless, to prevent edit log buffer overflow triggering
@@ -501,7 +501,7 @@ public final class ReencryptionUpdater implements Runnable {
 
 
   private synchronized void checkPauseForTesting() throws InterruptedException {
   private synchronized void checkPauseForTesting() throws InterruptedException {
     assert !dir.hasWriteLock();
     assert !dir.hasWriteLock();
-    assert !dir.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
+    assert !dir.getFSNamesystem().hasWriteLock(RwLockMode.FS);
     if (pauseAfterNthCheckpoint != 0) {
     if (pauseAfterNthCheckpoint != 0) {
       ZoneSubmissionTracker tracker =
       ZoneSubmissionTracker tracker =
           handler.unprotectedGetTracker(pauseZoneId);
           handler.unprotectedGetTracker(pauseZoneId);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -37,7 +37,6 @@ import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
 import org.apache.commons.cli.PosixParser;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -63,6 +62,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.Canceler;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.MD5Hash;
@@ -1095,11 +1095,11 @@ public class SecondaryNameNode implements Runnable,
             sig.mostRecentCheckpointTxId + " even though it should have " +
             sig.mostRecentCheckpointTxId + " even though it should have " +
             "just been downloaded");
             "just been downloaded");
       }
       }
-      dstNamesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+      dstNamesystem.writeLock(RwLockMode.GLOBAL);
       try {
       try {
         dstImage.reloadFromImageFile(file, dstNamesystem);
         dstImage.reloadFromImageFile(file, dstNamesystem);
       } finally {
       } finally {
-        dstNamesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "reloadFromImageFile");
+        dstNamesystem.writeUnlock(RwLockMode.GLOBAL, "reloadFromImageFile");
       }
       }
       dstNamesystem.imageLoadComplete();
       dstNamesystem.imageLoadComplete();
     }
     }

+ 17 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/FSNLockManager.java

@@ -22,26 +22,28 @@ import org.apache.hadoop.classification.VisibleForTesting;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.Supplier;
 import java.util.function.Supplier;
 
 
+import org.apache.hadoop.hdfs.util.RwLockMode;
+
 public interface FSNLockManager {
 public interface FSNLockManager {
 
 
   /**
   /**
    * Acquire read lock for an operation according to the lock mode.
    * Acquire read lock for an operation according to the lock mode.
    * @param lockMode locking mode
    * @param lockMode locking mode
    */
    */
-  void readLock(FSNamesystemLockMode lockMode);
+  void readLock(RwLockMode lockMode);
 
 
   /**
   /**
    * Acquire read lock according to the lock mode, unless interrupted while waiting.
    * Acquire read lock according to the lock mode, unless interrupted while waiting.
    * @param lockMode locking mode
    * @param lockMode locking mode
    */
    */
-  void readLockInterruptibly(FSNamesystemLockMode lockMode) throws InterruptedException;
+  void readLockInterruptibly(RwLockMode lockMode) throws InterruptedException;
 
 
   /**
   /**
    * Release read lock for the operation according to the lock mode.
    * Release read lock for the operation according to the lock mode.
    * @param lockMode locking mode
    * @param lockMode locking mode
    * @param opName operation name
    * @param opName operation name
    */
    */
-  void readUnlock(FSNamesystemLockMode lockMode, String opName);
+  void readUnlock(RwLockMode lockMode, String opName);
 
 
   /**
   /**
    * Release read lock for the operation according to the lock mode.
    * Release read lock for the operation according to the lock mode.
@@ -49,21 +51,21 @@ public interface FSNLockManager {
    * @param opName operation name
    * @param opName operation name
    * @param lockReportInfoSupplier supplier used to report some information for this lock.
    * @param lockReportInfoSupplier supplier used to report some information for this lock.
    */
    */
-  void readUnlock(FSNamesystemLockMode lockMode, String opName,
+  void readUnlock(RwLockMode lockMode, String opName,
       Supplier<String> lockReportInfoSupplier);
       Supplier<String> lockReportInfoSupplier);
 
 
   /**
   /**
    * Acquire write lock for an operation according to the lock mode.
    * Acquire write lock for an operation according to the lock mode.
    * @param lockMode locking mode
    * @param lockMode locking mode
    */
    */
-  void writeLock(FSNamesystemLockMode lockMode);
+  void writeLock(RwLockMode lockMode);
 
 
   /**
   /**
    * Release write lock for the operation according to the lock mode.
    * Release write lock for the operation according to the lock mode.
    * @param lockMode locking mode
    * @param lockMode locking mode
    * @param opName operation name
    * @param opName operation name
    */
    */
-  void writeUnlock(FSNamesystemLockMode lockMode, String opName);
+  void writeUnlock(RwLockMode lockMode, String opName);
 
 
   /**
   /**
    * Release write lock for the operation according to the lock mode.
    * Release write lock for the operation according to the lock mode.
@@ -72,7 +74,7 @@ public interface FSNLockManager {
    * @param suppressWriteLockReport When false, event of write lock being held
    * @param suppressWriteLockReport When false, event of write lock being held
    * for long time will be logged in logs and metrics.
    * for long time will be logged in logs and metrics.
    */
    */
-  void writeUnlock(FSNamesystemLockMode lockMode, String opName,
+  void writeUnlock(RwLockMode lockMode, String opName,
       boolean suppressWriteLockReport);
       boolean suppressWriteLockReport);
 
 
   /**
   /**
@@ -81,24 +83,24 @@ public interface FSNLockManager {
    * @param opName operation name
    * @param opName operation name
    * @param lockReportInfoSupplier supplier used to report information for this lock.
    * @param lockReportInfoSupplier supplier used to report information for this lock.
    */
    */
-  void writeUnlock(FSNamesystemLockMode lockMode, String opName,
+  void writeUnlock(RwLockMode lockMode, String opName,
       Supplier<String> lockReportInfoSupplier);
       Supplier<String> lockReportInfoSupplier);
 
 
-  void writeLockInterruptibly(FSNamesystemLockMode lockMode) throws InterruptedException;
+  void writeLockInterruptibly(RwLockMode lockMode) throws InterruptedException;
 
 
   /**
   /**
    * Check if the current thread holds write lock according to the lock mode.
    * Check if the current thread holds write lock according to the lock mode.
    * @param lockMode locking mode
    * @param lockMode locking mode
    * @return true if the current thread is holding the write-lock, else false.
    * @return true if the current thread is holding the write-lock, else false.
    */
    */
-  boolean hasWriteLock(FSNamesystemLockMode lockMode);
+  boolean hasWriteLock(RwLockMode lockMode);
 
 
   /**
   /**
    * Check if the current thread holds read lock according to the lock mode.
    * Check if the current thread holds read lock according to the lock mode.
    * @param lockMode locking mode
    * @param lockMode locking mode
    * @return true if the current thread is holding the read-lock, else false.
    * @return true if the current thread is holding the read-lock, else false.
    */
    */
-  boolean hasReadLock(FSNamesystemLockMode lockMode);
+  boolean hasReadLock(RwLockMode lockMode);
 
 
   /**
   /**
    * Queries the number of reentrant read holds on this lock by the
    * Queries the number of reentrant read holds on this lock by the
@@ -109,7 +111,7 @@ public interface FSNLockManager {
    * @return the number of holds on the read lock by the current thread,
    * @return the number of holds on the read lock by the current thread,
    *         or zero if the read lock is not held by the current thread
    *         or zero if the read lock is not held by the current thread
    */
    */
-  int getReadHoldCount(FSNamesystemLockMode lockMode);
+  int getReadHoldCount(RwLockMode lockMode);
 
 
   /**
   /**
    * Returns the QueueLength of waiting threads.
    * Returns the QueueLength of waiting threads.
@@ -118,7 +120,7 @@ public interface FSNLockManager {
    * @param lockMode locking mode
    * @param lockMode locking mode
    * @return int - Number of threads waiting on this lock
    * @return int - Number of threads waiting on this lock
    */
    */
-  int getQueueLength(FSNamesystemLockMode lockMode);
+  int getQueueLength(RwLockMode lockMode);
 
 
   /**
   /**
    * Returns the number of time the read lock
    * Returns the number of time the read lock
@@ -128,7 +130,7 @@ public interface FSNLockManager {
    * @return long - Number of time the read lock
    * @return long - Number of time the read lock
    * has been held longer than the threshold
    * has been held longer than the threshold
    */
    */
-  long getNumOfReadLockLongHold(FSNamesystemLockMode lockMode);
+  long getNumOfReadLockLongHold(RwLockMode lockMode);
 
 
   /**
   /**
    * Returns the number of time the write-lock
    * Returns the number of time the write-lock
@@ -138,7 +140,7 @@ public interface FSNLockManager {
    * @return long - Number of time the write-lock
    * @return long - Number of time the write-lock
    * has been held longer than the threshold.
    * has been held longer than the threshold.
    */
    */
-  long getNumOfWriteLockLongHold(FSNamesystemLockMode lockMode);
+  long getNumOfWriteLockLongHold(RwLockMode lockMode);
 
 
   /**
   /**
    * Check if the metrics is enabled.
    * Check if the metrics is enabled.

+ 62 - 61
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/FineGrainedFSNamesystemLock.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.fgl;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystemLock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystemLock;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 
 
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -40,19 +41,19 @@ public class FineGrainedFSNamesystemLock implements FSNLockManager {
   }
   }
 
 
   @Override
   @Override
-  public void readLock(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public void readLock(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.fsLock.readLock();
       this.fsLock.readLock();
       this.bmLock.readLock();
       this.bmLock.readLock();
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.readLock();
       this.fsLock.readLock();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.readLock();
       this.bmLock.readLock();
     }
     }
   }
   }
 
 
-  public void readLockInterruptibly(FSNamesystemLockMode lockMode) throws InterruptedException  {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public void readLockInterruptibly(RwLockMode lockMode) throws InterruptedException  {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.fsLock.readLockInterruptibly();
       this.fsLock.readLockInterruptibly();
       try {
       try {
         this.bmLock.readLockInterruptibly();
         this.bmLock.readLockInterruptibly();
@@ -62,90 +63,90 @@ public class FineGrainedFSNamesystemLock implements FSNLockManager {
         this.fsLock.readUnlock("BMReadLockInterruptiblyFailed");
         this.fsLock.readUnlock("BMReadLockInterruptiblyFailed");
         throw e;
         throw e;
       }
       }
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.readLockInterruptibly();
       this.fsLock.readLockInterruptibly();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.readLockInterruptibly();
       this.bmLock.readLockInterruptibly();
     }
     }
   }
   }
 
 
   @Override
   @Override
-  public void readUnlock(FSNamesystemLockMode lockMode, String opName) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public void readUnlock(RwLockMode lockMode, String opName) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.bmLock.readUnlock(opName);
       this.bmLock.readUnlock(opName);
       this.fsLock.readUnlock(opName);
       this.fsLock.readUnlock(opName);
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.readUnlock(opName);
       this.fsLock.readUnlock(opName);
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.readUnlock(opName);
       this.bmLock.readUnlock(opName);
     }
     }
   }
   }
 
 
-  public void readUnlock(FSNamesystemLockMode lockMode, String opName,
+  public void readUnlock(RwLockMode lockMode, String opName,
       Supplier<String> lockReportInfoSupplier) {
       Supplier<String> lockReportInfoSupplier) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.bmLock.readUnlock(opName, lockReportInfoSupplier);
       this.bmLock.readUnlock(opName, lockReportInfoSupplier);
       this.fsLock.readUnlock(opName, lockReportInfoSupplier);
       this.fsLock.readUnlock(opName, lockReportInfoSupplier);
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.readUnlock(opName, lockReportInfoSupplier);
       this.fsLock.readUnlock(opName, lockReportInfoSupplier);
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.readUnlock(opName, lockReportInfoSupplier);
       this.bmLock.readUnlock(opName, lockReportInfoSupplier);
     }
     }
   }
   }
 
 
   @Override
   @Override
-  public void writeLock(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public void writeLock(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.fsLock.writeLock();
       this.fsLock.writeLock();
       this.bmLock.writeLock();
       this.bmLock.writeLock();
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.writeLock();
       this.fsLock.writeLock();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.writeLock();
       this.bmLock.writeLock();
     }
     }
   }
   }
 
 
   @Override
   @Override
-  public void writeUnlock(FSNamesystemLockMode lockMode, String opName) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public void writeUnlock(RwLockMode lockMode, String opName) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.bmLock.writeUnlock(opName);
       this.bmLock.writeUnlock(opName);
       this.fsLock.writeUnlock(opName);
       this.fsLock.writeUnlock(opName);
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.writeUnlock(opName);
       this.fsLock.writeUnlock(opName);
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.writeUnlock(opName);
       this.bmLock.writeUnlock(opName);
     }
     }
   }
   }
 
 
   @Override
   @Override
-  public void writeUnlock(FSNamesystemLockMode lockMode, String opName,
+  public void writeUnlock(RwLockMode lockMode, String opName,
       boolean suppressWriteLockReport) {
       boolean suppressWriteLockReport) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.bmLock.writeUnlock(opName, suppressWriteLockReport);
       this.bmLock.writeUnlock(opName, suppressWriteLockReport);
       this.fsLock.writeUnlock(opName, suppressWriteLockReport);
       this.fsLock.writeUnlock(opName, suppressWriteLockReport);
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.writeUnlock(opName, suppressWriteLockReport);
       this.fsLock.writeUnlock(opName, suppressWriteLockReport);
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.writeUnlock(opName, suppressWriteLockReport);
       this.bmLock.writeUnlock(opName, suppressWriteLockReport);
     }
     }
   }
   }
 
 
-  public void writeUnlock(FSNamesystemLockMode lockMode, String opName,
+  public void writeUnlock(RwLockMode lockMode, String opName,
       Supplier<String> lockReportInfoSupplier) {
       Supplier<String> lockReportInfoSupplier) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.bmLock.writeUnlock(opName, lockReportInfoSupplier);
       this.bmLock.writeUnlock(opName, lockReportInfoSupplier);
       this.fsLock.writeUnlock(opName, lockReportInfoSupplier);
       this.fsLock.writeUnlock(opName, lockReportInfoSupplier);
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.writeUnlock(opName, lockReportInfoSupplier);
       this.fsLock.writeUnlock(opName, lockReportInfoSupplier);
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.writeUnlock(opName, lockReportInfoSupplier);
       this.bmLock.writeUnlock(opName, lockReportInfoSupplier);
     }
     }
   }
   }
 
 
   @Override
   @Override
-  public void writeLockInterruptibly(FSNamesystemLockMode lockMode)
+  public void writeLockInterruptibly(RwLockMode lockMode)
       throws InterruptedException {
       throws InterruptedException {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.fsLock.writeLockInterruptibly();
       this.fsLock.writeLockInterruptibly();
       try {
       try {
         this.bmLock.writeLockInterruptibly();
         this.bmLock.writeLockInterruptibly();
@@ -155,16 +156,16 @@ public class FineGrainedFSNamesystemLock implements FSNLockManager {
         this.fsLock.writeUnlock("BMWriteLockInterruptiblyFailed");
         this.fsLock.writeUnlock("BMWriteLockInterruptiblyFailed");
         throw e;
         throw e;
       }
       }
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.writeLockInterruptibly();
       this.fsLock.writeLockInterruptibly();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.writeLockInterruptibly();
       this.bmLock.writeLockInterruptibly();
     }
     }
   }
   }
 
 
   @Override
   @Override
-  public boolean hasWriteLock(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public boolean hasWriteLock(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       if (this.fsLock.isWriteLockedByCurrentThread()) {
       if (this.fsLock.isWriteLockedByCurrentThread()) {
         // The bm writeLock should be held by the current thread.
         // The bm writeLock should be held by the current thread.
         assert this.bmLock.isWriteLockedByCurrentThread();
         assert this.bmLock.isWriteLockedByCurrentThread();
@@ -174,18 +175,18 @@ public class FineGrainedFSNamesystemLock implements FSNLockManager {
         assert !this.bmLock.isWriteLockedByCurrentThread();
         assert !this.bmLock.isWriteLockedByCurrentThread();
         return false;
         return false;
       }
       }
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       return this.fsLock.isWriteLockedByCurrentThread();
       return this.fsLock.isWriteLockedByCurrentThread();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       return this.bmLock.isWriteLockedByCurrentThread();
       return this.bmLock.isWriteLockedByCurrentThread();
     }
     }
     return false;
     return false;
   }
   }
 
 
   @Override
   @Override
-  public boolean hasReadLock(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
-      if (hasWriteLock(FSNamesystemLockMode.GLOBAL)) {
+  public boolean hasReadLock(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
+      if (hasWriteLock(RwLockMode.GLOBAL)) {
         return true;
         return true;
       } else if (this.fsLock.getReadHoldCount() > 0) {
       } else if (this.fsLock.getReadHoldCount() > 0) {
         // The bm readLock should be held by the current thread.
         // The bm readLock should be held by the current thread.
@@ -196,9 +197,9 @@ public class FineGrainedFSNamesystemLock implements FSNLockManager {
         assert this.bmLock.getReadHoldCount() <= 0;
         assert this.bmLock.getReadHoldCount() <= 0;
         return false;
         return false;
       }
       }
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       return this.fsLock.getReadHoldCount() > 0 || this.fsLock.isWriteLockedByCurrentThread();
       return this.fsLock.getReadHoldCount() > 0 || this.fsLock.isWriteLockedByCurrentThread();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       return this.bmLock.getReadHoldCount() > 0 || this.bmLock.isWriteLockedByCurrentThread();
       return this.bmLock.getReadHoldCount() > 0 || this.bmLock.isWriteLockedByCurrentThread();
     }
     }
     return false;
     return false;
@@ -209,48 +210,48 @@ public class FineGrainedFSNamesystemLock implements FSNLockManager {
    * This method is only used for ComputeDirectoryContentSummary.
    * This method is only used for ComputeDirectoryContentSummary.
    * For the GLOBAL mode, just return the FSLock's ReadHoldCount.
    * For the GLOBAL mode, just return the FSLock's ReadHoldCount.
    */
    */
-  public int getReadHoldCount(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public int getReadHoldCount(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       return this.fsLock.getReadHoldCount();
       return this.fsLock.getReadHoldCount();
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       return this.fsLock.getReadHoldCount();
       return this.fsLock.getReadHoldCount();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       return this.bmLock.getReadHoldCount();
       return this.bmLock.getReadHoldCount();
     }
     }
     return -1;
     return -1;
   }
   }
 
 
   @Override
   @Override
-  public int getQueueLength(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public int getQueueLength(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       return -1;
       return -1;
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       return this.fsLock.getQueueLength();
       return this.fsLock.getQueueLength();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       return this.bmLock.getQueueLength();
       return this.bmLock.getQueueLength();
     }
     }
     return -1;
     return -1;
   }
   }
 
 
   @Override
   @Override
-  public long getNumOfReadLockLongHold(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public long getNumOfReadLockLongHold(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       return -1;
       return -1;
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       return this.fsLock.getNumOfReadLockLongHold();
       return this.fsLock.getNumOfReadLockLongHold();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       return this.bmLock.getNumOfReadLockLongHold();
       return this.bmLock.getNumOfReadLockLongHold();
     }
     }
     return -1;
     return -1;
   }
   }
 
 
   @Override
   @Override
-  public long getNumOfWriteLockLongHold(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public long getNumOfWriteLockLongHold(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       return -1;
       return -1;
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       return this.fsLock.getNumOfWriteLockLongHold();
       return this.fsLock.getNumOfWriteLockLongHold();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       return this.bmLock.getNumOfWriteLockLongHold();
       return this.bmLock.getNumOfWriteLockLongHold();
     }
     }
     return -1;
     return -1;

+ 16 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/GlobalFSNamesystemLock.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.fgl;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystemLock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystemLock;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 
 
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -33,78 +34,78 @@ public class GlobalFSNamesystemLock implements FSNLockManager {
   }
   }
 
 
   @Override
   @Override
-  public void readLock(FSNamesystemLockMode lockMode) {
+  public void readLock(RwLockMode lockMode) {
     this.lock.readLock();
     this.lock.readLock();
   }
   }
 
 
-  public void readLockInterruptibly(FSNamesystemLockMode lockMode) throws InterruptedException  {
+  public void readLockInterruptibly(RwLockMode lockMode) throws InterruptedException  {
     this.lock.readLockInterruptibly();
     this.lock.readLockInterruptibly();
   }
   }
 
 
   @Override
   @Override
-  public void readUnlock(FSNamesystemLockMode lockMode, String opName) {
+  public void readUnlock(RwLockMode lockMode, String opName) {
     this.lock.readUnlock(opName);
     this.lock.readUnlock(opName);
   }
   }
 
 
-  public void readUnlock(FSNamesystemLockMode lockMode, String opName,
+  public void readUnlock(RwLockMode lockMode, String opName,
       Supplier<String> lockReportInfoSupplier) {
       Supplier<String> lockReportInfoSupplier) {
     this.lock.readUnlock(opName, lockReportInfoSupplier);
     this.lock.readUnlock(opName, lockReportInfoSupplier);
   }
   }
 
 
   @Override
   @Override
-  public void writeLock(FSNamesystemLockMode lockMode) {
+  public void writeLock(RwLockMode lockMode) {
     this.lock.writeLock();
     this.lock.writeLock();
   }
   }
 
 
   @Override
   @Override
-  public void writeUnlock(FSNamesystemLockMode lockMode, String opName) {
+  public void writeUnlock(RwLockMode lockMode, String opName) {
     this.lock.writeUnlock(opName);
     this.lock.writeUnlock(opName);
   }
   }
 
 
   @Override
   @Override
-  public void writeUnlock(FSNamesystemLockMode lockMode, String opName,
+  public void writeUnlock(RwLockMode lockMode, String opName,
       boolean suppressWriteLockReport) {
       boolean suppressWriteLockReport) {
     this.lock.writeUnlock(opName, suppressWriteLockReport);
     this.lock.writeUnlock(opName, suppressWriteLockReport);
   }
   }
 
 
-  public void writeUnlock(FSNamesystemLockMode lockMode, String opName,
+  public void writeUnlock(RwLockMode lockMode, String opName,
       Supplier<String> lockReportInfoSupplier) {
       Supplier<String> lockReportInfoSupplier) {
     this.lock.writeUnlock(opName, lockReportInfoSupplier);
     this.lock.writeUnlock(opName, lockReportInfoSupplier);
   }
   }
 
 
   @Override
   @Override
-  public void writeLockInterruptibly(FSNamesystemLockMode lockMode)
+  public void writeLockInterruptibly(RwLockMode lockMode)
       throws InterruptedException {
       throws InterruptedException {
     this.lock.writeLockInterruptibly();
     this.lock.writeLockInterruptibly();
   }
   }
 
 
   @Override
   @Override
-  public boolean hasWriteLock(FSNamesystemLockMode lockMode) {
+  public boolean hasWriteLock(RwLockMode lockMode) {
     return this.lock.isWriteLockedByCurrentThread();
     return this.lock.isWriteLockedByCurrentThread();
   }
   }
 
 
   @Override
   @Override
-  public boolean hasReadLock(FSNamesystemLockMode lockMode) {
+  public boolean hasReadLock(RwLockMode lockMode) {
     return this.lock.getReadHoldCount() > 0 || hasWriteLock(lockMode);
     return this.lock.getReadHoldCount() > 0 || hasWriteLock(lockMode);
   }
   }
 
 
   @Override
   @Override
-  public int getReadHoldCount(FSNamesystemLockMode lockMode) {
+  public int getReadHoldCount(RwLockMode lockMode) {
     return this.lock.getReadHoldCount();
     return this.lock.getReadHoldCount();
   }
   }
 
 
   @Override
   @Override
-  public int getQueueLength(FSNamesystemLockMode lockMode) {
+  public int getQueueLength(RwLockMode lockMode) {
     return this.lock.getQueueLength();
     return this.lock.getQueueLength();
   }
   }
 
 
   @Override
   @Override
-  public long getNumOfReadLockLongHold(FSNamesystemLockMode lockMode) {
+  public long getNumOfReadLockLongHold(RwLockMode lockMode) {
     return this.lock.getNumOfReadLockLongHold();
     return this.lock.getNumOfReadLockLongHold();
   }
   }
 
 
   @Override
   @Override
-  public long getNumOfWriteLockLongHold(FSNamesystemLockMode lockMode) {
+  public long getNumOfWriteLockLongHold(RwLockMode lockMode) {
     return this.lock.getNumOfWriteLockLongHold();
     return this.lock.getNumOfWriteLockLongHold();
   }
   }
 
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java

@@ -34,7 +34,6 @@ import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.TimeoutException;
 
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.util.Timer;
 import org.apache.hadoop.util.Timer;
@@ -54,6 +53,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.SecurityUtil;
 
 
@@ -356,7 +356,7 @@ public class EditLogTailer {
     // transitionToActive RPC takes the write lock before calling
     // transitionToActive RPC takes the write lock before calling
     // tailer.stop() -- so if we're not interruptible, it will
     // tailer.stop() -- so if we're not interruptible, it will
     // deadlock.
     // deadlock.
-    namesystem.writeLockInterruptibly(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLockInterruptibly(RwLockMode.GLOBAL);
     try {
     try {
       long currentLastTxnId = image.getLastAppliedTxId();
       long currentLastTxnId = image.getLastAppliedTxId();
       if (lastTxnId != currentLastTxnId) {
       if (lastTxnId != currentLastTxnId) {
@@ -387,7 +387,7 @@ public class EditLogTailer {
       lastLoadedTxnId = image.getLastAppliedTxId();
       lastLoadedTxnId = image.getLastAppliedTxId();
       return editsLoaded;
       return editsLoaded;
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "doTailEdits");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "doTailEdits");
     }
     }
   }
   }
 
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDeletionGc.java

@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
@@ -73,14 +73,14 @@ public class SnapshotDeletionGc {
 
 
   private void gcDeletedSnapshot(String name) {
   private void gcDeletedSnapshot(String name) {
     final Snapshot.Root deleted;
     final Snapshot.Root deleted;
-    namesystem.readLock(FSNamesystemLockMode.FS);
+    namesystem.readLock(RwLockMode.FS);
     try {
     try {
       deleted = namesystem.getSnapshotManager().chooseDeletedSnapshot();
       deleted = namesystem.getSnapshotManager().chooseDeletedSnapshot();
     } catch (Throwable e) {
     } catch (Throwable e) {
       LOG.error("Failed to chooseDeletedSnapshot", e);
       LOG.error("Failed to chooseDeletedSnapshot", e);
       throw e;
       throw e;
     } finally {
     } finally {
-      namesystem.readUnlock(FSNamesystemLockMode.FS, "gcDeletedSnapshot");
+      namesystem.readUnlock(RwLockMode.FS, "gcDeletedSnapshot");
     }
     }
     if (deleted == null) {
     if (deleted == null) {
       LOG.trace("{}: no snapshots are marked as deleted.", name);
       LOG.trace("{}: no snapshots are marked as deleted.", name);

+ 18 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java

@@ -17,29 +17,27 @@
  */
  */
 package org.apache.hadoop.hdfs.util;
 package org.apache.hadoop.hdfs.util;
 
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
-
 /** Read-write lock interface for FSNamesystem. */
 /** Read-write lock interface for FSNamesystem. */
 public interface RwLock {
 public interface RwLock {
   /** Acquire read lock. */
   /** Acquire read lock. */
   default void readLock() {
   default void readLock() {
-    readLock(FSNamesystemLockMode.GLOBAL);
+    readLock(RwLockMode.GLOBAL);
   }
   }
 
 
   /** Acquire read lock. */
   /** Acquire read lock. */
-  void readLock(FSNamesystemLockMode lockMode);
+  void readLock(RwLockMode lockMode);
 
 
   /** Acquire read lock, unless interrupted while waiting.  */
   /** Acquire read lock, unless interrupted while waiting.  */
   default void readLockInterruptibly() throws InterruptedException {
   default void readLockInterruptibly() throws InterruptedException {
-    readLockInterruptibly(FSNamesystemLockMode.GLOBAL);
+    readLockInterruptibly(RwLockMode.GLOBAL);
   }
   }
 
 
   /** Acquire read lock, unless interrupted while waiting.  */
   /** Acquire read lock, unless interrupted while waiting.  */
-  void readLockInterruptibly(FSNamesystemLockMode lockMode) throws InterruptedException;
+  void readLockInterruptibly(RwLockMode lockMode) throws InterruptedException;
 
 
   /** Release read lock. */
   /** Release read lock. */
   default void readUnlock() {
   default void readUnlock() {
-    readUnlock(FSNamesystemLockMode.GLOBAL, "OTHER");
+    readUnlock(RwLockMode.GLOBAL, "OTHER");
   }
   }
 
 
   /**
   /**
@@ -47,42 +45,42 @@ public interface RwLock {
    * @param opName Option name.
    * @param opName Option name.
    */
    */
   default void readUnlock(String opName) {
   default void readUnlock(String opName) {
-    readUnlock(FSNamesystemLockMode.GLOBAL, opName);
+    readUnlock(RwLockMode.GLOBAL, opName);
   }
   }
 
 
   /**
   /**
    * Release read lock with operation name.
    * Release read lock with operation name.
    * @param opName Option name.
    * @param opName Option name.
    */
    */
-  void readUnlock(FSNamesystemLockMode lockMode, String opName);
+  void readUnlock(RwLockMode lockMode, String opName);
 
 
   /** Check if the current thread holds read lock. */
   /** Check if the current thread holds read lock. */
   default boolean hasReadLock() {
   default boolean hasReadLock() {
-    return hasReadLock(FSNamesystemLockMode.GLOBAL);
+    return hasReadLock(RwLockMode.GLOBAL);
   }
   }
 
 
   /** Check if the current thread holds read lock. */
   /** Check if the current thread holds read lock. */
-  boolean hasReadLock(FSNamesystemLockMode lockMode);
+  boolean hasReadLock(RwLockMode lockMode);
 
 
   /** Acquire write lock. */
   /** Acquire write lock. */
   default void writeLock() {
   default void writeLock() {
-    writeLock(FSNamesystemLockMode.GLOBAL);
+    writeLock(RwLockMode.GLOBAL);
   }
   }
 
 
   /** Acquire write lock. */
   /** Acquire write lock. */
-  void writeLock(FSNamesystemLockMode lockMode);
+  void writeLock(RwLockMode lockMode);
   
   
   /** Acquire write lock, unless interrupted while waiting.  */
   /** Acquire write lock, unless interrupted while waiting.  */
   default void writeLockInterruptibly() throws InterruptedException {
   default void writeLockInterruptibly() throws InterruptedException {
-    writeLockInterruptibly(FSNamesystemLockMode.GLOBAL);
+    writeLockInterruptibly(RwLockMode.GLOBAL);
   }
   }
 
 
   /** Acquire write lock, unless interrupted while waiting.  */
   /** Acquire write lock, unless interrupted while waiting.  */
-  void writeLockInterruptibly(FSNamesystemLockMode lockMode) throws InterruptedException;
+  void writeLockInterruptibly(RwLockMode lockMode) throws InterruptedException;
 
 
   /** Release write lock. */
   /** Release write lock. */
   default void writeUnlock() {
   default void writeUnlock() {
-    writeUnlock(FSNamesystemLockMode.GLOBAL, "OTHER");
+    writeUnlock(RwLockMode.GLOBAL, "OTHER");
   }
   }
 
 
   /**
   /**
@@ -90,20 +88,20 @@ public interface RwLock {
    * @param opName Option name.
    * @param opName Option name.
    */
    */
   default void writeUnlock(String opName) {
   default void writeUnlock(String opName) {
-    writeUnlock(FSNamesystemLockMode.GLOBAL, opName);
+    writeUnlock(RwLockMode.GLOBAL, opName);
   }
   }
 
 
   /**
   /**
    * Release write lock with operation name.
    * Release write lock with operation name.
    * @param opName Option name.
    * @param opName Option name.
    */
    */
-  void writeUnlock(FSNamesystemLockMode lockMode, String opName);
+  void writeUnlock(RwLockMode lockMode, String opName);
 
 
   /** Check if the current thread holds write lock. */
   /** Check if the current thread holds write lock. */
   default boolean hasWriteLock() {
   default boolean hasWriteLock() {
-    return hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    return hasWriteLock(RwLockMode.GLOBAL);
   }
   }
 
 
   /** Check if the current thread holds write lock. */
   /** Check if the current thread holds write lock. */
-  boolean hasWriteLock(FSNamesystemLockMode lockMode);
+  boolean hasWriteLock(RwLockMode lockMode);
 }
 }

+ 6 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/FSNamesystemLockMode.java → hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLockMode.java

@@ -15,10 +15,13 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License.
  * limitations under the License.
  */
  */
-package org.apache.hadoop.hdfs.server.namenode.fgl;
+package org.apache.hadoop.hdfs.util;
 
 
-public enum FSNamesystemLockMode {
+/**
+ * This lock mode is used for FGL.
+ */
+public enum RwLockMode {
   GLOBAL,
   GLOBAL,
   FS,
   FS,
   BM
   BM
-}
+}

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java

@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.After;
 import org.junit.After;
 import org.junit.Test;
 import org.junit.Test;
 
 
@@ -176,7 +176,7 @@ public class TestBlocksScheduledCounter {
           .getBlockLocations(cluster.getNameNode(), filePath.toString(), 0, 1)
           .getBlockLocations(cluster.getNameNode(), filePath.toString(), 0, 1)
           .get(0);
           .get(0);
       DatanodeInfo[] locs = block.getLocations();
       DatanodeInfo[] locs = block.getLocations();
-      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+      cluster.getNamesystem().writeLock(RwLockMode.BM);
       try {
       try {
         bm.findAndMarkBlockAsCorrupt(block.getBlock(), locs[0], "STORAGE_ID",
         bm.findAndMarkBlockAsCorrupt(block.getBlock(), locs[0], "STORAGE_ID",
             "TEST");
             "TEST");
@@ -186,7 +186,7 @@ public class TestBlocksScheduledCounter {
         BlockManagerTestUtil.updateState(bm);
         BlockManagerTestUtil.updateState(bm);
         assertEquals(1L, bm.getPendingReconstructionBlocksCount());
         assertEquals(1L, bm.getPendingReconstructionBlocksCount());
       } finally {
       } finally {
-        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+        cluster.getNamesystem().writeUnlock(RwLockMode.BM,
             "findAndMarkBlockAsCorrupt");
             "findAndMarkBlockAsCorrupt");
       }
       }
 
 
@@ -240,13 +240,13 @@ public class TestBlocksScheduledCounter {
         DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
         DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
       }
       }
 
 
-      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+      cluster.getNamesystem().writeLock(RwLockMode.BM);
       try {
       try {
         BlockManagerTestUtil.computeAllPendingWork(bm);
         BlockManagerTestUtil.computeAllPendingWork(bm);
         BlockManagerTestUtil.updateState(bm);
         BlockManagerTestUtil.updateState(bm);
         assertEquals(1L, bm.getPendingReconstructionBlocksCount());
         assertEquals(1L, bm.getPendingReconstructionBlocksCount());
       } finally {
       } finally {
-        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+        cluster.getNamesystem().writeUnlock(RwLockMode.BM,
             "testBlocksScheduledCounterOnTruncate");
             "testBlocksScheduledCounterOnTruncate");
       }
       }
 
 

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java

@@ -51,9 +51,9 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 import org.junit.Test;
@@ -160,13 +160,13 @@ public class TestFileCorruption {
       DatanodeRegistration dnR = InternalDataNodeTestUtils.
       DatanodeRegistration dnR = InternalDataNodeTestUtils.
         getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
         getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
       FSNamesystem ns = cluster.getNamesystem();
       FSNamesystem ns = cluster.getNamesystem();
-      ns.writeLock(FSNamesystemLockMode.BM);
+      ns.writeLock(RwLockMode.BM);
       try {
       try {
         cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,
         cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,
             new DatanodeInfoBuilder().setNodeID(dnR).build(), "TEST",
             new DatanodeInfoBuilder().setNodeID(dnR).build(), "TEST",
             "STORAGE_ID");
             "STORAGE_ID");
       } finally {
       } finally {
-        ns.writeUnlock(FSNamesystemLockMode.BM, "testArrayOutOfBoundsException");
+        ns.writeUnlock(RwLockMode.BM, "testArrayOutOfBoundsException");
       }
       }
       
       
       // open the file
       // open the file
@@ -211,16 +211,16 @@ public class TestFileCorruption {
       FSNamesystem ns = cluster.getNamesystem();
       FSNamesystem ns = cluster.getNamesystem();
       //fail the storage on that node which has the block
       //fail the storage on that node which has the block
       try {
       try {
-        ns.writeLock(FSNamesystemLockMode.BM);
+        ns.writeLock(RwLockMode.BM);
         updateAllStorages(bm);
         updateAllStorages(bm);
       } finally {
       } finally {
-        ns.writeUnlock(FSNamesystemLockMode.BM, "testCorruptionWithDiskFailure");
+        ns.writeUnlock(RwLockMode.BM, "testCorruptionWithDiskFailure");
       }
       }
-      ns.writeLock(FSNamesystemLockMode.BM);
+      ns.writeLock(RwLockMode.BM);
       try {
       try {
         markAllBlocksAsCorrupt(bm, blk);
         markAllBlocksAsCorrupt(bm, blk);
       } finally {
       } finally {
-        ns.writeUnlock(FSNamesystemLockMode.BM, "testCorruptionWithDiskFailure");
+        ns.writeUnlock(RwLockMode.BM, "testCorruptionWithDiskFailure");
       }
       }
 
 
       // open the file
       // open the file

+ 11 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java

@@ -31,9 +31,9 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerSafeMode.BMSafeModeStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerSafeMode.BMSafeModeStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.test.Whitebox;
 import org.junit.Assert;
 import org.junit.Assert;
 
 
@@ -51,23 +51,23 @@ public class BlockManagerTestUtil {
   /** @return the datanode descriptor for the given the given storageID. */
   /** @return the datanode descriptor for the given the given storageID. */
   public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
   public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
       final String storageID) {
       final String storageID) {
-    ns.readLock(FSNamesystemLockMode.BM);
+    ns.readLock(RwLockMode.BM);
     try {
     try {
       return ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
       return ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
     } finally {
     } finally {
-      ns.readUnlock(FSNamesystemLockMode.BM, "getDatanode");
+      ns.readUnlock(RwLockMode.BM, "getDatanode");
     }
     }
   }
   }
 
 
   public static Iterator<BlockInfo> getBlockIterator(final FSNamesystem ns,
   public static Iterator<BlockInfo> getBlockIterator(final FSNamesystem ns,
       final String storageID, final int startBlock) {
       final String storageID, final int startBlock) {
-    ns.readLock(FSNamesystemLockMode.BM);
+    ns.readLock(RwLockMode.BM);
     try {
     try {
       DatanodeDescriptor dn =
       DatanodeDescriptor dn =
           ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
           ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
       return dn.getBlockIterator(startBlock);
       return dn.getBlockIterator(startBlock);
     } finally {
     } finally {
-      ns.readUnlock(FSNamesystemLockMode.BM, "getBlockIterator");
+      ns.readUnlock(RwLockMode.BM, "getBlockIterator");
     }
     }
   }
   }
 
 
@@ -89,7 +89,7 @@ public class BlockManagerTestUtil {
    */
    */
   public static int[] getReplicaInfo(final FSNamesystem namesystem, final Block b) {
   public static int[] getReplicaInfo(final FSNamesystem namesystem, final Block b) {
     final BlockManager bm = namesystem.getBlockManager();
     final BlockManager bm = namesystem.getBlockManager();
-    namesystem.readLock(FSNamesystemLockMode.BM);
+    namesystem.readLock(RwLockMode.BM);
     try {
     try {
       final BlockInfo storedBlock = bm.getStoredBlock(b);
       final BlockInfo storedBlock = bm.getStoredBlock(b);
       return new int[]{getNumberOfRacks(bm, b),
       return new int[]{getNumberOfRacks(bm, b),
@@ -97,7 +97,7 @@ public class BlockManagerTestUtil {
           bm.neededReconstruction.contains(storedBlock) ? 1 : 0,
           bm.neededReconstruction.contains(storedBlock) ? 1 : 0,
           getNumberOfDomains(bm, b)};
           getNumberOfDomains(bm, b)};
     } finally {
     } finally {
-      namesystem.readUnlock(FSNamesystemLockMode.BM, "getReplicaInfo");
+      namesystem.readUnlock(RwLockMode.BM, "getReplicaInfo");
     }
     }
   }
   }
 
 
@@ -248,7 +248,7 @@ public class BlockManagerTestUtil {
    */
    */
   public static void noticeDeadDatanode(NameNode nn, String dnName) {
   public static void noticeDeadDatanode(NameNode nn, String dnName) {
     FSNamesystem namesystem = nn.getNamesystem();
     FSNamesystem namesystem = nn.getNamesystem();
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
       DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
       HeartbeatManager hbm = dnm.getHeartbeatManager();
       HeartbeatManager hbm = dnm.getHeartbeatManager();
@@ -266,7 +266,7 @@ public class BlockManagerTestUtil {
         hbm.heartbeatCheck();
         hbm.heartbeatCheck();
       }
       }
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "noticeDeadDatanode");
+      namesystem.writeUnlock(RwLockMode.BM, "noticeDeadDatanode");
     }
     }
   }
   }
   
   
@@ -303,12 +303,12 @@ public class BlockManagerTestUtil {
    */
    */
   public static int checkHeartbeatAndGetUnderReplicatedBlocksCount(
   public static int checkHeartbeatAndGetUnderReplicatedBlocksCount(
       FSNamesystem namesystem, BlockManager bm) {
       FSNamesystem namesystem, BlockManager bm) {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
       bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
       return bm.getUnderReplicatedNotMissingBlocks();
       return bm.getUnderReplicatedNotMissingBlocks();
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM,
+      namesystem.writeUnlock(RwLockMode.BM,
           "checkHeartbeatAndGetUnderReplicatedBlocksCount");
           "checkHeartbeatAndGetUnderReplicatedBlocksCount");
     }
     }
   }
   }

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

@@ -21,7 +21,6 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.LinkedListMultimap;
 import org.apache.hadoop.thirdparty.com.google.common.collect.LinkedListMultimap;
@@ -76,6 +75,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ECSchema;
@@ -167,10 +167,10 @@ public class TestBlockManager {
     fsn = Mockito.mock(FSNamesystem.class);
     fsn = Mockito.mock(FSNamesystem.class);
     Mockito.doReturn(true).when(fsn).hasWriteLock();
     Mockito.doReturn(true).when(fsn).hasWriteLock();
     Mockito.doReturn(true).when(fsn).hasReadLock();
     Mockito.doReturn(true).when(fsn).hasReadLock();
-    Mockito.doReturn(true).when(fsn).hasWriteLock(FSNamesystemLockMode.GLOBAL);
-    Mockito.doReturn(true).when(fsn).hasReadLock(FSNamesystemLockMode.GLOBAL);
-    Mockito.doReturn(true).when(fsn).hasWriteLock(FSNamesystemLockMode.BM);
-    Mockito.doReturn(true).when(fsn).hasReadLock(FSNamesystemLockMode.BM);
+    Mockito.doReturn(true).when(fsn).hasWriteLock(RwLockMode.GLOBAL);
+    Mockito.doReturn(true).when(fsn).hasReadLock(RwLockMode.GLOBAL);
+    Mockito.doReturn(true).when(fsn).hasWriteLock(RwLockMode.BM);
+    Mockito.doReturn(true).when(fsn).hasReadLock(RwLockMode.BM);
     Mockito.doReturn(true).when(fsn).isRunning();
     Mockito.doReturn(true).when(fsn).isRunning();
     //Make shouldPopulaeReplQueues return true
     //Make shouldPopulaeReplQueues return true
     HAContext haContext = Mockito.mock(HAContext.class);
     HAContext haContext = Mockito.mock(HAContext.class);
@@ -1624,7 +1624,7 @@ public class TestBlockManager {
       }
       }
       failedStorageDataNode.updateHeartbeat(reports.toArray(StorageReport
       failedStorageDataNode.updateHeartbeat(reports.toArray(StorageReport
           .EMPTY_ARRAY), 0L, 0L, 0, 0, null);
           .EMPTY_ARRAY), 0L, 0L, 0, 0, null);
-      ns.writeLock(FSNamesystemLockMode.BM);
+      ns.writeLock(RwLockMode.BM);
       DatanodeStorageInfo corruptStorageInfo= null;
       DatanodeStorageInfo corruptStorageInfo= null;
       for(int i=0; i<corruptStorageDataNode.getStorageInfos().length; i++) {
       for(int i=0; i<corruptStorageDataNode.getStorageInfos().length; i++) {
         corruptStorageInfo = corruptStorageDataNode.getStorageInfos()[i];
         corruptStorageInfo = corruptStorageDataNode.getStorageInfos()[i];
@@ -1638,16 +1638,16 @@ public class TestBlockManager {
       blockManager.findAndMarkBlockAsCorrupt(blk, corruptStorageDataNode,
       blockManager.findAndMarkBlockAsCorrupt(blk, corruptStorageDataNode,
           corruptStorageInfo.getStorageID(),
           corruptStorageInfo.getStorageID(),
           CorruptReplicasMap.Reason.ANY.toString());
           CorruptReplicasMap.Reason.ANY.toString());
-      ns.writeUnlock(FSNamesystemLockMode.BM, "testBlockManagerMachinesArray");
+      ns.writeUnlock(RwLockMode.BM, "testBlockManagerMachinesArray");
       BlockInfo[] blockInfos = new BlockInfo[] {blockInfo};
       BlockInfo[] blockInfos = new BlockInfo[] {blockInfo};
-      ns.readLock(FSNamesystemLockMode.BM);
+      ns.readLock(RwLockMode.BM);
       LocatedBlocks locatedBlocks =
       LocatedBlocks locatedBlocks =
           blockManager.createLocatedBlocks(blockInfos, 3L, false, 0L, 3L,
           blockManager.createLocatedBlocks(blockInfos, 3L, false, 0L, 3L,
               false, false, null, null);
               false, false, null, null);
       assertTrue("Located Blocks should exclude corrupt" +
       assertTrue("Located Blocks should exclude corrupt" +
               "replicas and failed storages",
               "replicas and failed storages",
           locatedBlocks.getLocatedBlocks().size() == 1);
           locatedBlocks.getLocatedBlocks().size() == 1);
-      ns.readUnlock(FSNamesystemLockMode.BM, "open");
+      ns.readUnlock(RwLockMode.BM, "open");
     } finally {
     } finally {
       if (cluster != null) {
       if (cluster != null) {
         cluster.shutdown();
         cluster.shutdown();

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerSafeMode.BMSafe
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.test.Whitebox;
 
 
@@ -96,8 +96,8 @@ public class TestBlockManagerSafeMode {
     fsn = mock(FSNamesystem.class);
     fsn = mock(FSNamesystem.class);
     doReturn(true).when(fsn).hasWriteLock();
     doReturn(true).when(fsn).hasWriteLock();
     doReturn(true).when(fsn).hasReadLock();
     doReturn(true).when(fsn).hasReadLock();
-    doReturn(true).when(fsn).hasWriteLock(FSNamesystemLockMode.BM);
-    doReturn(true).when(fsn).hasReadLock(FSNamesystemLockMode.BM);
+    doReturn(true).when(fsn).hasWriteLock(RwLockMode.BM);
+    doReturn(true).when(fsn).hasReadLock(RwLockMode.BM);
     doReturn(true).when(fsn).isRunning();
     doReturn(true).when(fsn).isRunning();
     NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
 
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java

@@ -23,7 +23,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
@@ -41,6 +40,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 import org.junit.Test;
 import org.slf4j.event.Level;
 import org.slf4j.event.Level;
@@ -603,11 +603,11 @@ public class TestBlocksWithNotEnoughRacks {
 
 
   static BlockReconstructionWork scheduleReconstruction(
   static BlockReconstructionWork scheduleReconstruction(
       FSNamesystem fsn, BlockInfo block, int priority) {
       FSNamesystem fsn, BlockInfo block, int priority) {
-    fsn.writeLock(FSNamesystemLockMode.BM);
+    fsn.writeLock(RwLockMode.BM);
     try {
     try {
       return fsn.getBlockManager().scheduleReconstruction(block, priority);
       return fsn.getBlockManager().scheduleReconstruction(block, priority);
     } finally {
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.BM, "scheduleReconstruction");
+      fsn.writeUnlock(RwLockMode.BM, "scheduleReconstruction");
     }
     }
   }
   }
 
 

+ 13 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java

@@ -40,8 +40,8 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.util.VersionInfo;
 import org.junit.After;
 import org.junit.After;
@@ -132,7 +132,7 @@ public class TestComputeInvalidateWork {
   public void testComputeInvalidateReplicas() throws Exception {
   public void testComputeInvalidateReplicas() throws Exception {
     final int blockInvalidateLimit = bm.getDatanodeManager()
     final int blockInvalidateLimit = bm.getDatanodeManager()
         .getBlockInvalidateLimit();
         .getBlockInvalidateLimit();
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       for (int i=0; i<nodes.length; i++) {
       for (int i=0; i<nodes.length; i++) {
         for(int j=0; j<3*blockInvalidateLimit+1; j++) {
         for(int j=0; j<3*blockInvalidateLimit+1; j++) {
@@ -143,7 +143,7 @@ public class TestComputeInvalidateWork {
       }
       }
       verifyInvalidationWorkCounts(blockInvalidateLimit);
       verifyInvalidationWorkCounts(blockInvalidateLimit);
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testComputeInvalidateReplicas");
+      namesystem.writeUnlock(RwLockMode.BM, "testComputeInvalidateReplicas");
     }
     }
   }
   }
 
 
@@ -155,7 +155,7 @@ public class TestComputeInvalidateWork {
   public void testComputeInvalidateStripedBlockGroups() throws Exception {
   public void testComputeInvalidateStripedBlockGroups() throws Exception {
     final int blockInvalidateLimit =
     final int blockInvalidateLimit =
         bm.getDatanodeManager().getBlockInvalidateLimit();
         bm.getDatanodeManager().getBlockInvalidateLimit();
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       int nodeCount = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
       int nodeCount = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
       for (int i = 0; i < nodeCount; i++) {
       for (int i = 0; i < nodeCount; i++) {
@@ -168,7 +168,7 @@ public class TestComputeInvalidateWork {
       }
       }
       verifyInvalidationWorkCounts(blockInvalidateLimit);
       verifyInvalidationWorkCounts(blockInvalidateLimit);
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testComputeInvalidateStripedBlockGroups");
+      namesystem.writeUnlock(RwLockMode.BM, "testComputeInvalidateStripedBlockGroups");
     }
     }
   }
   }
 
 
@@ -182,7 +182,7 @@ public class TestComputeInvalidateWork {
     final int blockInvalidateLimit =
     final int blockInvalidateLimit =
         bm.getDatanodeManager().getBlockInvalidateLimit();
         bm.getDatanodeManager().getBlockInvalidateLimit();
     final Random random = new Random(System.currentTimeMillis());
     final Random random = new Random(System.currentTimeMillis());
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       int nodeCount = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
       int nodeCount = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
       for (int i = 0; i < nodeCount; i++) {
       for (int i = 0; i < nodeCount; i++) {
@@ -202,7 +202,7 @@ public class TestComputeInvalidateWork {
       }
       }
       verifyInvalidationWorkCounts(blockInvalidateLimit);
       verifyInvalidationWorkCounts(blockInvalidateLimit);
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testComputeInvalidate");
+      namesystem.writeUnlock(RwLockMode.BM, "testComputeInvalidate");
     }
     }
   }
   }
 
 
@@ -213,7 +213,7 @@ public class TestComputeInvalidateWork {
    */
    */
   @Test(timeout=120000)
   @Test(timeout=120000)
   public void testDatanodeReformat() throws Exception {
   public void testDatanodeReformat() throws Exception {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
     try {
       // Change the datanode UUID to emulate a reformat
       // Change the datanode UUID to emulate a reformat
       String poolId = cluster.getNamesystem().getBlockPoolId();
       String poolId = cluster.getNamesystem().getBlockPoolId();
@@ -235,7 +235,7 @@ public class TestComputeInvalidateWork {
       assertEquals(0, bm.computeInvalidateWork(1));
       assertEquals(0, bm.computeInvalidateWork(1));
       assertEquals(0, bm.getPendingDeletionBlocksCount());
       assertEquals(0, bm.getPendingDeletionBlocksCount());
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testDatanodeReformat");
+      namesystem.writeUnlock(RwLockMode.BM, "testDatanodeReformat");
     }
     }
   }
   }
 
 
@@ -256,7 +256,7 @@ public class TestComputeInvalidateWork {
     dfs.delete(ecFile, false);
     dfs.delete(ecFile, false);
     BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty(
     BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty(
         cluster.getNamesystem(0).getBlockManager());
         cluster.getNamesystem(0).getBlockManager());
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     InvalidateBlocks invalidateBlocks;
     InvalidateBlocks invalidateBlocks;
     int totalStripedDataBlocks = totalBlockGroups * (ecPolicy.getNumDataUnits()
     int totalStripedDataBlocks = totalBlockGroups * (ecPolicy.getNumDataUnits()
         + ecPolicy.getNumParityUnits());
         + ecPolicy.getNumParityUnits());
@@ -273,7 +273,7 @@ public class TestComputeInvalidateWork {
       assertEquals("Unexpected invalidate count for striped block groups!",
       assertEquals("Unexpected invalidate count for striped block groups!",
           totalStripedDataBlocks, invalidateBlocks.getECBlocks());
           totalStripedDataBlocks, invalidateBlocks.getECBlocks());
     } finally {
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testDatanodeReRegistration");
+      namesystem.writeUnlock(RwLockMode.BM, "testDatanodeReRegistration");
     }
     }
     // Re-register each DN and see that it wipes the invalidation work
     // Re-register each DN and see that it wipes the invalidation work
     int totalBlockGroupsPerDataNode = totalBlockGroups;
     int totalBlockGroupsPerDataNode = totalBlockGroups;
@@ -285,14 +285,14 @@ public class TestComputeInvalidateWork {
           new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
           new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
           new ExportedBlockKeys(),
           new ExportedBlockKeys(),
           VersionInfo.getVersion());
           VersionInfo.getVersion());
-      namesystem.writeLock(FSNamesystemLockMode.BM);
+      namesystem.writeLock(RwLockMode.BM);
       try {
       try {
         bm.getDatanodeManager().registerDatanode(reg);
         bm.getDatanodeManager().registerDatanode(reg);
         expected -= (totalReplicasPerDataNode + totalBlockGroupsPerDataNode);
         expected -= (totalReplicasPerDataNode + totalBlockGroupsPerDataNode);
         assertEquals("Expected number of invalidate blocks to decrease",
         assertEquals("Expected number of invalidate blocks to decrease",
             (long) expected, invalidateBlocks.numBlocks());
             (long) expected, invalidateBlocks.numBlocks());
       } finally {
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.BM, "testDatanodeReRegistration");
+        namesystem.writeUnlock(RwLockMode.BM, "testDatanodeReRegistration");
       }
       }
     }
     }
   }
   }

+ 13 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java

@@ -36,7 +36,6 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Random;
 import java.util.Set;
 import java.util.Set;
 
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
@@ -65,6 +64,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell;
@@ -120,7 +120,7 @@ public class TestDatanodeManager {
     //Create the DatanodeManager which will be tested
     //Create the DatanodeManager which will be tested
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 0);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 0);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 10);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 10);
@@ -156,7 +156,7 @@ public class TestDatanodeManager {
     //Create the DatanodeManager which will be tested
     //Create the DatanodeManager which will be tested
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     DatanodeManager dm = mockDatanodeManager(fsn, conf);
     DatanodeManager dm = mockDatanodeManager(fsn, conf);
 
 
@@ -187,7 +187,7 @@ public class TestDatanodeManager {
     //Create the DatanodeManager which will be tested
     //Create the DatanodeManager which will be tested
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
     DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
 
 
     //Seed the RNG with a known value so test failures are easier to reproduce
     //Seed the RNG with a known value so test failures are easier to reproduce
@@ -287,7 +287,7 @@ public class TestDatanodeManager {
     //Create the DatanodeManager which will be tested
     //Create the DatanodeManager which will be tested
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
 
 
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     
     
@@ -406,7 +406,7 @@ public class TestDatanodeManager {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     if (scriptFileName != null && !scriptFileName.isEmpty()) {
     if (scriptFileName != null && !scriptFileName.isEmpty()) {
       URL shellScript = getClass().getResource(scriptFileName);
       URL shellScript = getClass().getResource(scriptFileName);
       Path resourcePath = Paths.get(shellScript.toURI());
       Path resourcePath = Paths.get(shellScript.toURI());
@@ -505,7 +505,7 @@ public class TestDatanodeManager {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     URL shellScript = getClass().getResource(
     URL shellScript = getClass().getResource(
         "/" + Shell.appendScriptExtension("topology-script"));
         "/" + Shell.appendScriptExtension("topology-script"));
     Path resourcePath = Paths.get(shellScript.toURI());
     Path resourcePath = Paths.get(shellScript.toURI());
@@ -655,7 +655,7 @@ public class TestDatanodeManager {
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     URL shellScript = getClass()
     URL shellScript = getClass()
         .getResource("/" + Shell.appendScriptExtension("topology-script"));
         .getResource("/" + Shell.appendScriptExtension("topology-script"));
     Path resourcePath = Paths.get(shellScript.toURI());
     Path resourcePath = Paths.get(shellScript.toURI());
@@ -723,7 +723,7 @@ public class TestDatanodeManager {
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     URL shellScript = getClass()
     URL shellScript = getClass()
         .getResource("/" + Shell.appendScriptExtension("topology-script"));
         .getResource("/" + Shell.appendScriptExtension("topology-script"));
     Path resourcePath = Paths.get(shellScript.toURI());
     Path resourcePath = Paths.get(shellScript.toURI());
@@ -810,7 +810,7 @@ public class TestDatanodeManager {
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     URL shellScript = getClass()
     URL shellScript = getClass()
         .getResource("/" + Shell.appendScriptExtension("topology-script"));
         .getResource("/" + Shell.appendScriptExtension("topology-script"));
     Path resourcePath = Paths.get(shellScript.toURI());
     Path resourcePath = Paths.get(shellScript.toURI());
@@ -900,7 +900,7 @@ public class TestDatanodeManager {
 
 
     // Set the write lock so that the DatanodeManager can start
     // Set the write lock so that the DatanodeManager can start
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
 
 
     DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
     DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
     HostFileManager hm = new HostFileManager();
     HostFileManager hm = new HostFileManager();
@@ -999,7 +999,7 @@ public class TestDatanodeManager {
       throws IOException {
       throws IOException {
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, maxTransfers);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, maxTransfers);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
@@ -1154,7 +1154,7 @@ public class TestDatanodeManager {
       throws IOException {
       throws IOException {
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, maxTransfers);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, maxTransfers);
     DatanodeManager dm = Mockito.spy(mockDatanodeManager(fsn, conf));
     DatanodeManager dm = Mockito.spy(mockDatanodeManager(fsn, conf));

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java

@@ -36,13 +36,13 @@ import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.Rule;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 import org.junit.rules.Timeout;
@@ -92,7 +92,7 @@ public class TestHeartbeatHandling {
       final DatanodeStorageInfo[] ONE_TARGET = {dd.getStorageInfo(storageID)};
       final DatanodeStorageInfo[] ONE_TARGET = {dd.getStorageInfo(storageID)};
 
 
       try {
       try {
-        namesystem.writeLock(FSNamesystemLockMode.BM);
+        namesystem.writeLock(RwLockMode.BM);
         synchronized(hm) {
         synchronized(hm) {
           for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
           for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
             dd.addBlockToBeReplicated(
             dd.addBlockToBeReplicated(
@@ -137,7 +137,7 @@ public class TestHeartbeatHandling {
           assertEquals(0, cmds.length);
           assertEquals(0, cmds.length);
         }
         }
       } finally {
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.BM, "testHeartbeat");
+        namesystem.writeUnlock(RwLockMode.BM, "testHeartbeat");
       }
       }
     } finally {
     } finally {
       cluster.shutdown();
       cluster.shutdown();
@@ -177,7 +177,7 @@ public class TestHeartbeatHandling {
       dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
       dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
 
 
       try {
       try {
-        namesystem.writeLock(FSNamesystemLockMode.BM);
+        namesystem.writeLock(RwLockMode.BM);
         synchronized(hm) {
         synchronized(hm) {
           NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem);
           NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem);
           NameNodeAdapter.sendHeartBeat(nodeReg2, dd2, namesystem);
           NameNodeAdapter.sendHeartBeat(nodeReg2, dd2, namesystem);
@@ -256,7 +256,7 @@ public class TestHeartbeatHandling {
           assertEquals(recoveringNodes[2], dd3);
           assertEquals(recoveringNodes[2], dd3);
         }
         }
       } finally {
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.BM, "testHeartbeat");
+        namesystem.writeUnlock(RwLockMode.BM, "testHeartbeat");
       }
       }
     } finally {
     } finally {
       cluster.shutdown();
       cluster.shutdown();

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java

@@ -39,12 +39,12 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.Test;
@@ -185,7 +185,7 @@ public class TestNameNodePrunesMissingStorages {
         DataNodeTestUtils.triggerBlockReport(dn);
         DataNodeTestUtils.triggerBlockReport(dn);
       }
       }
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/foo1"));
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/foo1"));
-      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+      cluster.getNamesystem().writeLock(RwLockMode.BM);
       final String storageIdToRemove;
       final String storageIdToRemove;
       String datanodeUuid;
       String datanodeUuid;
       // Find the first storage which this block is in.
       // Find the first storage which this block is in.
@@ -201,7 +201,7 @@ public class TestNameNodePrunesMissingStorages {
         storageIdToRemove = info.getStorageID();
         storageIdToRemove = info.getStorageID();
         datanodeUuid = info.getDatanodeDescriptor().getDatanodeUuid();
         datanodeUuid = info.getDatanodeDescriptor().getDatanodeUuid();
       } finally {
       } finally {
-        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+        cluster.getNamesystem().writeUnlock(RwLockMode.BM,
             "testRemovingStorageDoesNotProduceZombies");
             "testRemovingStorageDoesNotProduceZombies");
       }
       }
       // Find the DataNode which holds that first storage.
       // Find the DataNode which holds that first storage.
@@ -347,7 +347,7 @@ public class TestNameNodePrunesMissingStorages {
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
         @Override
         @Override
         public Boolean get() {
         public Boolean get() {
-          cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+          cluster.getNamesystem().writeLock(RwLockMode.BM);
           try {
           try {
             Iterator<DatanodeStorageInfo> storageInfoIter =
             Iterator<DatanodeStorageInfo> storageInfoIter =
                 cluster.getNamesystem().getBlockManager().
                 cluster.getNamesystem().getBlockManager().
@@ -369,7 +369,7 @@ public class TestNameNodePrunesMissingStorages {
             LOG.info("Successfully found " + block.getBlockName() + " in " +
             LOG.info("Successfully found " + block.getBlockName() + " in " +
                 "be in storage id " + newStorageId);
                 "be in storage id " + newStorageId);
           } finally {
           } finally {
-            cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testRenamingStorageIds");
+            cluster.getNamesystem().writeUnlock(RwLockMode.BM, "testRenamingStorageIds");
           }
           }
           return true;
           return true;
         }
         }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
 import org.junit.Test;
 
 
@@ -175,14 +175,14 @@ public class TestNodeCount {
   /* threadsafe read of the replication counts for this block */
   /* threadsafe read of the replication counts for this block */
   NumberReplicas countNodes(Block block, FSNamesystem namesystem) {
   NumberReplicas countNodes(Block block, FSNamesystem namesystem) {
     BlockManager blockManager = namesystem.getBlockManager();
     BlockManager blockManager = namesystem.getBlockManager();
-    namesystem.readLock(FSNamesystemLockMode.BM);
+    namesystem.readLock(RwLockMode.BM);
     try {
     try {
       lastBlock = block;
       lastBlock = block;
       lastNum = blockManager.countNodes(blockManager.getStoredBlock(block));
       lastNum = blockManager.countNodes(blockManager.getStoredBlock(block));
       return lastNum;
       return lastNum;
     }
     }
     finally {
     finally {
-      namesystem.readUnlock(FSNamesystemLockMode.BM, "countNodes");
+      namesystem.readUnlock(RwLockMode.BM, "countNodes");
     }
     }
   }
   }
 }
 }

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java

@@ -39,8 +39,8 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.Test;
 import org.junit.Test;
 
 
 public class TestOverReplicatedBlocks {
 public class TestOverReplicatedBlocks {
@@ -96,7 +96,7 @@ public class TestOverReplicatedBlocks {
       final BlockManager bm = namesystem.getBlockManager();
       final BlockManager bm = namesystem.getBlockManager();
       final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
       final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
       try {
       try {
-        namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+        namesystem.writeLock(RwLockMode.GLOBAL);
         synchronized(hm) {
         synchronized(hm) {
           // set live datanode's remaining space to be 0 
           // set live datanode's remaining space to be 0 
           // so they will be chosen to be deleted when over-replication occurs
           // so they will be chosen to be deleted when over-replication occurs
@@ -119,7 +119,7 @@ public class TestOverReplicatedBlocks {
               bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
               bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
         }
         }
       } finally {
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "testProcesOverReplicateBlock");
+        namesystem.writeUnlock(RwLockMode.GLOBAL, "testProcesOverReplicateBlock");
       }
       }
       
       
     } finally {
     } finally {
@@ -182,11 +182,11 @@ public class TestOverReplicatedBlocks {
 
 
       // All replicas for deletion should be scheduled on lastDN.
       // All replicas for deletion should be scheduled on lastDN.
       // And should not actually be deleted, because lastDN does not heartbeat.
       // And should not actually be deleted, because lastDN does not heartbeat.
-      namesystem.readLock(FSNamesystemLockMode.BM);
+      namesystem.readLock(RwLockMode.BM);
       final int dnBlocks = bm.getExcessSize4Testing(dnReg.getDatanodeUuid());
       final int dnBlocks = bm.getExcessSize4Testing(dnReg.getDatanodeUuid());
       assertEquals("Replicas on node " + lastDNid + " should have been deleted",
       assertEquals("Replicas on node " + lastDNid + " should have been deleted",
           SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks);
           SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks);
-      namesystem.readUnlock(FSNamesystemLockMode.BM, "excessSize4Testing");
+      namesystem.readUnlock(RwLockMode.BM, "excessSize4Testing");
       for(BlockLocation location : locs)
       for(BlockLocation location : locs)
         assertEquals("Block should still have 4 replicas",
         assertEquals("Block should still have 4 replicas",
             4, location.getNames().length);
             4, location.getNames().length);

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java

@@ -51,12 +51,12 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
@@ -289,13 +289,13 @@ public class TestPendingReconstruction {
 
 
       // A received IBR processing calls addBlock(). If the gen stamp in the
       // A received IBR processing calls addBlock(). If the gen stamp in the
       // report is not the same, it should stay in pending.
       // report is not the same, it should stay in pending.
-      fsn.writeLock(FSNamesystemLockMode.BM);
+      fsn.writeLock(RwLockMode.BM);
       try {
       try {
         // Use a wrong gen stamp.
         // Use a wrong gen stamp.
         blkManager.addBlock(desc[0].getStorageInfos()[0],
         blkManager.addBlock(desc[0].getStorageInfos()[0],
             new Block(1, 1, 0), null);
             new Block(1, 1, 0), null);
       } finally {
       } finally {
-        fsn.writeUnlock(FSNamesystemLockMode.BM, "testProcessPendingReconstructions");
+        fsn.writeUnlock(RwLockMode.BM, "testProcessPendingReconstructions");
       }
       }
 
 
       // The block should still be pending
       // The block should still be pending
@@ -304,12 +304,12 @@ public class TestPendingReconstruction {
 
 
       // A block report with the correct gen stamp should remove the record
       // A block report with the correct gen stamp should remove the record
       // from the pending queue.
       // from the pending queue.
-      fsn.writeLock(FSNamesystemLockMode.BM);
+      fsn.writeLock(RwLockMode.BM);
       try {
       try {
         blkManager.addBlock(desc[0].getStorageInfos()[0],
         blkManager.addBlock(desc[0].getStorageInfos()[0],
             new Block(1, 1, 1), null);
             new Block(1, 1, 1), null);
       } finally {
       } finally {
-        fsn.writeUnlock(FSNamesystemLockMode.BM, "testProcessPendingReconstructions");
+        fsn.writeUnlock(RwLockMode.BM, "testProcessPendingReconstructions");
       }
       }
 
 
       GenericTestUtils.waitFor(() -> pendingReconstruction.size() == 0, 500,
       GenericTestUtils.waitFor(() -> pendingReconstruction.size() == 0, 500,
@@ -460,7 +460,7 @@ public class TestPendingReconstruction {
       // 3. mark a couple of blocks as corrupt
       // 3. mark a couple of blocks as corrupt
       LocatedBlock block = NameNodeAdapter.getBlockLocations(
       LocatedBlock block = NameNodeAdapter.getBlockLocations(
           cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
           cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
-      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+      cluster.getNamesystem().writeLock(RwLockMode.BM);
       try {
       try {
         bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
         bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
             "STORAGE_ID", "TEST");
             "STORAGE_ID", "TEST");
@@ -472,7 +472,7 @@ public class TestPendingReconstruction {
         BlockInfo storedBlock = bm.getStoredBlock(block.getBlock().getLocalBlock());
         BlockInfo storedBlock = bm.getStoredBlock(block.getBlock().getLocalBlock());
         assertEquals(bm.pendingReconstruction.getNumReplicas(storedBlock), 2);
         assertEquals(bm.pendingReconstruction.getNumReplicas(storedBlock), 2);
       } finally {
       } finally {
-        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testPendingAndInvalidate");
+        cluster.getNamesystem().writeUnlock(RwLockMode.BM, "testPendingAndInvalidate");
       }
       }
 
 
       // 4. delete the file
       // 4. delete the file
@@ -508,7 +508,7 @@ public class TestPendingReconstruction {
         DATANODE_COUNT).build();
         DATANODE_COUNT).build();
     tmpCluster.waitActive();
     tmpCluster.waitActive();
     FSNamesystem fsn = tmpCluster.getNamesystem(0);
     FSNamesystem fsn = tmpCluster.getNamesystem(0);
-    fsn.writeLock(FSNamesystemLockMode.BM);
+    fsn.writeLock(RwLockMode.BM);
 
 
     try {
     try {
       BlockManager bm = fsn.getBlockManager();
       BlockManager bm = fsn.getBlockManager();
@@ -564,7 +564,7 @@ public class TestPendingReconstruction {
       }, 100, 60000);
       }, 100, 60000);
     } finally {
     } finally {
       tmpCluster.shutdown();
       tmpCluster.shutdown();
-      fsn.writeUnlock(FSNamesystemLockMode.BM, "testReplicationCounter");
+      fsn.writeUnlock(RwLockMode.BM, "testReplicationCounter");
     }
     }
   }
   }
 
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java

@@ -24,9 +24,9 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestProvidedImpl;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestProvidedImpl;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.util.RwLock;
 import org.apache.hadoop.hdfs.util.RwLock;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.Before;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.Test;
 import java.io.IOException;
 import java.io.IOException;
@@ -88,7 +88,7 @@ public class TestProvidedStorageMap {
     DatanodeStorage dn1DiskStorage = new DatanodeStorage(
     DatanodeStorage dn1DiskStorage = new DatanodeStorage(
         "sid-1", DatanodeStorage.State.NORMAL, StorageType.DISK);
         "sid-1", DatanodeStorage.State.NORMAL, StorageType.DISK);
 
 
-    when(nameSystemLock.hasWriteLock(FSNamesystemLockMode.GLOBAL)).thenReturn(true);
+    when(nameSystemLock.hasWriteLock(RwLockMode.GLOBAL)).thenReturn(true);
     DatanodeStorageInfo dns1Provided =
     DatanodeStorageInfo dns1Provided =
         providedMap.getStorage(dn1, dn1ProvidedStorage);
         providedMap.getStorage(dn1, dn1ProvidedStorage);
     DatanodeStorageInfo dns1Disk = providedMap.getStorage(dn1, dn1DiskStorage);
     DatanodeStorageInfo dns1Disk = providedMap.getStorage(dn1, dn1DiskStorage);

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.test.Whitebox;
@@ -197,11 +197,11 @@ public class TestReconstructStripedBlocksWithRackAwareness {
       DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
       DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
     }
     }
 
 
-    fsn.writeLock(FSNamesystemLockMode.BM);
+    fsn.writeLock(RwLockMode.BM);
     try {
     try {
       bm.processMisReplicatedBlocks();
       bm.processMisReplicatedBlocks();
     } finally {
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.BM, "testReconstructForNotEnoughRacks");
+      fsn.writeUnlock(RwLockMode.BM, "testReconstructForNotEnoughRacks");
     }
     }
 
 
     // check if redundancy monitor correctly schedule the reconstruction work.
     // check if redundancy monitor correctly schedule the reconstruction work.
@@ -343,12 +343,12 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     final DatanodeAdminManager decomManager =
     final DatanodeAdminManager decomManager =
         (DatanodeAdminManager) Whitebox.getInternalState(
         (DatanodeAdminManager) Whitebox.getInternalState(
             dm, "datanodeAdminManager");
             dm, "datanodeAdminManager");
-    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().writeLock(RwLockMode.BM);
     try {
     try {
       dn9.stopDecommission();
       dn9.stopDecommission();
       decomManager.startDecommission(dn9);
       decomManager.startDecommission(dn9);
     } finally {
     } finally {
-      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      cluster.getNamesystem().writeUnlock(RwLockMode.BM,
           "testReconstructionWithDecommission");
           "testReconstructionWithDecommission");
     }
     }
 
 

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java

@@ -66,8 +66,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Level;
@@ -1407,12 +1407,12 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     FSNamesystem mockNS = mock(FSNamesystem.class);
     FSNamesystem mockNS = mock(FSNamesystem.class);
     when(mockNS.hasWriteLock()).thenReturn(true);
     when(mockNS.hasWriteLock()).thenReturn(true);
     when(mockNS.hasReadLock()).thenReturn(true);
     when(mockNS.hasReadLock()).thenReturn(true);
-    when(mockNS.hasWriteLock(FSNamesystemLockMode.GLOBAL)).thenReturn(true);
-    when(mockNS.hasReadLock(FSNamesystemLockMode.GLOBAL)).thenReturn(true);
-    when(mockNS.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
-    when(mockNS.hasReadLock(FSNamesystemLockMode.BM)).thenReturn(true);
-    when(mockNS.hasWriteLock(FSNamesystemLockMode.FS)).thenReturn(true);
-    when(mockNS.hasReadLock(FSNamesystemLockMode.FS)).thenReturn(true);
+    when(mockNS.hasWriteLock(RwLockMode.GLOBAL)).thenReturn(true);
+    when(mockNS.hasReadLock(RwLockMode.GLOBAL)).thenReturn(true);
+    when(mockNS.hasWriteLock(RwLockMode.BM)).thenReturn(true);
+    when(mockNS.hasReadLock(RwLockMode.BM)).thenReturn(true);
+    when(mockNS.hasWriteLock(RwLockMode.FS)).thenReturn(true);
+    when(mockNS.hasReadLock(RwLockMode.FS)).thenReturn(true);
     BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
     BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
     LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;
     LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;
 
 
@@ -1462,7 +1462,7 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
           throws IOException {
           throws IOException {
     Namesystem mockNS = mock(Namesystem.class);
     Namesystem mockNS = mock(Namesystem.class);
     when(mockNS.hasWriteLock()).thenReturn(true);
     when(mockNS.hasWriteLock()).thenReturn(true);
-    when(mockNS.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    when(mockNS.hasWriteLock(RwLockMode.BM)).thenReturn(true);
 
 
     BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
     BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
     LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;
     LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java

@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.Test;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized;
@@ -73,7 +73,7 @@ public class TestReplicationPolicyConsiderLoad
    */
    */
   @Test
   @Test
   public void testChooseTargetWithDecomNodes() throws IOException {
   public void testChooseTargetWithDecomNodes() throws IOException {
-    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    namenode.getNamesystem().writeLock(RwLockMode.BM);
     try {
     try {
       dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[3],
       dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[3],
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
@@ -125,7 +125,7 @@ public class TestReplicationPolicyConsiderLoad
       dataNodes[0].stopDecommission();
       dataNodes[0].stopDecommission();
       dataNodes[1].stopDecommission();
       dataNodes[1].stopDecommission();
       dataNodes[2].stopDecommission();
       dataNodes[2].stopDecommission();
-      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      namenode.getNamesystem().writeUnlock(RwLockMode.BM,
           "testChooseTargetWithDecomNodes");
           "testChooseTargetWithDecomNodes");
     }
     }
     NameNode.LOG.info("Done working on it");
     NameNode.LOG.info("Done working on it");
@@ -133,7 +133,7 @@ public class TestReplicationPolicyConsiderLoad
 
 
   @Test
   @Test
   public void testConsiderLoadFactor() throws IOException {
   public void testConsiderLoadFactor() throws IOException {
-    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    namenode.getNamesystem().writeLock(RwLockMode.BM);
     try {
     try {
       dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[0],
       dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[0],
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[0]),
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[0]),
@@ -180,7 +180,7 @@ public class TestReplicationPolicyConsiderLoad
             info.getDatanodeDescriptor().getXceiverCount() <= (load/6)*1.2);
             info.getDatanodeDescriptor().getXceiverCount() <= (load/6)*1.2);
       }
       }
     } finally {
     } finally {
-      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testConsiderLoadFactor");
+      namenode.getNamesystem().writeUnlock(RwLockMode.BM, "testConsiderLoadFactor");
     }
     }
   }
   }
 }
 }

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java

@@ -22,8 +22,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.OutlierMetrics;
 import org.apache.hadoop.hdfs.server.protocol.OutlierMetrics;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 
 
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Assert;
@@ -86,7 +86,7 @@ public class TestReplicationPolicyExcludeSlowNodes
    */
    */
   @Test
   @Test
   public void testChooseTargetExcludeSlowNodes() throws Exception {
   public void testChooseTargetExcludeSlowNodes() throws Exception {
-    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    namenode.getNamesystem().writeLock(RwLockMode.BM);
     try {
     try {
       // add nodes
       // add nodes
       for (int i = 0; i < dataNodes.length; i++) {
       for (int i = 0; i < dataNodes.length; i++) {
@@ -136,7 +136,7 @@ public class TestReplicationPolicyExcludeSlowNodes
             .getDatanodeUuid()));
             .getDatanodeUuid()));
       }
       }
     } finally {
     } finally {
-      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      namenode.getNamesystem().writeUnlock(RwLockMode.BM,
           "testChooseTargetExcludeSlowNodes");
           "testChooseTargetExcludeSlowNodes");
     }
     }
     NameNode.LOG.info("Done working on it");
     NameNode.LOG.info("Done working on it");
@@ -144,7 +144,7 @@ public class TestReplicationPolicyExcludeSlowNodes
 
 
   @Test
   @Test
   public void testSlowPeerTrackerEnabledClearSlowNodes() throws Exception {
   public void testSlowPeerTrackerEnabledClearSlowNodes() throws Exception {
-    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    namenode.getNamesystem().writeLock(RwLockMode.BM);
     try {
     try {
       // add nodes
       // add nodes
       for (DatanodeDescriptor dataNode : dataNodes) {
       for (DatanodeDescriptor dataNode : dataNodes) {
@@ -174,7 +174,7 @@ public class TestReplicationPolicyExcludeSlowNodes
       assertTrue(dnManager.isSlowPeerCollectorInitialized());
       assertTrue(dnManager.isSlowPeerCollectorInitialized());
       assertEquals(0, DatanodeManager.getSlowNodesUuidSet().size());
       assertEquals(0, DatanodeManager.getSlowNodesUuidSet().size());
     } finally {
     } finally {
-      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      namenode.getNamesystem().writeUnlock(RwLockMode.BM,
           "testSlowPeerTrackerEnabledClearSlowNodes");
           "testSlowPeerTrackerEnabledClearSlowNodes");
     }
     }
   }
   }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java

@@ -22,8 +22,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.Test;
 import org.junit.Test;
 
 
 import java.util.ArrayList;
 import java.util.ArrayList;
@@ -92,7 +92,7 @@ public class TestReplicationPolicyRatioConsiderLoadWithStorage
    */
    */
   @Test
   @Test
   public void testChooseTargetWithRatioConsiderLoad() {
   public void testChooseTargetWithRatioConsiderLoad() {
-    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    namenode.getNamesystem().writeLock(RwLockMode.BM);
     try {
     try {
       // After heartbeat has been processed, the total load should be 200.
       // After heartbeat has been processed, the total load should be 200.
       // And average load per node should be 40. The max load should be 2 * 40;
       // And average load per node should be 40. The max load should be 2 * 40;
@@ -164,7 +164,7 @@ public class TestReplicationPolicyRatioConsiderLoadWithStorage
       assertTrue(targetSet.contains(dataNodes[3]));
       assertTrue(targetSet.contains(dataNodes[3]));
       assertTrue(targetSet.contains(dataNodes[4]));
       assertTrue(targetSet.contains(dataNodes[4]));
     } finally {
     } finally {
-      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      namenode.getNamesystem().writeUnlock(RwLockMode.BM,
           "testChooseTargetWithRatioConsiderLoad");
           "testChooseTargetWithRatioConsiderLoad");
     }
     }
   }
   }

+ 9 - 16
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 
 
 
 
@@ -47,19 +46,13 @@ import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.test.Whitebox;
-import org.mockito.ArgumentMatcher;
-import org.mockito.ArgumentMatchers;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
 
 
 import static org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer.FSIMAGE_ATTRIBUTE_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer.FSIMAGE_ATTRIBUTE_KEY;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.spy;
 
 
 /**
 /**
  * This is a utility class to expose NameNode functionality for unit tests.
  * This is a utility class to expose NameNode functionality for unit tests.
@@ -90,13 +83,13 @@ public class NameNodeAdapter {
     // consistent with FSNamesystem#getFileInfo()
     // consistent with FSNamesystem#getFileInfo()
     final String operationName = needBlockToken ? "open" : "getfileinfo";
     final String operationName = needBlockToken ? "open" : "getfileinfo";
     FSPermissionChecker.setOperationType(operationName);
     FSPermissionChecker.setOperationType(operationName);
-    namenode.getNamesystem().readLock(FSNamesystemLockMode.FS);
+    namenode.getNamesystem().readLock(RwLockMode.FS);
     try {
     try {
       return FSDirStatAndListingOp.getFileInfo(namenode.getNamesystem()
       return FSDirStatAndListingOp.getFileInfo(namenode.getNamesystem()
           .getFSDirectory(), pc, src, resolveLink, needLocation,
           .getFSDirectory(), pc, src, resolveLink, needLocation,
           needBlockToken);
           needBlockToken);
     } finally {
     } finally {
-      namenode.getNamesystem().readUnlock(FSNamesystemLockMode.FS, "getFileInfo");
+      namenode.getNamesystem().readUnlock(RwLockMode.FS, "getFileInfo");
     }
     }
   }
   }
   
   
@@ -209,11 +202,11 @@ public class NameNodeAdapter {
    */
    */
   public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
   public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
       DatanodeID id) throws IOException {
       DatanodeID id) throws IOException {
-    ns.readLock(FSNamesystemLockMode.BM);
+    ns.readLock(RwLockMode.BM);
     try {
     try {
       return ns.getBlockManager().getDatanodeManager().getDatanode(id);
       return ns.getBlockManager().getDatanodeManager().getDatanode(id);
     } finally {
     } finally {
-      ns.readUnlock(FSNamesystemLockMode.BM, "getDatanode");
+      ns.readUnlock(RwLockMode.BM, "getDatanode");
     }
     }
   }
   }
   
   
@@ -237,7 +230,7 @@ public class NameNodeAdapter {
   public static BlockInfo addBlockNoJournal(final FSNamesystem fsn,
   public static BlockInfo addBlockNoJournal(final FSNamesystem fsn,
       final String src, final DatanodeStorageInfo[] targets)
       final String src, final DatanodeStorageInfo[] targets)
       throws IOException {
       throws IOException {
-    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsn.writeLock(RwLockMode.GLOBAL);
     try {
     try {
       INodeFile file = (INodeFile)fsn.getFSDirectory().getINode(src);
       INodeFile file = (INodeFile)fsn.getFSDirectory().getINode(src);
       Block newBlock = fsn.createNewBlock(BlockType.CONTIGUOUS);
       Block newBlock = fsn.createNewBlock(BlockType.CONTIGUOUS);
@@ -246,17 +239,17 @@ public class NameNodeAdapter {
           fsn, src, inodesInPath, newBlock, targets, BlockType.CONTIGUOUS);
           fsn, src, inodesInPath, newBlock, targets, BlockType.CONTIGUOUS);
       return file.getLastBlock();
       return file.getLastBlock();
     } finally {
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "addBlockNoJournal");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "addBlockNoJournal");
     }
     }
   }
   }
 
 
   public static void persistBlocks(final FSNamesystem fsn,
   public static void persistBlocks(final FSNamesystem fsn,
       final String src, final INodeFile file) throws IOException {
       final String src, final INodeFile file) throws IOException {
-    fsn.writeLock(FSNamesystemLockMode.FS);
+    fsn.writeLock(RwLockMode.FS);
     try {
     try {
       FSDirWriteFileOp.persistBlocks(fsn.getFSDirectory(), src, file, true);
       FSDirWriteFileOp.persistBlocks(fsn.getFSDirectory(), src, file, true);
     } finally {
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.FS, "persistBlocks");
+      fsn.writeUnlock(RwLockMode.FS, "persistBlocks");
     }
     }
   }
   }
 
 

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java

@@ -35,8 +35,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.junit.After;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Before;
@@ -93,7 +93,7 @@ public class TestAddBlockRetry {
     // start first addBlock()
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
     LOG.info("Starting first addBlock for " + src);
     LocatedBlock[] onRetryBlock = new LocatedBlock[1];
     LocatedBlock[] onRetryBlock = new LocatedBlock[1];
-    ns.readLock(FSNamesystemLockMode.GLOBAL);
+    ns.readLock(RwLockMode.GLOBAL);
     FSDirWriteFileOp.ValidateAddBlockResult r;
     FSDirWriteFileOp.ValidateAddBlockResult r;
     FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
     FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
     try {
     try {
@@ -101,7 +101,7 @@ public class TestAddBlockRetry {
                                             HdfsConstants.GRANDFATHER_INODE_ID,
                                             HdfsConstants.GRANDFATHER_INODE_ID,
                                             "clientName", null, onRetryBlock);
                                             "clientName", null, onRetryBlock);
     } finally {
     } finally {
-      ns.readUnlock(FSNamesystemLockMode.GLOBAL, "validateAddBlock");
+      ns.readUnlock(RwLockMode.GLOBAL, "validateAddBlock");
     }
     }
     DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
     DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
         ns.getBlockManager(), src, null, null, null, r);
         ns.getBlockManager(), src, null, null, null, r);
@@ -119,13 +119,13 @@ public class TestAddBlockRetry {
     assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
     assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
 
 
     // continue first addBlock()
     // continue first addBlock()
-    ns.writeLock(FSNamesystemLockMode.GLOBAL);
+    ns.writeLock(RwLockMode.GLOBAL);
     LocatedBlock newBlock;
     LocatedBlock newBlock;
     try {
     try {
       newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src,
       newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src,
           HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
           HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
     } finally {
     } finally {
-      ns.writeUnlock(FSNamesystemLockMode.GLOBAL, "testRetryAddBlockWhileInChooseTarget");
+      ns.writeUnlock(RwLockMode.GLOBAL, "testRetryAddBlockWhileInChooseTarget");
     }
     }
     assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
     assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
 
 
@@ -139,11 +139,11 @@ public class TestAddBlockRetry {
 
 
   boolean checkFileProgress(String src, boolean checkall) throws IOException {
   boolean checkFileProgress(String src, boolean checkall) throws IOException {
     final FSNamesystem ns = cluster.getNamesystem();
     final FSNamesystem ns = cluster.getNamesystem();
-    ns.readLock(FSNamesystemLockMode.GLOBAL);
+    ns.readLock(RwLockMode.GLOBAL);
     try {
     try {
       return ns.checkFileProgress(src, ns.dir.getINode(src).asFile(), checkall);
       return ns.checkFileProgress(src, ns.dir.getINode(src).asFile(), checkall);
     } finally {
     } finally {
-      ns.readUnlock(FSNamesystemLockMode.GLOBAL, "checkFileProgress");
+      ns.readUnlock(RwLockMode.GLOBAL, "checkFileProgress");
     }
     }
   }
   }
 
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java

@@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.After;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Before;
@@ -205,12 +205,12 @@ public class TestAddOverReplicatedStripedBlocks {
     BlockManager bm = cluster.getNamesystem().getBlockManager();
     BlockManager bm = cluster.getNamesystem().getBlockManager();
     List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
     List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
     List<String> storages = Arrays.asList(bg.getStorageIDs());
     List<String> storages = Arrays.asList(bg.getStorageIDs());
-    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().writeLock(RwLockMode.BM);
     try {
     try {
       bm.findAndMarkBlockAsCorrupt(lbs.getLastLocatedBlock().getBlock(),
       bm.findAndMarkBlockAsCorrupt(lbs.getLastLocatedBlock().getBlock(),
           infos.get(0), storages.get(0), "TEST");
           infos.get(0), storages.get(0), "TEST");
     } finally {
     } finally {
-      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      cluster.getNamesystem().writeUnlock(RwLockMode.BM,
           "testProcessOverReplicatedAndCorruptStripedBlock");
           "testProcessOverReplicatedAndCorruptStripedBlock");
     }
     }
     assertEquals(1, bm.countNodes(bm.getStoredBlock(blockInfo))
     assertEquals(1, bm.countNodes(bm.getStoredBlock(blockInfo))

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java

@@ -37,8 +37,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.After;
@@ -254,11 +254,11 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
 
 
     //test if decommission succeeded
     //test if decommission succeeded
     DatanodeDescriptor dnd3 = dnm.getDatanode(cluster.getDataNodes().get(3).getDatanodeId());
     DatanodeDescriptor dnd3 = dnm.getDatanode(cluster.getDataNodes().get(3).getDatanodeId());
-    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().writeLock(RwLockMode.BM);
     try {
     try {
       dm.getDatanodeAdminManager().startDecommission(dnd3);
       dm.getDatanodeAdminManager().startDecommission(dnd3);
     } finally {
     } finally {
-      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      cluster.getNamesystem().writeUnlock(RwLockMode.BM,
           "testPlacementWithOnlyOneNodeInRackDecommission");
           "testPlacementWithOnlyOneNodeInRackDecommission");
     }
     }
 
 

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java

@@ -44,7 +44,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.List;
 
 
 import org.apache.commons.lang3.time.DateUtils;
 import org.apache.commons.lang3.time.DateUtils;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
@@ -85,6 +84,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
@@ -763,7 +763,7 @@ public class TestCacheDirectives {
       @Override
       @Override
       public Boolean get() {
       public Boolean get() {
         int numCachedBlocks = 0, numCachedReplicas = 0;
         int numCachedBlocks = 0, numCachedReplicas = 0;
-        namesystem.readLock(FSNamesystemLockMode.BM);
+        namesystem.readLock(RwLockMode.BM);
         try {
         try {
           GSet<CachedBlock, CachedBlock> cachedBlocks =
           GSet<CachedBlock, CachedBlock> cachedBlocks =
               cacheManager.getCachedBlocks();
               cacheManager.getCachedBlocks();
@@ -776,7 +776,7 @@ public class TestCacheDirectives {
             }
             }
           }
           }
         } finally {
         } finally {
-          namesystem.readUnlock(FSNamesystemLockMode.BM, "checkBlocks");
+          namesystem.readUnlock(RwLockMode.BM, "checkBlocks");
         }
         }
 
 
         LOG.info(logString + " cached blocks: have " + numCachedBlocks +
         LOG.info(logString + " cached blocks: have " + numCachedBlocks +
@@ -1507,7 +1507,7 @@ public class TestCacheDirectives {
   private void checkPendingCachedEmpty(MiniDFSCluster cluster)
   private void checkPendingCachedEmpty(MiniDFSCluster cluster)
       throws Exception {
       throws Exception {
     Thread.sleep(1000);
     Thread.sleep(1000);
-    cluster.getNamesystem().readLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().readLock(RwLockMode.BM);
     try {
     try {
       final DatanodeManager datanodeManager =
       final DatanodeManager datanodeManager =
           cluster.getNamesystem().getBlockManager().getDatanodeManager();
           cluster.getNamesystem().getBlockManager().getDatanodeManager();
@@ -1520,7 +1520,7 @@ public class TestCacheDirectives {
             descriptor.getPendingCached().isEmpty());
             descriptor.getPendingCached().isEmpty());
       }
       }
     } finally {
     } finally {
-      cluster.getNamesystem().readUnlock(FSNamesystemLockMode.BM, "checkPendingCachedEmpty");
+      cluster.getNamesystem().readUnlock(RwLockMode.BM, "checkPendingCachedEmpty");
     }
     }
   }
   }
 
 
@@ -1667,9 +1667,9 @@ public class TestCacheDirectives {
     HATestUtil.waitForStandbyToCatchUp(ann, sbn);
     HATestUtil.waitForStandbyToCatchUp(ann, sbn);
     GenericTestUtils.waitFor(() -> {
     GenericTestUtils.waitFor(() -> {
       boolean isConsistence = false;
       boolean isConsistence = false;
-      ann.getNamesystem().readLock(FSNamesystemLockMode.FS);
+      ann.getNamesystem().readLock(RwLockMode.FS);
       try {
       try {
-        sbn.getNamesystem().readLock(FSNamesystemLockMode.FS);
+        sbn.getNamesystem().readLock(RwLockMode.FS);
         try {
         try {
           Iterator<CacheDirective> annDirectivesIt = annCachemanager.
           Iterator<CacheDirective> annDirectivesIt = annCachemanager.
               getCacheDirectives().iterator();
               getCacheDirectives().iterator();
@@ -1684,10 +1684,10 @@ public class TestCacheDirectives {
             }
             }
           }
           }
         } finally {
         } finally {
-          sbn.getNamesystem().readUnlock(FSNamesystemLockMode.FS, "expiryTimeConsistency");
+          sbn.getNamesystem().readUnlock(RwLockMode.FS, "expiryTimeConsistency");
         }
         }
       } finally {
       } finally {
-        ann.getNamesystem().readUnlock(FSNamesystemLockMode.FS, "expiryTimeConsistency");
+        ann.getNamesystem().readUnlock(RwLockMode.FS, "expiryTimeConsistency");
       }
       }
       if (!isConsistence) {
       if (!isConsistence) {
         LOG.info("testEexpiryTimeConsistency:"
         LOG.info("testEexpiryTimeConsistency:"

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java

@@ -52,8 +52,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -472,7 +472,7 @@ public class TestDeleteRace {
         } catch (InterruptedException e) {
         } catch (InterruptedException e) {
         }
         }
       });
       });
-      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+      fsn.writeLock(RwLockMode.GLOBAL);
       open.start();
       open.start();
       openSem.acquire();
       openSem.acquire();
       Thread.yield();
       Thread.yield();
@@ -480,7 +480,7 @@ public class TestDeleteRace {
       rename.start();
       rename.start();
       renameSem.acquire();
       renameSem.acquire();
       Thread.yield();
       Thread.yield();
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testOpenRenameRace");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "testOpenRenameRace");
 
 
       // wait open and rename threads finish.
       // wait open and rename threads finish.
       open.join();
       open.join();

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java

@@ -45,8 +45,8 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -393,11 +393,11 @@ public class TestDiskspaceQuotaUpdate {
 
 
   private void updateCountForQuota(int i) {
   private void updateCountForQuota(int i) {
     FSNamesystem fsn = cluster.getNamesystem();
     FSNamesystem fsn = cluster.getNamesystem();
-    fsn.writeLock(FSNamesystemLockMode.FS);
+    fsn.writeLock(RwLockMode.FS);
     try {
     try {
       getFSDirectory().updateCountForQuota(i);
       getFSDirectory().updateCountForQuota(i);
     } finally {
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.FS, "updateCountForQuota");
+      fsn.writeUnlock(RwLockMode.FS, "updateCountForQuota");
     }
     }
   }
   }
 
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java

@@ -56,12 +56,12 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
@@ -525,11 +525,11 @@ public class TestEditLogRace {
         public void run() {
         public void run() {
           try {
           try {
             LOG.info("Starting setOwner");
             LOG.info("Starting setOwner");
-            namesystem.writeLock(FSNamesystemLockMode.FS);
+            namesystem.writeLock(RwLockMode.FS);
             try {
             try {
               editLog.logSetOwner("/","test","test");
               editLog.logSetOwner("/","test","test");
             } finally {
             } finally {
-              namesystem.writeUnlock(FSNamesystemLockMode.FS, "testSaveRightBeforeSync");
+              namesystem.writeUnlock(RwLockMode.FS, "testSaveRightBeforeSync");
             }
             }
             sleepingBeforeSync.countDown();
             sleepingBeforeSync.countDown();
             LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
             LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java

@@ -32,13 +32,13 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.namenode.visitor.NamespacePrintVisitor;
 import org.apache.hadoop.hdfs.server.namenode.visitor.NamespacePrintVisitor;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.Canceler;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Assert;
@@ -152,11 +152,11 @@ public class TestFSImageWithSnapshot {
         conf);
         conf);
     FSImageCompression compression = FSImageCompression.createCompression(conf);
     FSImageCompression compression = FSImageCompression.createCompression(conf);
     File imageFile = getImageFile(testDir, txid);
     File imageFile = getImageFile(testDir, txid);
-    fsn.readLock(FSNamesystemLockMode.GLOBAL);
+    fsn.readLock(RwLockMode.GLOBAL);
     try {
     try {
       saver.save(imageFile, compression);
       saver.save(imageFile, compression);
     } finally {
     } finally {
-      fsn.readUnlock(FSNamesystemLockMode.GLOBAL, "saveFSImage");
+      fsn.readUnlock(RwLockMode.GLOBAL, "saveFSImage");
     }
     }
     return imageFile;
     return imageFile;
   }
   }
@@ -164,14 +164,14 @@ public class TestFSImageWithSnapshot {
   /** Load the fsimage from a temp file */
   /** Load the fsimage from a temp file */
   private void loadFSImageFromTempFile(File imageFile) throws IOException {
   private void loadFSImageFromTempFile(File imageFile) throws IOException {
     FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, fsn);
     FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, fsn);
-    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsn.writeLock(RwLockMode.GLOBAL);
     fsn.getFSDirectory().writeLock();
     fsn.getFSDirectory().writeLock();
     try {
     try {
       loader.load(imageFile, false);
       loader.load(imageFile, false);
       fsn.getFSDirectory().updateCountForQuota();
       fsn.getFSDirectory().updateCountForQuota();
     } finally {
     } finally {
       fsn.getFSDirectory().writeUnlock();
       fsn.getFSDirectory().writeUnlock();
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "loadFSImageFromTempFile");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "loadFSImageFromTempFile");
     }
     }
   }
   }
   
   

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java

@@ -41,12 +41,12 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.test.Whitebox;
 import org.junit.After;
 import org.junit.After;
 import org.junit.Test;
 import org.junit.Test;
@@ -195,12 +195,12 @@ public class TestFSNamesystem {
   }
   }
 
 
   private void clearNamesystem(FSNamesystem fsn) {
   private void clearNamesystem(FSNamesystem fsn) {
-    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsn.writeLock(RwLockMode.GLOBAL);
     try {
     try {
       fsn.clear();
       fsn.clear();
       assertFalse(fsn.isImageLoaded());
       assertFalse(fsn.isImageLoaded());
     } finally {
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "clearNamesystem");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "clearNamesystem");
     }
     }
   }
   }
 
 

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java

@@ -34,10 +34,10 @@ import javax.management.ObjectName;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.metrics2.impl.ConfigBuilder;
 import org.apache.hadoop.metrics2.impl.ConfigBuilder;
 import org.apache.hadoop.metrics2.impl.TestMetricsConfig;
 import org.apache.hadoop.metrics2.impl.TestMetricsConfig;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -154,7 +154,7 @@ public class TestFSNamesystemMBean {
       cluster.waitActive();
       cluster.waitActive();
 
 
       fsn = cluster.getNameNode().namesystem;
       fsn = cluster.getNameNode().namesystem;
-      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+      fsn.writeLock(RwLockMode.GLOBAL);
       Thread.sleep(jmxCachePeriod * 1000);
       Thread.sleep(jmxCachePeriod * 1000);
 
 
       MBeanClient client = new MBeanClient();
       MBeanClient client = new MBeanClient();
@@ -164,8 +164,8 @@ public class TestFSNamesystemMBean {
           "is owned by another thread", client.succeeded);
           "is owned by another thread", client.succeeded);
       client.interrupt();
       client.interrupt();
     } finally {
     } finally {
-      if (fsn != null && fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL)) {
-        fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testWithFSNamesystemWriteLock");
+      if (fsn != null && fsn.hasWriteLock(RwLockMode.GLOBAL)) {
+        fsn.writeUnlock(RwLockMode.GLOBAL, "testWithFSNamesystemWriteLock");
       }
       }
       if (cluster != null) {
       if (cluster != null) {
         cluster.shutdown();
         cluster.shutdown();

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java

@@ -36,7 +36,6 @@ import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
@@ -66,6 +65,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -1084,7 +1084,7 @@ public class TestFileTruncate {
     INodeFile file = iip.getLastINode().asFile();
     INodeFile file = iip.getLastINode().asFile();
     long initialGenStamp = file.getLastBlock().getGenerationStamp();
     long initialGenStamp = file.getLastBlock().getGenerationStamp();
     // Test that prepareFileForTruncate sets up in-place truncate.
     // Test that prepareFileForTruncate sets up in-place truncate.
-    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsn.writeLock(RwLockMode.GLOBAL);
     try {
     try {
       Block oldBlock = file.getLastBlock();
       Block oldBlock = file.getLastBlock();
       Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
       Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
@@ -1104,7 +1104,7 @@ public class TestFileTruncate {
       fsn.getEditLog().logTruncate(
       fsn.getEditLog().logTruncate(
           src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
           src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
     } finally {
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testTruncateRecovery");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "testTruncateRecovery");
     }
     }
 
 
     // Re-create file and ensure we are ready to copy on truncate
     // Re-create file and ensure we are ready to copy on truncate
@@ -1118,7 +1118,7 @@ public class TestFileTruncate {
         (BlockInfoContiguous) file.getLastBlock()), is(true));
         (BlockInfoContiguous) file.getLastBlock()), is(true));
     initialGenStamp = file.getLastBlock().getGenerationStamp();
     initialGenStamp = file.getLastBlock().getGenerationStamp();
     // Test that prepareFileForTruncate sets up copy-on-write truncate
     // Test that prepareFileForTruncate sets up copy-on-write truncate
-    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsn.writeLock(RwLockMode.GLOBAL);
     try {
     try {
       Block oldBlock = file.getLastBlock();
       Block oldBlock = file.getLastBlock();
       Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
       Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
@@ -1138,7 +1138,7 @@ public class TestFileTruncate {
       fsn.getEditLog().logTruncate(
       fsn.getEditLog().logTruncate(
           src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
           src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
     } finally {
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testTruncateRecovery");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "testTruncateRecovery");
     }
     }
     checkBlockRecovery(srcPath);
     checkBlockRecovery(srcPath);
     fs.deleteSnapshot(parent, "ss0");
     fs.deleteSnapshot(parent, "ss0");

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -105,12 +105,12 @@ import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
@@ -1512,11 +1512,11 @@ public class TestFsck {
     ExtendedBlock eb = util.getFirstBlock(dfs, path);
     ExtendedBlock eb = util.getFirstBlock(dfs, path);
     BlockCollection bc = null;
     BlockCollection bc = null;
     try {
     try {
-      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+      fsn.writeLock(RwLockMode.GLOBAL);
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
       bc = fsn.getBlockCollection(bi);
       bc = fsn.getBlockCollection(bi);
     } finally {
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testBlockIdCKDecommission");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "testBlockIdCKDecommission");
     }
     }
     DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
     DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
     bm.getDatanodeManager().getDatanodeAdminManager().startDecommission(dn);
     bm.getDatanodeManager().getDatanodeAdminManager().startDecommission(dn);
@@ -1954,11 +1954,11 @@ public class TestFsck {
     ExtendedBlock eb = util.getFirstBlock(dfs, path);
     ExtendedBlock eb = util.getFirstBlock(dfs, path);
     BlockCollection bc = null;
     BlockCollection bc = null;
     try {
     try {
-      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+      fsn.writeLock(RwLockMode.GLOBAL);
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
       bc = fsn.getBlockCollection(bi);
       bc = fsn.getBlockCollection(bi);
     } finally {
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testFsckWithDecommissionedReplicas");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "testFsckWithDecommissionedReplicas");
     }
     }
     DatanodeDescriptor dn = bc.getBlocks()[0]
     DatanodeDescriptor dn = bc.getBlocks()[0]
         .getDatanode(0);
         .getDatanode(0);

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java

@@ -23,7 +23,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.Test;
 import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.mockito.stubbing.Answer;
@@ -73,14 +73,14 @@ public class TestGetBlockLocations {
       @Override
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
       public Void answer(InvocationOnMock invocation) throws Throwable {
         if(!deleted[0]) {
         if(!deleted[0]) {
-          fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+          fsn.writeLock(RwLockMode.GLOBAL);
           try {
           try {
             INodesInPath iip = fsd.getINodesInPath(FILE_PATH, DirOp.READ);
             INodesInPath iip = fsd.getINodesInPath(FILE_PATH, DirOp.READ);
             FSDirDeleteOp.delete(fsd, iip, new INode.BlocksMapUpdateInfo(),
             FSDirDeleteOp.delete(fsd, iip, new INode.BlocksMapUpdateInfo(),
                                  new ArrayList<INode>(), new ArrayList<Long>(),
                                  new ArrayList<INode>(), new ArrayList<Long>(),
                                  now());
                                  now());
           } finally {
           } finally {
-            fsn.writeUnlock(FSNamesystemLockMode.GLOBAL,
+            fsn.writeUnlock(RwLockMode.GLOBAL,
                 "testGetBlockLocationsRacingWithDelete");
                 "testGetBlockLocationsRacingWithDelete");
           }
           }
           deleted[0] = true;
           deleted[0] = true;
@@ -108,14 +108,14 @@ public class TestGetBlockLocations {
       @Override
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
       public Void answer(InvocationOnMock invocation) throws Throwable {
         if (!renamed[0]) {
         if (!renamed[0]) {
-          fsn.writeLock(FSNamesystemLockMode.FS);
+          fsn.writeLock(RwLockMode.FS);
           try {
           try {
             FSDirRenameOp.renameTo(fsd, fsd.getPermissionChecker(), FILE_PATH,
             FSDirRenameOp.renameTo(fsd, fsd.getPermissionChecker(), FILE_PATH,
                                    DST_PATH, new INode.BlocksMapUpdateInfo(),
                                    DST_PATH, new INode.BlocksMapUpdateInfo(),
                                    false);
                                    false);
             renamed[0] = true;
             renamed[0] = true;
           } finally {
           } finally {
-            fsn.writeUnlock(FSNamesystemLockMode.FS, "testGetBlockLocationsRacingWithRename");
+            fsn.writeUnlock(RwLockMode.FS, "testGetBlockLocationsRacingWithRename");
           }
           }
         }
         }
         invocation.callRealMethod();
         invocation.callRealMethod();
@@ -144,13 +144,13 @@ public class TestGetBlockLocations {
         perm, 1, 1, new BlockInfo[] {}, (short) 1,
         perm, 1, 1, new BlockInfo[] {}, (short) 1,
         DFS_BLOCK_SIZE_DEFAULT);
         DFS_BLOCK_SIZE_DEFAULT);
 
 
-    fsn.writeLock(FSNamesystemLockMode.FS);
+    fsn.writeLock(RwLockMode.FS);
     try {
     try {
       final FSDirectory fsd = fsn.getFSDirectory();
       final FSDirectory fsd = fsn.getFSDirectory();
       INodesInPath iip = fsd.getINodesInPath("/", DirOp.READ);
       INodesInPath iip = fsd.getINodesInPath("/", DirOp.READ);
       fsd.addINode(iip, file, null);
       fsd.addINode(iip, file, null);
     } finally {
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.FS, "setupFileSystem");
+      fsn.writeUnlock(RwLockMode.FS, "setupFileSystem");
     }
     }
     return fsn;
     return fsn;
   }
   }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java

@@ -21,12 +21,12 @@ import java.io.IOException;
 import java.util.Random;
 import java.util.Random;
 
 
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -121,11 +121,11 @@ public class TestLargeDirectoryDelete {
           try {
           try {
             int blockcount = getBlockCount();
             int blockcount = getBlockCount();
             if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
             if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
-              mc.getNamesystem().writeLock(FSNamesystemLockMode.GLOBAL);
+              mc.getNamesystem().writeLock(RwLockMode.GLOBAL);
               try {
               try {
                 lockOps++;
                 lockOps++;
               } finally {
               } finally {
-                mc.getNamesystem().writeUnlock(FSNamesystemLockMode.GLOBAL, "runThreads");
+                mc.getNamesystem().writeUnlock(RwLockMode.GLOBAL, "runThreads");
               }
               }
               Thread.sleep(1);
               Thread.sleep(1);
             }
             }

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java

@@ -35,8 +35,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Lists;
 
 
 import org.junit.Rule;
 import org.junit.Rule;
@@ -467,10 +467,10 @@ public class TestLeaseManager {
     when(fsn.isRunning()).thenReturn(true);
     when(fsn.isRunning()).thenReturn(true);
     when(fsn.hasReadLock()).thenReturn(true);
     when(fsn.hasReadLock()).thenReturn(true);
     when(fsn.hasWriteLock()).thenReturn(true);
     when(fsn.hasWriteLock()).thenReturn(true);
-    when(fsn.hasReadLock(FSNamesystemLockMode.FS)).thenReturn(true);
-    when(fsn.hasWriteLock(FSNamesystemLockMode.FS)).thenReturn(true);
-    when(fsn.hasReadLock(FSNamesystemLockMode.GLOBAL)).thenReturn(true);
-    when(fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL)).thenReturn(true);
+    when(fsn.hasReadLock(RwLockMode.FS)).thenReturn(true);
+    when(fsn.hasWriteLock(RwLockMode.FS)).thenReturn(true);
+    when(fsn.hasReadLock(RwLockMode.GLOBAL)).thenReturn(true);
+    when(fsn.hasWriteLock(RwLockMode.GLOBAL)).thenReturn(true);
     when(fsn.getFSDirectory()).thenReturn(dir);
     when(fsn.getFSDirectory()).thenReturn(dir);
     when(fsn.getMaxLockHoldToReleaseLeaseMs()).thenReturn(maxLockHoldToReleaseLeaseMs);
     when(fsn.getMaxLockHoldToReleaseLeaseMs()).thenReturn(maxLockHoldToReleaseLeaseMs);
     return fsn;
     return fsn;

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java

@@ -47,9 +47,9 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -339,7 +339,7 @@ public class TestListOpenFiles {
     FSDirectory dir = fsNamesystem.getFSDirectory();
     FSDirectory dir = fsNamesystem.getFSDirectory();
     List<INode> removedINodes = new ChunkedArrayList<>();
     List<INode> removedINodes = new ChunkedArrayList<>();
     removedINodes.add(dir.getINode(path));
     removedINodes.add(dir.getINode(path));
-    fsNamesystem.writeLock(FSNamesystemLockMode.FS);
+    fsNamesystem.writeLock(RwLockMode.FS);
     try {
     try {
       dir.removeFromInodeMap(removedINodes);
       dir.removeFromInodeMap(removedINodes);
       openFileEntryBatchedEntries = nnRpc
       openFileEntryBatchedEntries = nnRpc
@@ -350,7 +350,7 @@ public class TestListOpenFiles {
     } catch (NullPointerException e) {
     } catch (NullPointerException e) {
       Assert.fail("Should not throw NPE when the file is deleted but has lease!");
       Assert.fail("Should not throw NPE when the file is deleted but has lease!");
     } finally {
     } finally {
-      fsNamesystem.writeUnlock(FSNamesystemLockMode.FS, "testListOpenFilesWithDeletedPath");
+      fsNamesystem.writeUnlock(RwLockMode.FS, "testListOpenFilesWithDeletedPath");
     }
     }
   }
   }
 }
 }

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 
 
 import java.util.function.Supplier;
 import java.util.function.Supplier;
@@ -96,13 +96,13 @@ public class TestNameNodeMetadataConsistency {
 
 
     // Simulate Namenode forgetting a Block
     // Simulate Namenode forgetting a Block
     cluster.restartNameNode(true);
     cluster.restartNameNode(true);
-    cluster.getNameNode().getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNameNode().getNamesystem().writeLock(RwLockMode.BM);
     BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
     BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
         .getStoredBlock(block.getLocalBlock());
         .getStoredBlock(block.getLocalBlock());
     bInfo.delete();
     bInfo.delete();
     cluster.getNameNode().getNamesystem().getBlockManager()
     cluster.getNameNode().getNamesystem().getBlockManager()
         .removeBlock(bInfo);
         .removeBlock(bInfo);
-    cluster.getNameNode().getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+    cluster.getNameNode().getNamesystem().writeUnlock(RwLockMode.BM,
         "testGenerationStampInFuture");
         "testGenerationStampInFuture");
 
 
     // we also need to tell block manager that we are in the startup path
     // we also need to tell block manager that we are in the startup path
@@ -147,11 +147,11 @@ public class TestNameNodeMetadataConsistency {
     cluster.restartNameNode(true);
     cluster.restartNameNode(true);
     BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager
     BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager
         ().getStoredBlock(block.getLocalBlock());
         ().getStoredBlock(block.getLocalBlock());
-    cluster.getNameNode().getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNameNode().getNamesystem().writeLock(RwLockMode.BM);
     bInfo.delete();
     bInfo.delete();
     cluster.getNameNode().getNamesystem().getBlockManager()
     cluster.getNameNode().getNamesystem().getBlockManager()
         .removeBlock(bInfo);
         .removeBlock(bInfo);
-    cluster.getNameNode().getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+    cluster.getNameNode().getNamesystem().writeUnlock(RwLockMode.BM,
         "testEnsureGenStampsIsStartupOnly");
         "testEnsureGenStampsIsStartupOnly");
 
 
     cluster.restartDataNode(dnProps);
     cluster.restartDataNode(dnProps);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java

@@ -44,10 +44,10 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 
 
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.Test;
@@ -339,13 +339,13 @@ public class TestReconstructStripedBlocks {
       boolean reconstructed = false;
       boolean reconstructed = false;
       for (int i = 0; i < 5; i++) {
       for (int i = 0; i < 5; i++) {
         NumberReplicas num = null;
         NumberReplicas num = null;
-        fsn.readLock(FSNamesystemLockMode.GLOBAL);
+        fsn.readLock(RwLockMode.GLOBAL);
         try {
         try {
           BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory()
           BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory()
               .getINode4Write(filePath.toString()).asFile().getLastBlock();
               .getINode4Write(filePath.toString()).asFile().getLastBlock();
           num = bm.countNodes(blockInfo);
           num = bm.countNodes(blockInfo);
         } finally {
         } finally {
-          fsn.readUnlock(FSNamesystemLockMode.GLOBAL, "testCountLiveReplicas");
+          fsn.readUnlock(RwLockMode.GLOBAL, "testCountLiveReplicas");
         }
         }
         if (num.liveReplicas() >= groupSize) {
         if (num.liveReplicas() >= groupSize) {
           reconstructed = true;
           reconstructed = true;

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java

@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
@@ -197,8 +197,8 @@ public class TestSecurityTokenEditLog {
         @Override
         @Override
         public Void answer(InvocationOnMock invocation) throws Throwable {
         public Void answer(InvocationOnMock invocation) throws Throwable {
           // fsn claims read lock if either read or write locked.
           // fsn claims read lock if either read or write locked.
-          Assert.assertTrue(fsnRef.get().hasReadLock(FSNamesystemLockMode.FS));
-          Assert.assertFalse(fsnRef.get().hasWriteLock(FSNamesystemLockMode.FS));
+          Assert.assertTrue(fsnRef.get().hasReadLock(RwLockMode.FS));
+          Assert.assertFalse(fsnRef.get().hasWriteLock(RwLockMode.FS));
           return null;
           return null;
         }
         }
       }
       }

+ 17 - 16
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/fgl/TestFineGrainedFSNamesystemLock.java

@@ -17,6 +17,7 @@
  */
  */
 package org.apache.hadoop.hdfs.server.namenode.fgl;
 package org.apache.hadoop.hdfs.server.namenode.fgl;
 
 
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.junit.Test;
 import org.junit.Test;
@@ -63,7 +64,7 @@ public class TestFineGrainedFSNamesystemLock {
       if (index == 0) { // Test the global write lock via multiple threads.
       if (index == 0) { // Test the global write lock via multiple threads.
         callableList.add(() -> {
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            writeLock(fsn, FSNamesystemLockMode.GLOBAL, opName, globalCount);
+            writeLock(fsn, RwLockMode.GLOBAL, opName, globalCount);
             globalNumber.incrementAndGet();
             globalNumber.incrementAndGet();
           }
           }
           return true;
           return true;
@@ -71,7 +72,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 1) { // Test the fs write lock via multiple threads.
       } else if (index == 1) { // Test the fs write lock via multiple threads.
         callableList.add(() -> {
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            writeLock(fsn, FSNamesystemLockMode.FS, opName, fsCount);
+            writeLock(fsn, RwLockMode.FS, opName, fsCount);
             fsNumber.incrementAndGet();
             fsNumber.incrementAndGet();
           }
           }
           return true;
           return true;
@@ -79,7 +80,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 2) { // Test the bm write lock via multiple threads.
       } else if (index == 2) { // Test the bm write lock via multiple threads.
         callableList.add(() -> {
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            writeLock(fsn, FSNamesystemLockMode.BM, opName, bmCount);
+            writeLock(fsn, RwLockMode.BM, opName, bmCount);
             bmNumber.incrementAndGet();
             bmNumber.incrementAndGet();
           }
           }
           return true;
           return true;
@@ -87,7 +88,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 3) { // Test the bm read lock via multiple threads.
       } else if (index == 3) { // Test the bm read lock via multiple threads.
         callableList.add(() -> {
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            readLock(fsn, FSNamesystemLockMode.BM, opName, bmCount);
+            readLock(fsn, RwLockMode.BM, opName, bmCount);
             bmNumber.incrementAndGet();
             bmNumber.incrementAndGet();
           }
           }
           return true;
           return true;
@@ -95,7 +96,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 4) { // Test the fs read lock via multiple threads.
       } else if (index == 4) { // Test the fs read lock via multiple threads.
         callableList.add(() -> {
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            readLock(fsn, FSNamesystemLockMode.FS, opName, fsCount);
+            readLock(fsn, RwLockMode.FS, opName, fsCount);
             fsNumber.incrementAndGet();
             fsNumber.incrementAndGet();
           }
           }
           return true;
           return true;
@@ -103,7 +104,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 5) { // Test the global read lock via multiple threads.
       } else if (index == 5) { // Test the global read lock via multiple threads.
         callableList.add(() -> {
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            readLock(fsn, FSNamesystemLockMode.GLOBAL, opName, globalCount);
+            readLock(fsn, RwLockMode.GLOBAL, opName, globalCount);
             globalNumber.incrementAndGet();
             globalNumber.incrementAndGet();
           }
           }
           return true;
           return true;
@@ -111,7 +112,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 6) { // Test the global interruptable write lock via multiple threads.
       } else if (index == 6) { // Test the global interruptable write lock via multiple threads.
         callableList.add(() -> {
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            writeLockInterruptibly(fsn, FSNamesystemLockMode.GLOBAL, opName, globalCount);
+            writeLockInterruptibly(fsn, RwLockMode.GLOBAL, opName, globalCount);
             globalNumber.incrementAndGet();
             globalNumber.incrementAndGet();
           }
           }
           return true;
           return true;
@@ -119,7 +120,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 7) { // Test the fs interruptable write lock via multiple threads.
       } else if (index == 7) { // Test the fs interruptable write lock via multiple threads.
         callableList.add(() -> {
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            writeLockInterruptibly(fsn, FSNamesystemLockMode.FS, opName, fsCount);
+            writeLockInterruptibly(fsn, RwLockMode.FS, opName, fsCount);
             fsNumber.incrementAndGet();
             fsNumber.incrementAndGet();
           }
           }
           return true;
           return true;
@@ -127,7 +128,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 8) { // Test the bm interruptable write lock via multiple threads.
       } else if (index == 8) { // Test the bm interruptable write lock via multiple threads.
         callableList.add(() -> {
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            writeLockInterruptibly(fsn, FSNamesystemLockMode.BM, opName, bmCount);
+            writeLockInterruptibly(fsn, RwLockMode.BM, opName, bmCount);
             bmNumber.incrementAndGet();
             bmNumber.incrementAndGet();
           }
           }
           return true;
           return true;
@@ -135,7 +136,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 9) { // Test the bm interruptable read lock via multiple threads.
       } else if (index == 9) { // Test the bm interruptable read lock via multiple threads.
         callableList.add(() -> {
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            readLockInterruptibly(fsn, FSNamesystemLockMode.BM, opName, bmCount);
+            readLockInterruptibly(fsn, RwLockMode.BM, opName, bmCount);
             bmNumber.incrementAndGet();
             bmNumber.incrementAndGet();
           }
           }
           return true;
           return true;
@@ -143,7 +144,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 10) { // Test the fs interruptable read lock via multiple threads.
       } else if (index == 10) { // Test the fs interruptable read lock via multiple threads.
         callableList.add(() -> {
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            readLockInterruptibly(fsn, FSNamesystemLockMode.FS, opName, fsCount);
+            readLockInterruptibly(fsn, RwLockMode.FS, opName, fsCount);
             fsNumber.incrementAndGet();
             fsNumber.incrementAndGet();
           }
           }
           return true;
           return true;
@@ -151,7 +152,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else { // Test the global interruptable read lock via multiple threads.
       } else { // Test the global interruptable read lock via multiple threads.
         callableList.add(() -> {
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            readLockInterruptibly(fsn, FSNamesystemLockMode.GLOBAL, opName, globalCount);
+            readLockInterruptibly(fsn, RwLockMode.GLOBAL, opName, globalCount);
             globalNumber.incrementAndGet();
             globalNumber.incrementAndGet();
           }
           }
           return true;
           return true;
@@ -177,7 +178,7 @@ public class TestFineGrainedFSNamesystemLock {
    * @param opName operation name
    * @param opName operation name
    * @param counter counter to trace this lock mode
    * @param counter counter to trace this lock mode
    */
    */
-  private void writeLock(FSNLockManager fsn, FSNamesystemLockMode mode,
+  private void writeLock(FSNLockManager fsn, RwLockMode mode,
       String opName, AtomicLong counter)  {
       String opName, AtomicLong counter)  {
     fsn.writeLock(mode);
     fsn.writeLock(mode);
     try {
     try {
@@ -200,7 +201,7 @@ public class TestFineGrainedFSNamesystemLock {
    * @param opName operation name
    * @param opName operation name
    * @param counter counter to trace this lock mode
    * @param counter counter to trace this lock mode
    */
    */
-  private void readLock(FSNLockManager fsn, FSNamesystemLockMode mode,
+  private void readLock(FSNLockManager fsn, RwLockMode mode,
       String opName, AtomicLong counter)  {
       String opName, AtomicLong counter)  {
     fsn.readLock(mode);
     fsn.readLock(mode);
     try {
     try {
@@ -217,7 +218,7 @@ public class TestFineGrainedFSNamesystemLock {
    * @param opName operation name
    * @param opName operation name
    * @param counter counter to trace this lock mode
    * @param counter counter to trace this lock mode
    */
    */
-  private void writeLockInterruptibly(FSNLockManager fsn, FSNamesystemLockMode mode,
+  private void writeLockInterruptibly(FSNLockManager fsn, RwLockMode mode,
       String opName, AtomicLong counter)  {
       String opName, AtomicLong counter)  {
     boolean success = false;
     boolean success = false;
     try {
     try {
@@ -257,7 +258,7 @@ public class TestFineGrainedFSNamesystemLock {
    * @param opName operation name
    * @param opName operation name
    * @param counter counter to trace this lock mode
    * @param counter counter to trace this lock mode
    */
    */
-  private void readLockInterruptibly(FSNLockManager fsn, FSNamesystemLockMode mode,
+  private void readLockInterruptibly(FSNLockManager fsn, RwLockMode mode,
       String opName, AtomicLong counter)  {
       String opName, AtomicLong counter)  {
     try {
     try {
       fsn.readLockInterruptibly(mode);
       fsn.readLockInterruptibly(mode);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java

@@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
@@ -569,13 +569,13 @@ public class TestDNFencing {
   }
   }
 
 
   private void doMetasave(NameNode nn2) {
   private void doMetasave(NameNode nn2) {
-    nn2.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    nn2.getNamesystem().writeLock(RwLockMode.BM);
     try {
     try {
       PrintWriter pw = new PrintWriter(System.err);
       PrintWriter pw = new PrintWriter(System.err);
       nn2.getNamesystem().getBlockManager().metaSave(pw);
       nn2.getNamesystem().getBlockManager().metaSave(pw);
       pw.flush();
       pw.flush();
     } finally {
     } finally {
-      nn2.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "metaSave");
+      nn2.getNamesystem().writeUnlock(RwLockMode.BM, "metaSave");
     }
     }
   }
   }
 
 

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

@@ -50,7 +50,6 @@ import java.util.List;
 import java.util.Random;
 import java.util.Random;
 
 
 import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 
 
@@ -88,6 +87,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;
 import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -495,12 +495,12 @@ public class TestNameNodeMetrics {
     // Corrupt first replica of the block
     // Corrupt first replica of the block
     LocatedBlock block = NameNodeAdapter.getBlockLocations(
     LocatedBlock block = NameNodeAdapter.getBlockLocations(
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
-    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().writeLock(RwLockMode.BM);
     try {
     try {
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
           "STORAGE_ID", "TEST");
           "STORAGE_ID", "TEST");
     } finally {
     } finally {
-      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testCorruptBlock");
+      cluster.getNamesystem().writeUnlock(RwLockMode.BM, "testCorruptBlock");
     }
     }
 
 
     BlockManagerTestUtil.updateState(bm);
     BlockManagerTestUtil.updateState(bm);
@@ -589,12 +589,12 @@ public class TestNameNodeMetrics {
     assert lbs.get(0) instanceof LocatedStripedBlock;
     assert lbs.get(0) instanceof LocatedStripedBlock;
     LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
     LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
 
 
-    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().writeLock(RwLockMode.BM);
     try {
     try {
       bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
       bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
           "STORAGE_ID", "TEST");
           "STORAGE_ID", "TEST");
     } finally {
     } finally {
-      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testStripedFileCorruptBlocks");
+      cluster.getNamesystem().writeUnlock(RwLockMode.BM, "testStripedFileCorruptBlocks");
     }
     }
 
 
     BlockManagerTestUtil.updateState(bm);
     BlockManagerTestUtil.updateState(bm);
@@ -688,12 +688,12 @@ public class TestNameNodeMetrics {
     // Corrupt the only replica of the block to result in a missing block
     // Corrupt the only replica of the block to result in a missing block
     LocatedBlock block = NameNodeAdapter.getBlockLocations(
     LocatedBlock block = NameNodeAdapter.getBlockLocations(
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
-    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().writeLock(RwLockMode.BM);
     try {
     try {
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
           "STORAGE_ID", "TEST");
           "STORAGE_ID", "TEST");
     } finally {
     } finally {
-      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testMissingBlock");
+      cluster.getNamesystem().writeUnlock(RwLockMode.BM, "testMissingBlock");
     }
     }
     Thread.sleep(1000); // Wait for block to be marked corrupt
     Thread.sleep(1000); // Wait for block to be marked corrupt
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java

@@ -46,8 +46,8 @@ import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
 import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.After;
@@ -298,10 +298,10 @@ public class TestINodeFileUnderConstructionWithSnapshot {
       hdfs.delete(foo, true);
       hdfs.delete(foo, true);
       Thread.sleep(1000);
       Thread.sleep(1000);
       try {
       try {
-        fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+        fsn.writeLock(RwLockMode.GLOBAL);
         NameNodeAdapter.getLeaseManager(fsn).runLeaseChecks();
         NameNodeAdapter.getLeaseManager(fsn).runLeaseChecks();
       } finally {
       } finally {
-        fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testLease");
+        fsn.writeUnlock(RwLockMode.GLOBAL, "testLease");
       }
       }
     } finally {
     } finally {
       NameNodeAdapter.setLeasePeriod(fsn, HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
       NameNodeAdapter.setLeasePeriod(fsn, HdfsConstants.LEASE_SOFTLIMIT_PERIOD,

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java

@@ -81,7 +81,7 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
@@ -784,14 +784,14 @@ public class TestDFSAdmin {
       LocatedStripedBlock bg =
       LocatedStripedBlock bg =
           (LocatedStripedBlock)(lbs.get(0));
           (LocatedStripedBlock)(lbs.get(0));
 
 
-      miniCluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+      miniCluster.getNamesystem().writeLock(RwLockMode.BM);
       try {
       try {
         BlockManager bm = miniCluster.getNamesystem().getBlockManager();
         BlockManager bm = miniCluster.getNamesystem().getBlockManager();
         bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
         bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
             "STORAGE_ID", "TEST");
             "STORAGE_ID", "TEST");
         BlockManagerTestUtil.updateState(bm);
         BlockManagerTestUtil.updateState(bm);
       } finally {
       } finally {
-        miniCluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testReportCommand");
+        miniCluster.getNamesystem().writeUnlock(RwLockMode.BM, "testReportCommand");
       }
       }
       waitForCorruptBlock(miniCluster, client, file);
       waitForCorruptBlock(miniCluster, client, file);
 
 

+ 7 - 7
hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java

@@ -84,9 +84,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 
 
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.net.NodeBase;
@@ -1095,26 +1095,26 @@ public class ITestProvidedImplementation {
 
 
   private void startDecommission(FSNamesystem namesystem, DatanodeManager dnm,
   private void startDecommission(FSNamesystem namesystem, DatanodeManager dnm,
       int dnIndex) throws Exception {
       int dnIndex) throws Exception {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     dnm.getDatanodeAdminManager().startDecommission(dnDesc);
     dnm.getDatanodeAdminManager().startDecommission(dnDesc);
-    namesystem.writeUnlock(FSNamesystemLockMode.BM, "startDecommission");
+    namesystem.writeUnlock(RwLockMode.BM, "startDecommission");
   }
   }
 
 
   private void startMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
   private void startMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
       int dnIndex) throws Exception {
       int dnIndex) throws Exception {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     dnm.getDatanodeAdminManager().startMaintenance(dnDesc, Long.MAX_VALUE);
     dnm.getDatanodeAdminManager().startMaintenance(dnDesc, Long.MAX_VALUE);
-    namesystem.writeUnlock(FSNamesystemLockMode.BM, "startMaintenance");
+    namesystem.writeUnlock(RwLockMode.BM, "startMaintenance");
   }
   }
 
 
   private void stopMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
   private void stopMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
       int dnIndex) throws Exception {
       int dnIndex) throws Exception {
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     dnm.getDatanodeAdminManager().stopMaintenance(dnDesc);
     dnm.getDatanodeAdminManager().stopMaintenance(dnDesc);
-    namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "stopMaintenance");
+    namesystem.writeUnlock(RwLockMode.GLOBAL, "stopMaintenance");
   }
   }
 
 
   @Test
   @Test

برخی فایل ها در این مقایسه diff نمایش داده نمی شوند زیرا تعداد فایل ها بسیار زیاد است