
HDFS-17691. [FGL] Move FSNamesystemLockMode to org.apache.hadoop.hdfs.util package (#7232)

ZanderXu, 6 months ago
commit b289f9abd3
87 changed files with 767 additions and 768 deletions
  1. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
  2. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
  3. + 57 - 57
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  4. + 16 - 16
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
  5. + 7 - 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
  6. + 19 - 19
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java
  7. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
  8. + 6 - 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
  9. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
  10. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
  11. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
  12. + 17 - 17
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
  13. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
  14. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
  15. + 7 - 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
  16. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
  17. + 2 - 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
  18. + 8 - 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
  19. + 9 - 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
  20. + 2 - 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
  21. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
  22. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
  23. + 8 - 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
  24. + 8 - 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  25. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  26. + 123 - 123
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  27. + 8 - 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
  28. + 4 - 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
  29. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
  30. + 7 - 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
  31. + 13 - 13
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  32. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
  33. + 6 - 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
  34. + 4 - 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
  35. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  36. + 17 - 15
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/FSNLockManager.java
  37. + 62 - 61
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/FineGrainedFSNamesystemLock.java
  38. + 16 - 15
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/GlobalFSNamesystemLock.java
  39. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
  40. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDeletionGc.java
  41. + 18 - 20
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java
  42. + 6 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLockMode.java
  43. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
  44. + 7 - 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
  45. + 11 - 11
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
  46. + 9 - 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
  47. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
  48. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
  49. + 13 - 13
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
  50. + 13 - 13
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
  51. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
  52. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
  53. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
  54. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
  55. + 9 - 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
  56. + 2 - 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
  57. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
  58. + 8 - 8
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
  59. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
  60. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java
  61. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java
  62. + 9 - 16
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
  63. + 7 - 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
  64. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
  65. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
  66. + 9 - 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
  67. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
  68. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
  69. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
  70. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
  71. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
  72. + 4 - 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
  73. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
  74. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
  75. + 7 - 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
  76. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
  77. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
  78. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
  79. + 5 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
  80. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
  81. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
  82. + 17 - 16
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/fgl/TestFineGrainedFSNamesystemLock.java
  83. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
  84. + 7 - 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
  85. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
  86. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
  87. + 7 - 7
      hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
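
The change is almost entirely mechanical: the lock-mode enum moves from org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode to org.apache.hadoop.hdfs.util.RwLockMode, and every call site swaps the import and the enum name while keeping the same lock semantics. To make the per-file hunks below easier to follow, here is a minimal, self-contained sketch of the lock-by-mode API as the call sites use it. Only the three mode values (FS, BM, GLOBAL) and the method shapes (lock by mode; unlock by mode plus an operation name for diagnostics) are taken from the diff; everything else, including the FS-then-BM acquisition order for GLOBAL, is an assumption of this sketch, not the actual FineGrainedFSNamesystemLock implementation.

import java.util.concurrent.locks.ReentrantReadWriteLock;

// The three modes this commit threads through every call site.
enum RwLockMode { FS, BM, GLOBAL }

// Illustrative stand-in for the namesystem lock, NOT Hadoop code: GLOBAL is
// modeled as "both locks", FS/BM as one lock each.
class DemoLockManager {
  private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(true);
  private final ReentrantReadWriteLock bmLock = new ReentrantReadWriteLock(true);

  void writeLock(RwLockMode mode) {
    if (mode != RwLockMode.BM) fsLock.writeLock().lock();  // FS or GLOBAL
    if (mode != RwLockMode.FS) bmLock.writeLock().lock();  // BM or GLOBAL
  }

  void writeUnlock(RwLockMode mode, String opName) {
    // Release in reverse order; a real implementation can log opName.
    if (mode != RwLockMode.FS) bmLock.writeLock().unlock();
    if (mode != RwLockMode.BM) fsLock.writeLock().unlock();
  }

  void readLock(RwLockMode mode) {
    if (mode != RwLockMode.BM) fsLock.readLock().lock();
    if (mode != RwLockMode.FS) bmLock.readLock().lock();
  }

  void readLockInterruptibly(RwLockMode mode) throws InterruptedException {
    if (mode != RwLockMode.BM) fsLock.readLock().lockInterruptibly();
    if (mode != RwLockMode.FS) {
      try {
        bmLock.readLock().lockInterruptibly();
      } catch (InterruptedException e) {
        if (mode == RwLockMode.GLOBAL) fsLock.readLock().unlock();  // don't leak FS
        throw e;
      }
    }
  }

  void readUnlock(RwLockMode mode, String opName) {
    if (mode != RwLockMode.FS) bmLock.readLock().unlock();
    if (mode != RwLockMode.BM) fsLock.readLock().unlock();
  }

  boolean hasWriteLock(RwLockMode mode) {
    boolean fs = fsLock.isWriteLockedByCurrentThread();
    boolean bm = bmLock.isWriteLockedByCurrentThread();
    return mode == RwLockMode.FS ? fs : mode == RwLockMode.BM ? bm : fs && bm;
  }
}

Judging by the file list, the real enum is consumed behind the FSNLockManager interface by two implementations, FineGrainedFSNamesystemLock and GlobalFSNamesystemLock; the sketch above loosely resembles the fine-grained one.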

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java

@@ -121,13 +121,13 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
@@ -1699,10 +1699,10 @@ public class TestRouterRpc {
       // mark a replica as corrupt
       LocatedBlock block = NameNodeAdapter
           .getBlockLocations(nameNode, testFile, 0, 1024).get(0);
-      namesystem.writeLock(FSNamesystemLockMode.BM);
+      namesystem.writeLock(RwLockMode.BM);
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
           "STORAGE_ID", "TEST");
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "findAndMarkBlockAsCorrupt");
+      namesystem.writeUnlock(RwLockMode.BM, "findAndMarkBlockAsCorrupt");
       BlockManagerTestUtil.updateState(bm);
       DFSTestUtil.waitCorruptReplicas(fileSystem, namesystem,
           new Path(testFile), block.getBlock(), 1);

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java

@@ -27,7 +27,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map.Entry;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Lists;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -41,6 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -373,7 +373,7 @@ public class DelegationTokenSecretManager
       // closes the edit log files. Doing this inside the
       // fsn lock will prevent being interrupted when stopping
       // the secret manager.
-      namesystem.readLockInterruptibly(FSNamesystemLockMode.FS);
+      namesystem.readLockInterruptibly(RwLockMode.FS);
       try {
         // this monitor isn't necessary if stopped while holding write lock
         // but for safety, guard against a stop with read lock.
@@ -384,7 +384,7 @@ public class DelegationTokenSecretManager
           namesystem.logUpdateMasterKey(key);
         }
       } finally {
-        namesystem.readUnlock(FSNamesystemLockMode.FS, "logUpdateMasterKey");
+        namesystem.readUnlock(RwLockMode.FS, "logUpdateMasterKey");
       }
     } catch (InterruptedException ie) {
       // AbstractDelegationTokenManager may crash if an exception is thrown.
@@ -402,7 +402,7 @@ public class DelegationTokenSecretManager
       // closes the edit log files. Doing this inside the
       // fsn lock will prevent being interrupted when stopping
       // the secret manager.
-      namesystem.readLockInterruptibly(FSNamesystemLockMode.FS);
+      namesystem.readLockInterruptibly(RwLockMode.FS);
       try {
         // this monitor isn't necessary if stopped while holding write lock
         // but for safety, guard against a stop with read lock.
@@ -413,7 +413,7 @@ public class DelegationTokenSecretManager
           namesystem.logExpireDelegationToken(dtId);
         }
       } finally {
-        namesystem.readUnlock(FSNamesystemLockMode.FS, "logExpireToken");
+        namesystem.readUnlock(RwLockMode.FS, "logExpireToken");
       }
     } catch (InterruptedException ie) {
       // AbstractDelegationTokenManager may crash if an exception is thrown.
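
Both secret-manager methods above share one shape: take the FS read lock interruptibly, so that a NameNode that is shutting down (and closing the edit log) can interrupt the thread, and always release with the operation name. A condensed sketch of that shape, reusing the DemoLockManager stand-in from the top of this page; logEdit() is a hypothetical stand-in for logUpdateMasterKey(key) / logExpireDelegationToken(dtId), and the catch body is an assumption since the real one is outside this hunk:

class SecretManagerSketch {
  private final DemoLockManager namesystem = new DemoLockManager();

  void logUnderFsReadLock() {
    try {
      namesystem.readLockInterruptibly(RwLockMode.FS);
      try {
        logEdit();  // stand-in for the edit-log call
      } finally {
        namesystem.readUnlock(RwLockMode.FS, "logUpdateMasterKey");
      }
    } catch (InterruptedException ie) {
      // Assumed handling: restore the interrupt flag rather than throw,
      // since AbstractDelegationTokenManager may crash on an exception.
      Thread.currentThread().interrupt();
    }
  }

  private void logEdit() { }
}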

+ 57 - 57
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -97,7 +97,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
@@ -121,6 +120,7 @@ import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
 
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -861,7 +861,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   /** Dump meta data to out. */
   public void metaSave(PrintWriter out) {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasReadLock(RwLockMode.BM);
     final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
     final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
     datanodeManager.fetchDatanodes(live, dead, false);
@@ -1584,7 +1584,7 @@ public class BlockManager implements BlockStatsMXBean {
       final boolean inSnapshot, FileEncryptionInfo feInfo,
       ErasureCodingPolicy ecPolicy)
       throws IOException {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasReadLock(RwLockMode.BM);
     if (blocks == null) {
       return null;
     } else if (blocks.length == 0) {
@@ -1830,7 +1830,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   /** Remove the blocks associated to the given DatanodeStorageInfo. */
   void removeBlocksAssociatedTo(final DatanodeStorageInfo storageInfo) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     final Iterator<BlockInfo> it = storageInfo.getBlockIterator();
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     while(it.hasNext()) {
@@ -1901,7 +1901,7 @@ public class BlockManager implements BlockStatsMXBean {
    */
   public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk,
       final DatanodeInfo dn, String storageID, String reason) throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     final Block reportedBlock = blk.getLocalBlock();
     final BlockInfo storedBlock = getStoredBlock(reportedBlock);
     if (storedBlock == null) {
@@ -2107,9 +2107,9 @@ public class BlockManager implements BlockStatsMXBean {
    */
   int computeBlockReconstructionWork(int blocksToProcess) {
     List<List<BlockInfo>> blocksToReconstruct = null;
-    // TODO: Change it to readLock(FSNamesystemLockMode.BM)
+    // TODO: Change it to readLock(RwLockMode.BM)
     //  since chooseLowRedundancyBlocks is thread safe.
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       boolean reset = false;
       if (replQueueResetToHeadThreshold > 0) {
@@ -2124,7 +2124,7 @@ public class BlockManager implements BlockStatsMXBean {
       blocksToReconstruct = neededReconstruction
           .chooseLowRedundancyBlocks(blocksToProcess, reset);
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "computeBlockReconstructionWork");
+      namesystem.writeUnlock(RwLockMode.BM, "computeBlockReconstructionWork");
     }
     return computeReconstructionWorkForBlocks(blocksToReconstruct);
   }
@@ -2143,9 +2143,9 @@ public class BlockManager implements BlockStatsMXBean {
     List<BlockReconstructionWork> reconWork = new ArrayList<>();
 
     // Step 1: categorize at-risk blocks into replication and EC tasks
-    // TODO: Change to readLock(FSNamesystemLockMode.GLOBAL)
+    // TODO: Change to readLock(RwLockMode.GLOBAL)
     //  since neededReconstruction is thread safe.
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     try {
       synchronized (neededReconstruction) {
         for (int priority = 0; priority < blocksToReconstruct
@@ -2160,7 +2160,7 @@ public class BlockManager implements BlockStatsMXBean {
         }
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "computeReconstructionWorkForBlocks");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "computeReconstructionWorkForBlocks");
     }
 
     // Step 2: choose target nodes for each reconstruction task
@@ -2185,9 +2185,9 @@ public class BlockManager implements BlockStatsMXBean {
     }
 
     // Step 3: add tasks to the DN
-    // TODO: Change to readLock(FSNamesystemLockMode.BM)
+    // TODO: Change to readLock(RwLockMode.BM)
     //  since pendingReconstruction and neededReconstruction are thread safe.
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       for (BlockReconstructionWork rw : reconWork) {
         final DatanodeStorageInfo[] targets = rw.getTargets();
@@ -2203,7 +2203,7 @@ public class BlockManager implements BlockStatsMXBean {
         }
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "computeReconstructionWorkForBlocks");
+      namesystem.writeUnlock(RwLockMode.BM, "computeReconstructionWorkForBlocks");
     }
 
     if (blockLog.isDebugEnabled()) {
@@ -2694,9 +2694,9 @@ public class BlockManager implements BlockStatsMXBean {
   void processPendingReconstructions() {
     BlockInfo[] timedOutItems = pendingReconstruction.getTimedOutBlocks();
     if (timedOutItems != null) {
-      // TODO: Change to readLock(FSNamesystemLockMode.BM)
+      // TODO: Change to readLock(RwLockMode.BM)
       //  since neededReconstruction is thread safe.
-      namesystem.writeLock(FSNamesystemLockMode.BM);
+      namesystem.writeLock(RwLockMode.BM);
       try {
         for (int i = 0; i < timedOutItems.length; i++) {
           /*
@@ -2715,7 +2715,7 @@ public class BlockManager implements BlockStatsMXBean {
           }
         }
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.BM, "processPendingReconstructions");
+        namesystem.writeUnlock(RwLockMode.BM, "processPendingReconstructions");
       }
       /* If we know the target datanodes where the replication timedout,
        * we could invoke decBlocksScheduled() on it. Its ok for now.
@@ -2724,7 +2724,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasReadLock(RwLockMode.BM);
     DatanodeDescriptor node = null;
     try {
       node = datanodeManager.getDatanode(nodeReg);
@@ -2795,7 +2795,7 @@ public class BlockManager implements BlockStatsMXBean {
    *               list of blocks that need to be removed from blocksMap
    */
   public void removeBlocksAndUpdateSafemodeTotal(BlocksMapUpdateInfo blocks) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     // In the case that we are a Standby tailing edits from the
     // active while in safe-mode, we need to track the total number
     // of blocks and safe blocks in the system.
@@ -2910,7 +2910,7 @@ public class BlockManager implements BlockStatsMXBean {
       final DatanodeStorage storage,
       final BlockListAsLongs newReport,
       BlockReportContext context) throws IOException {
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     final long startTime = Time.monotonicNow(); //after acquiring write lock
     final long endTime;
     DatanodeDescriptor node;
@@ -2968,7 +2968,7 @@ public class BlockManager implements BlockStatsMXBean {
       storageInfo.receivedBlockReport();
     } finally {
       endTime = Time.monotonicNow();
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processReport");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "processReport");
     }
 
     if (blockLog.isDebugEnabled()) {
@@ -3012,7 +3012,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   public void removeBRLeaseIfNeeded(final DatanodeID nodeID,
       final BlockReportContext context) throws IOException {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     DatanodeDescriptor node;
     try {
       node = datanodeManager.getDatanode(nodeID);
@@ -3030,7 +3030,7 @@ public class BlockManager implements BlockStatsMXBean {
         }
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeBRLeaseIfNeeded");
+      namesystem.writeUnlock(RwLockMode.BM, "removeBRLeaseIfNeeded");
     }
   }
 
@@ -3041,7 +3041,7 @@ public class BlockManager implements BlockStatsMXBean {
     if (getPostponedMisreplicatedBlocksCount() == 0) {
       return;
     }
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     long startTime = Time.monotonicNow();
     long startSize = postponedMisreplicatedBlocks.size();
     try {
@@ -3070,7 +3070,7 @@ public class BlockManager implements BlockStatsMXBean {
       postponedMisreplicatedBlocks.addAll(rescannedMisreplicatedBlocks);
       rescannedMisreplicatedBlocks.clear();
       long endSize = postponedMisreplicatedBlocks.size();
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL,
+      namesystem.writeUnlock(RwLockMode.GLOBAL,
           "rescanPostponedMisreplicatedBlocks");
       LOG.info("Rescan of postponedMisreplicatedBlocks completed in {}" +
           " msecs. {} blocks are left. {} blocks were removed.",
@@ -3114,7 +3114,7 @@ public class BlockManager implements BlockStatsMXBean {
       return;
     }
    // TODO: Change to readLock(RwLockMode.BM) since invalidateBlocks is thread safe.
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     long now = Time.monotonicNow();
     int processed = 0;
     try {
@@ -3168,7 +3168,7 @@ public class BlockManager implements BlockStatsMXBean {
         }
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "processTimedOutExcessBlocks");
+      namesystem.writeUnlock(RwLockMode.BM, "processTimedOutExcessBlocks");
       LOG.info("processTimedOutExcessBlocks {} msecs.", (Time.monotonicNow() - now));
     }
   }
@@ -3224,7 +3224,7 @@ public class BlockManager implements BlockStatsMXBean {
       BlockInfo block,
       long oldGenerationStamp, long oldNumBytes, 
       DatanodeStorageInfo[] newStorages) throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     BlockToMarkCorrupt b = null;
     if (block.getGenerationStamp() != oldGenerationStamp) {
       b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
@@ -3274,7 +3274,7 @@ public class BlockManager implements BlockStatsMXBean {
       final DatanodeStorageInfo storageInfo,
       final BlockListAsLongs report) throws IOException {
     if (report == null) return;
-    assert (namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL));
+    assert (namesystem.hasWriteLock(RwLockMode.GLOBAL));
     assert (storageInfo.getBlockReportCount() == 0);
 
     for (BlockReportReplica iblk : report) {
@@ -3742,7 +3742,7 @@ public class BlockManager implements BlockStatsMXBean {
   private void addStoredBlockImmediate(BlockInfo storedBlock, Block reported,
       DatanodeStorageInfo storageInfo)
   throws IOException {
-    assert (storedBlock != null && namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL));
+    assert (storedBlock != null && namesystem.hasWriteLock(RwLockMode.GLOBAL));
     if (!namesystem.isInStartupSafeMode()
         || isPopulatingReplQueues()) {
       addStoredBlock(storedBlock, reported, storageInfo, null, false);
@@ -3777,7 +3777,7 @@ public class BlockManager implements BlockStatsMXBean {
                                DatanodeDescriptor delNodeHint,
                                boolean logEveryBlock)
   throws IOException {
-    assert block != null && namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert block != null && namesystem.hasWriteLock(RwLockMode.GLOBAL);
     BlockInfo storedBlock;
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     if (!block.isComplete()) {
@@ -3954,7 +3954,7 @@ public class BlockManager implements BlockStatsMXBean {
    * extra or low redundancy. Place it into the respective queue.
    */
   public void processMisReplicatedBlocks() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     stopReconstructionInitializer();
     neededReconstruction.clear();
     reconstructionQueuesInitializer = new Daemon() {
@@ -4013,7 +4013,7 @@ public class BlockManager implements BlockStatsMXBean {
 
     while (namesystem.isRunning() && !Thread.currentThread().isInterrupted()) {
       int processed = 0;
-      namesystem.writeLockInterruptibly(FSNamesystemLockMode.GLOBAL);
+      namesystem.writeLockInterruptibly(RwLockMode.GLOBAL);
       try {
         while (processed < numBlocksPerIteration && blocksItr.hasNext()) {
           BlockInfo block = blocksItr.next();
@@ -4072,7 +4072,7 @@ public class BlockManager implements BlockStatsMXBean {
           break;
         }
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processMisReplicatesAsync");
+        namesystem.writeUnlock(RwLockMode.GLOBAL, "processMisReplicatesAsync");
         LOG.info("Reconstruction queues initialisation progress: {}, total number of blocks " +
             "processed: {}/{}", reconstructionQueuesInitProgress, totalProcessed, totalBlocks);
         // Make sure it is out of the write lock for sufficiently long time.
@@ -4119,7 +4119,7 @@ public class BlockManager implements BlockStatsMXBean {
               && !Thread.currentThread().isInterrupted()
               && iter.hasNext()) {
         int limit = processed + numBlocksPerIteration;
-        namesystem.writeLockInterruptibly(FSNamesystemLockMode.GLOBAL);
+        namesystem.writeLockInterruptibly(RwLockMode.GLOBAL);
         try {
           while (iter.hasNext() && processed < limit) {
             BlockInfo blk = iter.next();
@@ -4129,7 +4129,7 @@ public class BlockManager implements BlockStatsMXBean {
                 blk, r);
           }
         } finally {
-          namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processMisReplicatedBlocks");
+          namesystem.writeUnlock(RwLockMode.GLOBAL, "processMisReplicatedBlocks");
         }
       }
     } catch (InterruptedException ex) {
@@ -4225,7 +4225,7 @@ public class BlockManager implements BlockStatsMXBean {
   private boolean processExtraRedundancyBlockWithoutPostpone(final BlockInfo block,
       final short replication, final DatanodeDescriptor addedNode,
       DatanodeDescriptor delNodeHint) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert namesystem.hasWriteLock(RwLockMode.GLOBAL);
     if (addedNode == delNodeHint) {
       delNodeHint = null;
     }
@@ -4270,9 +4270,9 @@ public class BlockManager implements BlockStatsMXBean {
       DatanodeDescriptor addedNode,
       DatanodeDescriptor delNodeHint) {
     // bc.getStoragePolicyID() needs FSReadLock.
-    // TODO: Change to hasReadLock(FSNamesystemLockMode.GLOBAL)
+    // TODO: Change to hasReadLock(RwLockMode.GLOBAL)
     //  since chooseExcessRedundancyContiguous is thread safe.
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert namesystem.hasWriteLock(RwLockMode.GLOBAL);
     // first form a rack to datanodes map and
     BlockCollection bc = getBlockCollection(storedBlock);
     if (storedBlock.isStriped()) {
@@ -4447,7 +4447,7 @@ public class BlockManager implements BlockStatsMXBean {
    */
   public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
     blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
-    assert (namesystem.hasWriteLock(FSNamesystemLockMode.BM));
+    assert (namesystem.hasWriteLock(RwLockMode.BM));
     {
       if (storedBlock == null || !blocksMap.removeNode(storedBlock, node)) {
         blockLog.debug("BLOCK* removeStoredBlock: {} has already been removed from node {}",
@@ -4641,7 +4641,7 @@ public class BlockManager implements BlockStatsMXBean {
    */
   public void processIncrementalBlockReport(final DatanodeID nodeID,
       final StorageReceivedDeletedBlocks srdb) throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert namesystem.hasWriteLock(RwLockMode.GLOBAL);
     final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
     if (node == null || !node.isRegistered()) {
       blockLog.warn("BLOCK* processIncrementalBlockReport"
@@ -4892,15 +4892,15 @@ public class BlockManager implements BlockStatsMXBean {
       // When called by tests like TestDefaultBlockPlacementPolicy.
       // testPlacementWithLocalRackNodesDecommissioned, it is not protected by
       // lock, only when called by DatanodeManager.refreshNodes have writeLock
-      if (namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL)) {
-        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL,
+      if (namesystem.hasWriteLock(RwLockMode.GLOBAL)) {
+        namesystem.writeUnlock(RwLockMode.GLOBAL,
             "processExtraRedundancyBlocksOnInService");
         try {
           Thread.sleep(1);
         } catch (InterruptedException e) {
           Thread.currentThread().interrupt();
         }
-        namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+        namesystem.writeLock(RwLockMode.GLOBAL);
       }
     }
     LOG.info("Invalidated {} extra redundancy blocks on {} after "
@@ -4964,7 +4964,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   public void removeBlock(BlockInfo block) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     // No need to ACK blocks that are being removed entirely
     // from the namespace, since the removal of the associated
     // file already removes them from the block map below.
@@ -5007,9 +5007,9 @@ public class BlockManager implements BlockStatsMXBean {
   /** updates a block in needed reconstruction queue. */
   private void updateNeededReconstructions(final BlockInfo block,
       final int curReplicasDelta, int expectedReplicasDelta) {
-    // TODO: Change to readLock(FSNamesystemLockMode.BM)
+    // TODO: Change to readLock(RwLockMode.BM)
     //  since pendingReconstruction and neededReconstruction are thread safe.
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       if (!isPopulatingReplQueues() || !block.isComplete()) {
         return;
@@ -5028,7 +5028,7 @@ public class BlockManager implements BlockStatsMXBean {
             repl.outOfServiceReplicas(), oldExpectedReplicas);
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "updateNeededReconstructions");
+      namesystem.writeUnlock(RwLockMode.BM, "updateNeededReconstructions");
     }
   }
 
@@ -5061,8 +5061,8 @@ public class BlockManager implements BlockStatsMXBean {
   private int invalidateWorkForOneNode(DatanodeInfo dn) {
     final List<Block> toInvalidate;
 
-    // TODO: Change to readLock(FSNamesystemLockMode.BM) since invalidateBlocks is thread safe.
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    // TODO: Change to readLock(RwLockMode.BM) since invalidateBlocks is thread safe.
+    namesystem.writeLock(RwLockMode.BM);
     try {
       // blocks should not be replicated or removed if safe mode is on
       if (namesystem.isInSafeMode()) {
@@ -5086,7 +5086,7 @@ public class BlockManager implements BlockStatsMXBean {
         return 0;
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "invalidateWorkForOneNode");
+      namesystem.writeUnlock(RwLockMode.BM, "invalidateWorkForOneNode");
     }
     if (blockLog.isDebugEnabled()) {
       blockLog.debug("BLOCK* {}: ask {} to delete {}",
@@ -5314,7 +5314,7 @@ public class BlockManager implements BlockStatsMXBean {
 
     private void remove(long time) {
       if (checkToDeleteIterator()) {
-        namesystem.writeLock(FSNamesystemLockMode.BM);
+        namesystem.writeLock(RwLockMode.BM);
         try {
           while (toDeleteIterator.hasNext()) {
             removeBlock(toDeleteIterator.next());
@@ -5325,7 +5325,7 @@ public class BlockManager implements BlockStatsMXBean {
             }
           }
         } finally {
-          namesystem.writeUnlock(FSNamesystemLockMode.BM, "markedDeleteBlockScrubberThread");
+          namesystem.writeUnlock(RwLockMode.BM, "markedDeleteBlockScrubberThread");
         }
       }
     }
@@ -5440,12 +5440,12 @@ public class BlockManager implements BlockStatsMXBean {
 
     // Update counters
     // TODO: Make corruptReplicas thread safe to remove this lock.
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       this.updateState();
       this.scheduledReplicationBlocksCount = workFound;
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "computeDatanodeWork");
+      namesystem.writeUnlock(RwLockMode.BM, "computeDatanodeWork");
     }
     workFound += this.computeInvalidateWork(nodesToProcess);
     return workFound;
@@ -5672,7 +5672,7 @@ public class BlockManager implements BlockStatsMXBean {
           // batch as many operations in the write lock until the queue
           // runs dry, or the max lock hold is reached.
           int processed = 0;
-          namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+          namesystem.writeLock(RwLockMode.GLOBAL);
           metrics.setBlockOpsQueued(queue.size() + 1);
           try {
             long start = Time.monotonicNow();
@@ -5685,7 +5685,7 @@ public class BlockManager implements BlockStatsMXBean {
               action = queue.poll();
             } while (action != null);
           } finally {
-            namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processQueue");
+            namesystem.writeUnlock(RwLockMode.GLOBAL, "processQueue");
             metrics.addBlockOpsBatched(processed - 1);
           }
         } catch (InterruptedException e) {
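
Two conventions recur through this file's hunks: the mode names which state a section touches (BM for block-manager-only queues, GLOBAL when FS-side state such as a block's BlockCollection is also consulted), and the TODO comments record planned downgrades from write to read locks where the underlying structures are already thread safe. A condensed illustration using the DemoLockManager stand-in from the top of this page; the comment bodies paraphrase the hunks above and are not real code:

class BlockManagerSketch {
  private final DemoLockManager namesystem = new DemoLockManager();

  void bmOnlySection() {
    namesystem.writeLock(RwLockMode.BM);  // only BM-side queues touched
    try {
      // e.g. neededReconstruction.chooseLowRedundancyBlocks(...); per the
      // TODOs above, this can later become readLock(RwLockMode.BM).
    } finally {
      namesystem.writeUnlock(RwLockMode.BM, "computeBlockReconstructionWork");
    }
  }

  void crossStateSection() {
    namesystem.writeLock(RwLockMode.GLOBAL);  // BM state plus FS lookups
    try {
      // e.g. a full block report that resolves blocks to their collections
    } finally {
      namesystem.writeUnlock(RwLockMode.GLOBAL, "processReport");
    }
  }
}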

+ 16 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java

@@ -26,13 +26,13 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeSt
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Status;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.util.Daemon;
 
@@ -170,7 +170,7 @@ class BlockManagerSafeMode {
    * @param total initial total blocks
    */
   void activate(long total) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     assert status == BMSafeModeStatus.OFF;
 
     startTime = monotonicNow();
@@ -204,7 +204,7 @@ class BlockManagerSafeMode {
    * If safe mode is not currently on, this is a no-op.
    */
   void checkSafeMode() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     if (namesystem.inTransitionToActive()) {
       return;
     }
@@ -246,7 +246,7 @@ class BlockManagerSafeMode {
    * @param deltaTotal the change in number of total blocks expected
    */
   void adjustBlockTotals(int deltaSafe, int deltaTotal) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     if (!isSafeModeTrackingBlocks()) {
       return;
     }
@@ -280,7 +280,7 @@ class BlockManagerSafeMode {
    * set after the image has been loaded.
    */
   boolean isSafeModeTrackingBlocks() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     return haEnabled && status != BMSafeModeStatus.OFF;
   }
 
@@ -288,7 +288,7 @@ class BlockManagerSafeMode {
    * Set total number of blocks.
    */
   void setBlockTotal(long total) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     synchronized (this) {
       this.blockTotal = total;
       this.blockThreshold = (long) (total * threshold);
@@ -374,7 +374,7 @@ class BlockManagerSafeMode {
    * @return true if it leaves safe mode successfully else false
    */
   boolean leaveSafeMode(boolean force) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM) : "Leaving safe mode needs write lock!";
+    assert namesystem.hasWriteLock(RwLockMode.BM) : "Leaving safe mode needs write lock!";
 
     final long bytesInFuture = getBytesInFuture();
     if (bytesInFuture > 0) {
@@ -445,7 +445,7 @@ class BlockManagerSafeMode {
    */
   synchronized void incrementSafeBlockCount(int storageNum,
       BlockInfo storedBlock) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     if (status == BMSafeModeStatus.OFF) {
       return;
     }
@@ -477,7 +477,7 @@ class BlockManagerSafeMode {
    * If safe mode is not currently on, this is a no-op.
    */
   synchronized void decrementSafeBlockCount(BlockInfo b) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     if (status == BMSafeModeStatus.OFF) {
       return;
     }
@@ -500,7 +500,7 @@ class BlockManagerSafeMode {
    * @param brr block report replica which belongs to no file in BlockManager
    */
   void checkBlocksWithFutureGS(BlockReportReplica brr) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     if (status == BMSafeModeStatus.OFF) {
       return;
     }
@@ -534,7 +534,7 @@ class BlockManagerSafeMode {
   }
 
   void close() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL)
+    assert namesystem.hasWriteLock(RwLockMode.GLOBAL)
         : "Closing bmSafeMode needs write lock!";
     try {
       smmthread.interrupt();
@@ -569,7 +569,7 @@ class BlockManagerSafeMode {
 
   /** Check if we are ready to initialize replication queues. */
   private void initializeReplQueuesIfNecessary() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     // Whether it has reached the threshold for initializing replication queues.
     boolean canInitializeReplQueues = blockManager.shouldPopulateReplQueues() &&
         blockSafe >= blockReplQueueThreshold;
@@ -584,7 +584,7 @@ class BlockManagerSafeMode {
    * @return true if both block and datanode threshold are met else false.
    */
   private boolean areThresholdsMet() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     // Calculating the number of live datanodes is time-consuming
     // in large clusters. Skip it when datanodeThreshold is zero.
     // We need to evaluate getNumLiveDataNodes only when
@@ -629,7 +629,7 @@ class BlockManagerSafeMode {
    * Print status every 20 seconds.
    */
   private void reportStatus(String msg, boolean rightNow) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     long curTime = monotonicNow();
     if(!rightNow && (curTime - lastStatusReport < 20 * 1000)) {
       return;
@@ -663,7 +663,7 @@ class BlockManagerSafeMode {
     public void run() {
       while (namesystem.isRunning()) {
         try {
-          namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+          namesystem.writeLock(RwLockMode.GLOBAL);
           if (status == BMSafeModeStatus.OFF) { // Not in safe mode.
             break;
           }
@@ -673,7 +673,7 @@ class BlockManagerSafeMode {
             break;
           }
         } finally {
-          namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "leaveSafeMode");
+          namesystem.writeUnlock(RwLockMode.GLOBAL, "leaveSafeMode");
         }
 
         try {
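
Unlike BlockManager, BlockManagerSafeMode never takes the lock itself: every method asserts that its caller already holds the required mode, which is why nearly all of its hunks are one-line assert changes. With the DemoLockManager stand-in, the guard looks like this (the method body is a placeholder):

class SafeModeSketch {
  private final DemoLockManager namesystem;

  SafeModeSketch(DemoLockManager namesystem) {
    this.namesystem = namesystem;
  }

  void adjustBlockTotals(int deltaSafe, int deltaTotal) {
    // The calling BlockManager must already hold the BM write lock.
    assert namesystem.hasWriteLock(RwLockMode.BM) : "caller must hold BM write lock";
    // ... mutate blockTotal / blockThreshold under the caller's lock
  }
}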

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java

@@ -48,9 +48,9 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
@@ -224,7 +224,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
    * after are not atomic.
    */
   public void waitForRescanIfNeeded() {
-    Preconditions.checkArgument(!namesystem.hasWriteLock(FSNamesystemLockMode.FS),
+    Preconditions.checkArgument(!namesystem.hasWriteLock(RwLockMode.FS),
         "Must not hold the FSN write lock when waiting for a rescan.");
     Preconditions.checkArgument(lock.isHeldByCurrentThread(),
         "Must hold the CRM lock when waiting for a rescan.");
@@ -269,7 +269,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
    */
   @Override
   public void close() throws IOException {
-    Preconditions.checkArgument(namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL));
+    Preconditions.checkArgument(namesystem.hasWriteLock(RwLockMode.GLOBAL));
     lock.lock();
     try {
       if (shutdown) return;
@@ -292,7 +292,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     scannedBlocks = 0;
     lastScanTimeMs = Time.monotonicNow();
     try {
-      namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+      namesystem.writeLock(RwLockMode.GLOBAL);
       try {
         lock.lock();
         if (shutdown) {
@@ -309,7 +309,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
       rescanCachedBlockMap();
       blockManager.getDatanodeManager().resetLastCachingDirectiveSentTime();
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "cacheReplicationMonitorRescan");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "cacheReplicationMonitorRescan");
     }
   }
 
@@ -326,11 +326,11 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     long now = Time.monotonicNow();
     if (now - last > cacheManager.getMaxLockTimeMs()) {
       try {
-        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "cacheReplicationMonitorRescan");
+        namesystem.writeUnlock(RwLockMode.GLOBAL, "cacheReplicationMonitorRescan");
         Thread.sleep(cacheManager.getSleepTimeMs());
       } catch (InterruptedException e) {
       } finally {
-        namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+        namesystem.writeLock(RwLockMode.GLOBAL);
       }
     }
   }
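
The rescan above bounds its GLOBAL write-lock hold time: once the lock has been held longer than cacheManager.getMaxLockTimeMs(), it drops the lock, sleeps, and retakes it before continuing. A sketch of that yield step with the DemoLockManager stand-in; the clock and parameter names are stand-ins for Time.monotonicNow() and the CacheManager getters:

class RescanSketch {
  private final DemoLockManager namesystem = new DemoLockManager();
  private long lastLockTakenMs = System.nanoTime() / 1_000_000;  // stand-in monotonic clock

  void yieldLockIfNeeded(long maxLockTimeMs, long sleepTimeMs) {
    long now = System.nanoTime() / 1_000_000;
    if (now - lastLockTakenMs > maxLockTimeMs) {
      try {
        namesystem.writeUnlock(RwLockMode.GLOBAL, "cacheReplicationMonitorRescan");
        Thread.sleep(sleepTimeMs);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();  // assumed; the hunk's catch is empty
      } finally {
        namesystem.writeLock(RwLockMode.GLOBAL);  // always retake before returning
        lastLockTakenMs = System.nanoTime() / 1_000_000;
      }
    }
  }
}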

+ 19 - 19
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.INode;
@@ -25,6 +24,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -171,7 +171,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
     numBlocksChecked = 0;
     // Check decommission or maintenance progress.
     try {
-      namesystem.writeLock(FSNamesystemLockMode.BM);
+      namesystem.writeLock(RwLockMode.BM);
       try {
         /**
          * Other threads can modify the pendingNode list and the cancelled
@@ -209,7 +209,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
 
         processPendingNodes();
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.BM, "DatanodeAdminMonitorV2Thread");
+        namesystem.writeUnlock(RwLockMode.BM, "DatanodeAdminMonitorV2Thread");
       }
       // After processing the above, various parts of the check() method will
       // take and drop the read / write lock as needed. Aside from the
@@ -327,7 +327,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
    */
   private void processMaintenanceNodes() {
     // Check for any maintenance state nodes which need to be expired
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     try {
       for (DatanodeDescriptor dn : outOfServiceNodeBlocks.keySet()) {
         if (dn.isMaintenance() && dn.maintenanceExpired()) {
@@ -339,12 +339,12 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
           // which added the node to the cancelled list. Therefore expired
           // maintenance nodes do not need to be added to the toRemove list.
           dnAdmin.stopMaintenance(dn);
-          namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processMaintenanceNodes");
-          namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+          namesystem.writeUnlock(RwLockMode.GLOBAL, "processMaintenanceNodes");
+          namesystem.writeLock(RwLockMode.GLOBAL);
         }
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processMaintenanceNodes");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "processMaintenanceNodes");
     }
   }
 
@@ -361,7 +361,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
       // taking the write lock at all.
       return;
     }
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       for (DatanodeDescriptor dn : toRemove) {
         final boolean isHealthy =
@@ -403,7 +403,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
         }
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "processCompletedNodes");
+      namesystem.writeUnlock(RwLockMode.BM, "processCompletedNodes");
     }
   }
 
@@ -487,7 +487,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
       return;
     }
 
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     try {
       long repQueueSize = blockManager.getLowRedundancyBlocksCount();
 
@@ -525,8 +525,8 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
           // replication
           if (blocksProcessed >= blocksPerLock) {
             blocksProcessed = 0;
-            namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "moveBlocksToPending");
-            namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+            namesystem.writeUnlock(RwLockMode.GLOBAL, "moveBlocksToPending");
+            namesystem.writeLock(RwLockMode.GLOBAL);
           }
           blocksProcessed++;
           if (nextBlockAddedToPending(blockIt, dn)) {
@@ -547,7 +547,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
         }
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "moveBlocksToPending");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "moveBlocksToPending");
     }
     LOG.debug("{} blocks are now pending replication", pendingCount);
   }
@@ -627,16 +627,16 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
     }
 
     DatanodeStorageInfo[] storage;
-    namesystem.readLock(FSNamesystemLockMode.BM);
+    namesystem.readLock(RwLockMode.BM);
     try {
       storage = dn.getStorageInfos();
     } finally {
-      namesystem.readUnlock(FSNamesystemLockMode.BM, "scanDatanodeStorage");
+      namesystem.readUnlock(RwLockMode.BM, "scanDatanodeStorage");
     }
 
     for (DatanodeStorageInfo s : storage) {
       // isBlockReplicatedOk involves FS.
-      namesystem.readLock(FSNamesystemLockMode.GLOBAL);
+      namesystem.readLock(RwLockMode.GLOBAL);
       try {
         // As the lock is dropped and re-taken between each storage, we need
         // to check the storage is still present before processing it, as it
@@ -662,7 +662,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
           numBlocksChecked++;
         }
       } finally {
-        namesystem.readUnlock(FSNamesystemLockMode.GLOBAL, "scanDatanodeStorage");
+        namesystem.readUnlock(RwLockMode.GLOBAL, "scanDatanodeStorage");
       }
     }
   }
@@ -685,7 +685,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
    * namenode write lock while it runs.
    */
   private void processPendingReplication() {
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     try {
       for (Iterator<Map.Entry<DatanodeDescriptor, List<BlockInfo>>>
            entIt = pendingRep.entrySet().iterator(); entIt.hasNext();) {
@@ -717,7 +717,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
             suspectBlocks.getOutOfServiceBlockCount());
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processPendingReplication");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "processPendingReplication");
     }
   }
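
moveBlocksToPending above bounds lock hold time by count rather than by clock: it cycles the GLOBAL write lock every blocksPerLock blocks so queued readers and writers can get in between batches. A sketch of that chunking shape with the DemoLockManager stand-in; process() and the block type are placeholders:

class ChunkedScanSketch {
  private final DemoLockManager namesystem = new DemoLockManager();

  void scan(Iterable<Object> blocks, int blocksPerLock) {
    int blocksProcessed = 0;
    namesystem.writeLock(RwLockMode.GLOBAL);
    try {
      for (Object block : blocks) {
        if (blocksProcessed >= blocksPerLock) {
          blocksProcessed = 0;
          // Briefly cycle the lock, exactly as the hunk above does.
          namesystem.writeUnlock(RwLockMode.GLOBAL, "moveBlocksToPending");
          namesystem.writeLock(RwLockMode.GLOBAL);
        }
        blocksProcessed++;
        process(block);
      }
    } finally {
      namesystem.writeUnlock(RwLockMode.GLOBAL, "moveBlocksToPending");
    }
  }

  private void process(Object block) { }
}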
 

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -27,6 +26,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.slf4j.Logger;
@@ -185,7 +185,7 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
     // Check decommission or maintenance progress.
     // dnAdmin.stopMaintenance(dn) needs FSReadLock
     // since processExtraRedundancyBlock involves storage policy and isSufficient involves bc.
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     try {
       processCancelledNodes();
       processPendingNodes();
@@ -194,7 +194,7 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
       LOG.warn("DatanodeAdminMonitor caught exception when processing node.",
           e);
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "DatanodeAdminMonitorThread");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "DatanodeAdminMonitorThread");
     }
     if (numBlocksChecked + numNodesChecked > 0) {
       LOG.info("Checked {} blocks and {} nodes this tick. {} nodes are now " +
@@ -429,7 +429,7 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
         // lock.
        // Yielding is required when the block count exceeds the
        // configured per-iteration limit.
-        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processBlocksInternal");
+        namesystem.writeUnlock(RwLockMode.GLOBAL, "processBlocksInternal");
         try {
           LOG.debug("Yielded lock during decommission/maintenance check");
           Thread.sleep(0, 500);
@@ -438,7 +438,7 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
         }
         // reset
         numBlocksCheckedPerLock = 0;
-        namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+        namesystem.writeLock(RwLockMode.GLOBAL);
       }
       numBlocksChecked++;
       numBlocksCheckedPerLock++;
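
The two hunks above are halves of a single idiom: processBlocksInternal periodically drops the GLOBAL write lock mid-scan so other namesystem users can make progress, then retakes it and resets its counter. Condensed into one loop as a sketch; the method name, the per-block body, and the BLOCKS_PER_LOCK limit are illustrative, not the monitor's actual members:

private static final int BLOCKS_PER_LOCK = 1000; // illustrative limit

private void scanWithYield(Namesystem namesystem, Iterator<BlockInfo> it)
    throws InterruptedException {
  int checkedSinceLock = 0;
  namesystem.writeLock(RwLockMode.GLOBAL);
  try {
    while (it.hasNext()) {
      BlockInfo b = it.next(); // per-block work happens here, under the lock
      if (++checkedSinceLock >= BLOCKS_PER_LOCK) {
        // Yield: drop the lock briefly so queued readers and writers run.
        namesystem.writeUnlock(RwLockMode.GLOBAL, "scanWithYield");
        Thread.sleep(0, 500);
        namesystem.writeLock(RwLockMode.GLOBAL);
        checkedSinceLock = 0;
      }
    }
  } finally {
    namesystem.writeUnlock(RwLockMode.GLOBAL, "scanWithYield");
  }
}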

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -22,7 +22,6 @@ import static org.apache.hadoop.util.Time.monotonicNow;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -51,6 +50,7 @@ import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.*;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
@@ -864,7 +864,7 @@ public class DatanodeManager {
    */
   private void removeDatanode(DatanodeDescriptor nodeInfo,
       boolean removeBlocksFromBlocksMap) {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasWriteLock(RwLockMode.BM);
     heartbeatManager.removeDatanode(nodeInfo);
     if (removeBlocksFromBlocksMap) {
       blockManager.removeBlocksAssociatedTo(nodeInfo);
@@ -883,7 +883,7 @@ public class DatanodeManager {
    */
   public void removeDatanode(final DatanodeID node)
       throws UnregisteredNodeException {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       final DatanodeDescriptor descriptor = getDatanode(node);
       if (descriptor != null) {
@@ -893,7 +893,7 @@ public class DatanodeManager {
                                      + node + " does not exist");
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeDatanode");
+      namesystem.writeUnlock(RwLockMode.BM, "removeDatanode");
     }
   }
 
@@ -1344,12 +1344,12 @@ public class DatanodeManager {
   public void refreshNodes(final Configuration conf) throws IOException {
     refreshHostsReader(conf);
     // processExtraRedundancyBlocksOnInService involves FS in stopMaintenance and stopDecommission.
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     try {
       refreshDatanodes();
       countSoftwareVersions();
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "refreshNodes");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "refreshNodes");
     }
   }
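
Taken together, the DatanodeManager hunks illustrate the mode-selection rule this commit follows: removeDatanode touches only datanode and block state, so the BM mode suffices, while refreshNodes must escalate to GLOBAL because (per the comment above) its call chain reaches namespace state through stopMaintenance and stopDecommission. The BM-only case, as a caller-side sketch mirroring the hunk:

// Block/datanode state only => the BM mode is enough; escalate to
// RwLockMode.GLOBAL only when the call chain also crosses FS state.
namesystem.writeLock(RwLockMode.BM);
try {
  heartbeatManager.removeDatanode(nodeInfo);
} finally {
  namesystem.writeUnlock(RwLockMode.BM, "removeDatanode");
}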
 

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java

@@ -28,9 +28,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Time;
@@ -515,20 +515,20 @@ class HeartbeatManager implements DatanodeStatistics {
 
       for (DatanodeDescriptor dead : deadDatanodes) {
         // acquire the fsnamesystem lock, and then remove the dead node.
-        namesystem.writeLock(FSNamesystemLockMode.BM);
+        namesystem.writeLock(RwLockMode.BM);
         try {
           dm.removeDeadDatanode(dead, !dead.isMaintenance());
         } finally {
-          namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeDeadDatanode");
+          namesystem.writeUnlock(RwLockMode.BM, "removeDeadDatanode");
         }
       }
       for (DatanodeStorageInfo failedStorage : failedStorages) {
         // acquire the fsnamesystem lock, and remove blocks on the storage.
-        namesystem.writeLock(FSNamesystemLockMode.BM);
+        namesystem.writeLock(RwLockMode.BM);
         try {
           blockManager.removeBlocksAssociatedTo(failedStorage);
         } finally {
-          namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeBlocksAssociatedTo");
+          namesystem.writeUnlock(RwLockMode.BM, "removeBlocksAssociatedTo");
         }
       }
     }
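
A design note on the HeartbeatManager hunk: the BM write lock is taken and released once per dead node and once per failed storage, rather than around the whole loop, so a burst of failures never holds the lock for the full sweep. The same shape as a standalone sketch (dm and the collections are as in the hunk):

for (DatanodeDescriptor dead : deadDatanodes) {
  namesystem.writeLock(RwLockMode.BM); // short, per-item hold
  try {
    dm.removeDeadDatanode(dead, !dead.isMaintenance());
  } finally {
    namesystem.writeUnlock(RwLockMode.BM, "removeDeadDatanode");
  }
}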

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java

@@ -45,10 +45,10 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
 import org.apache.hadoop.hdfs.server.common.BlockAlias;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.util.RwLock;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import org.slf4j.Logger;
@@ -145,7 +145,7 @@ public class ProvidedStorageMap {
 
   private void processProvidedStorageReport()
       throws IOException {
-    assert lock.hasWriteLock(FSNamesystemLockMode.GLOBAL) : "Not holding write lock";
+    assert lock.hasWriteLock(RwLockMode.GLOBAL) : "Not holding write lock";
     if (providedStorageInfo.getBlockReportCount() == 0
         || providedDescriptor.activeProvidedDatanodes() == 0) {
       LOG.info("Calling process first blk report from storage: "
@@ -174,7 +174,7 @@ public class ProvidedStorageMap {
 
   public void removeDatanode(DatanodeDescriptor dnToRemove) {
     if (providedEnabled) {
-      assert lock.hasWriteLock(FSNamesystemLockMode.BM) : "Not holding write lock";
+      assert lock.hasWriteLock(RwLockMode.BM) : "Not holding write lock";
       providedDescriptor.remove(dnToRemove);
       // if all datanodes fail, set the block report count to 0
       if (providedDescriptor.activeProvidedDatanodes() == 0) {

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.StringUtils;
 
@@ -219,11 +219,11 @@ public class BackupImage extends FSImage {
       }
       lastAppliedTxId = logLoader.getLastAppliedTxId();
 
-      getNamesystem().writeLock(FSNamesystemLockMode.FS);
+      getNamesystem().writeLock(RwLockMode.FS);
       try {
         getNamesystem().dir.updateCountForQuota();
       } finally {
-        getNamesystem().writeUnlock(FSNamesystemLockMode.FS, "applyEdits");
+        getNamesystem().writeUnlock(RwLockMode.FS, "applyEdits");
       }
     } finally {
       backupInputStream.clear();

+ 17 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java

@@ -80,7 +80,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBl
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
@@ -89,6 +88,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Co
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.GSet;
@@ -318,7 +318,7 @@ public class CacheManager {
   }
 
   public void clearDirectiveStats() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     for (CacheDirective directive : directivesById.values()) {
       directive.resetStatistics();
     }
@@ -328,7 +328,7 @@ public class CacheManager {
    * @return Unmodifiable view of the collection of CachePools.
    */
   public Collection<CachePool> getCachePools() {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasReadLock(RwLockMode.FS);
     return Collections.unmodifiableCollection(cachePools.values());
   }
 
@@ -336,18 +336,18 @@ public class CacheManager {
    * @return Unmodifiable view of the collection of CacheDirectives.
    */
   public Collection<CacheDirective> getCacheDirectives() {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasReadLock(RwLockMode.FS);
     return Collections.unmodifiableCollection(directivesById.values());
   }
   
   @VisibleForTesting
   public GSet<CachedBlock, CachedBlock> getCachedBlocks() {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.BM);
+    assert namesystem.hasReadLock(RwLockMode.BM);
     return cachedBlocks;
   }
 
   private long getNextDirectiveId() throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     if (nextDirectiveId >= Long.MAX_VALUE - 1) {
       throw new IOException("No more available IDs.");
     }
@@ -575,7 +575,7 @@ public class CacheManager {
   public CacheDirectiveInfo addDirective(
       CacheDirectiveInfo info, FSPermissionChecker pc, EnumSet<CacheFlag> flags)
       throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     CacheDirective directive;
     try {
       CachePool pool = getCachePool(validatePoolName(info));
@@ -653,7 +653,7 @@ public class CacheManager {
 
   public void modifyDirective(CacheDirectiveInfo info,
       FSPermissionChecker pc, EnumSet<CacheFlag> flags) throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     String idString =
         (info.getId() == null) ?
             "(null)" : info.getId().toString();
@@ -704,7 +704,7 @@ public class CacheManager {
 
   private void removeInternal(CacheDirective directive)
       throws InvalidRequestException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     // Remove the corresponding entry in directivesByPath.
     String path = directive.getPath();
     if (!directivesByPath.remove(path, directive)) {
@@ -725,7 +725,7 @@ public class CacheManager {
 
   public void removeDirective(long id, FSPermissionChecker pc)
       throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     try {
       CacheDirective directive = getById(id);
       checkWritePermission(pc, directive.getPool());
@@ -741,7 +741,7 @@ public class CacheManager {
         listCacheDirectives(long prevId,
             CacheDirectiveInfo filter,
             FSPermissionChecker pc) throws IOException {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasReadLock(RwLockMode.FS);
     final int NUM_PRE_ALLOCATED_ENTRIES = 16;
     String filterPath = null;
     if (filter.getPath() != null) {
@@ -816,7 +816,7 @@ public class CacheManager {
    */
   public CachePoolInfo addCachePool(CachePoolInfo info)
       throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     CachePool pool;
     try {
       CachePoolInfo.validate(info);
@@ -846,7 +846,7 @@ public class CacheManager {
    */
   public void modifyCachePool(CachePoolInfo info)
       throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     StringBuilder bld = new StringBuilder();
     try {
       CachePoolInfo.validate(info);
@@ -916,7 +916,7 @@ public class CacheManager {
    */
   public void removeCachePool(String poolName)
       throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasWriteLock(RwLockMode.FS);
     try {
       CachePoolInfo.validateName(poolName);
       CachePool pool = cachePools.remove(poolName);
@@ -942,7 +942,7 @@ public class CacheManager {
 
   public BatchedListEntries<CachePoolEntry>
       listCachePools(FSPermissionChecker pc, String prevKey) {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.FS);
+    assert namesystem.hasReadLock(RwLockMode.FS);
     final int NUM_PRE_ALLOCATED_ENTRIES = 16;
     ArrayList<CachePoolEntry> results = 
         new ArrayList<CachePoolEntry>(NUM_PRE_ALLOCATED_ENTRIES);
@@ -1009,7 +1009,7 @@ public class CacheManager {
           datanodeID, DFS_NAMENODE_CACHING_ENABLED_KEY, blockIds.size());
       return;
     }
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     final long startTime = Time.monotonicNow();
     final long endTime;
     try {
@@ -1023,7 +1023,7 @@ public class CacheManager {
       processCacheReportImpl(datanode, blockIds);
     } finally {
       endTime = Time.monotonicNow();
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "processCacheReport");
+      namesystem.writeUnlock(RwLockMode.BM, "processCacheReport");
     }
 
     // Log the block report processing stats from Namenode perspective
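
CacheManager never locks on its own behalf; every mutator above opens with assert namesystem.hasWriteLock(RwLockMode.FS), pushing the locking obligation to the caller. A caller-side sketch that satisfies those assertions (the unlock tag is illustrative):

namesystem.writeLock(RwLockMode.FS);
try {
  // Safe: addCachePool asserts hasWriteLock(RwLockMode.FS) on entry.
  cacheManager.addCachePool(info);
} finally {
  namesystem.writeUnlock(RwLockMode.FS, "addCachePool");
}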

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java

@@ -32,12 +32,12 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Lists;
@@ -245,14 +245,14 @@ class Checkpointer extends Daemon {
 
       if(needReloadImage) {
         LOG.info("Loading image with txid " + sig.mostRecentCheckpointTxId);
-        backupNode.namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+        backupNode.namesystem.writeLock(RwLockMode.GLOBAL);
         try {
           File file = bnStorage.findImageFile(NameNodeFile.IMAGE,
               sig.mostRecentCheckpointTxId);
           bnImage.reloadFromImageFile(file, backupNode.getNamesystem());
         } finally {
           backupNode.namesystem.writeUnlock(
-              FSNamesystemLockMode.GLOBAL, "doCheckpointByBackupNode");
+              RwLockMode.GLOBAL, "doCheckpointByBackupNode");
         }
       }
       rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem());
@@ -260,7 +260,7 @@ class Checkpointer extends Daemon {
     
     long txid = bnImage.getLastAppliedTxId();
     
-    backupNode.namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    backupNode.namesystem.writeLock(RwLockMode.GLOBAL);
     try {
       backupNode.namesystem.setImageLoaded();
       if(backupNode.namesystem.getBlocksTotal() > 0) {
@@ -274,7 +274,7 @@ class Checkpointer extends Daemon {
         bnImage.updateStorageVersion();
       }
     } finally {
-      backupNode.namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "doCheckpoint");
+      backupNode.namesystem.writeUnlock(RwLockMode.GLOBAL, "doCheckpoint");
     }
 
     if(cpCmd.needToReturnImage()) {

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java

@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.XAttr;
@@ -119,8 +119,8 @@ public class ContentSummaryComputationContext {
 
     boolean hadDirReadLock = dir.hasReadLock();
     boolean hadDirWriteLock = dir.hasWriteLock();
-    boolean hadFsnReadLock = fsn.hasReadLock(FSNamesystemLockMode.GLOBAL);
-    boolean hadFsnWriteLock = fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    boolean hadFsnReadLock = fsn.hasReadLock(RwLockMode.GLOBAL);
+    boolean hadFsnWriteLock = fsn.hasWriteLock(RwLockMode.GLOBAL);
 
     // sanity check.
     if (!hadDirReadLock || !hadFsnReadLock || hadDirWriteLock ||
@@ -131,14 +131,14 @@ public class ContentSummaryComputationContext {
 
     // unlock
     dir.readUnlock();
-    fsn.readUnlock(FSNamesystemLockMode.GLOBAL, "contentSummary");
+    fsn.readUnlock(RwLockMode.GLOBAL, "contentSummary");
 
     try {
       Thread.sleep(sleepMilliSec, sleepNanoSec);
     } catch (InterruptedException ie) {
     } finally {
       // reacquire
-      fsn.readLock(FSNamesystemLockMode.GLOBAL);
+      fsn.readLock(RwLockMode.GLOBAL);
       dir.readLock();
     }
     yieldCount++;
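
One detail of the yield above is worth isolating: the inner FSDirectory lock is released before the namesystem lock and reacquired after it, keeping the acquisition order (fsn, then dir) consistent across the sleep. A sketch of that discipline as a helper; the method name is illustrative:

/** Sketch: pause without the locks, preserving acquisition order. */
static void yieldLocks(FSNamesystem fsn, FSDirectory dir,
    long millis, int nanos) throws InterruptedException {
  dir.readUnlock();                                   // inner lock first
  fsn.readUnlock(RwLockMode.GLOBAL, "contentSummary");
  try {
    Thread.sleep(millis, nanos);
  } finally {
    fsn.readLock(RwLockMode.GLOBAL);                  // outer lock first
    dir.readLock();
  }
}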

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java

@@ -48,8 +48,8 @@ import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Lists;
 
@@ -189,12 +189,12 @@ public class EncryptionZoneManager {
       final int count) throws IOException {
     INodesInPath iip;
     final FSPermissionChecker pc = dir.getPermissionChecker();
-    dir.getFSNamesystem().readLock(FSNamesystemLockMode.FS);
+    dir.getFSNamesystem().readLock(RwLockMode.FS);
     try {
       iip = dir.resolvePath(pc, zone, DirOp.READ);
     } finally {
       dir.getFSNamesystem().readUnlock(
-          FSNamesystemLockMode.FS, "pauseForTestingAfterNthCheckpoint");
+          RwLockMode.FS, "pauseForTestingAfterNthCheckpoint");
     }
     reencryptionHandler
         .pauseForTestingAfterNthCheckpoint(iip.getLastINode().getId(), count);
@@ -215,7 +215,7 @@ public class EncryptionZoneManager {
       throws IOException {
     final FSPermissionChecker pc = dir.getPermissionChecker();
     final INode inode;
-    dir.getFSNamesystem().readLock(FSNamesystemLockMode.FS);
+    dir.getFSNamesystem().readLock(RwLockMode.FS);
     dir.readLock();
     try {
       final INodesInPath iip = dir.resolvePath(pc, zone, DirOp.READ);
@@ -226,7 +226,7 @@ public class EncryptionZoneManager {
       return getReencryptionStatus().getZoneStatus(inode.getId());
     } finally {
       dir.readUnlock();
-      dir.getFSNamesystem().readUnlock(FSNamesystemLockMode.FS, "getZoneStatus");
+      dir.getFSNamesystem().readUnlock(RwLockMode.FS, "getZoneStatus");
     }
   }
 
@@ -283,11 +283,11 @@ public class EncryptionZoneManager {
     if (getProvider() == null || reencryptionHandler == null) {
       return;
     }
-    dir.getFSNamesystem().writeLock(FSNamesystemLockMode.FS);
+    dir.getFSNamesystem().writeLock(RwLockMode.FS);
     try {
       reencryptionHandler.stopThreads();
     } finally {
-      dir.getFSNamesystem().writeUnlock(FSNamesystemLockMode.FS, "stopReencryptThread");
+      dir.getFSNamesystem().writeUnlock(RwLockMode.FS, "stopReencryptThread");
     }
     if (reencryptHandlerExecutor != null) {
       reencryptHandlerExecutor.shutdownNow();

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java

@@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RetriableException;
 
 import org.apache.hadoop.util.Preconditions;
@@ -83,7 +83,7 @@ final class FSDirAppendOp {
       final String srcArg, final FSPermissionChecker pc, final String holder,
       final String clientMachine, final boolean newBlock,
       final boolean logRetryCache) throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
 
     final LocatedBlock lb;
     final FSDirectory fsd = fsn.getFSDirectory();
@@ -181,7 +181,7 @@ final class FSDirAppendOp {
       final String clientMachine, final boolean newBlock,
       final boolean writeToEditLog, final boolean logRetryCache)
       throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
 
     final INodeFile file = iip.getLastINode().asFile();
     final QuotaCounts delta = verifyQuotaForUCBlock(fsn, file, iip);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java

@@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.ChunkedArrayList;
 
 import java.io.IOException;
@@ -172,7 +172,7 @@ class FSDirDeleteOp {
       FSNamesystem fsn, INodesInPath iip, boolean logRetryCache)
       throws IOException {
     // Delete INode and modify BlockInfo
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + iip.getPath());
     }

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java

@@ -50,7 +50,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.FileEdekInfo;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
@@ -84,8 +84,8 @@ final class FSDirEncryptionZoneOp {
   private static EncryptedKeyVersion generateEncryptedDataEncryptionKey(
       final FSDirectory fsd, final String ezKeyName) throws IOException {
     // must not be holding lock during this operation
-    assert !fsd.getFSNamesystem().hasReadLock(FSNamesystemLockMode.FS);
-    assert !fsd.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
+    assert !fsd.getFSNamesystem().hasReadLock(RwLockMode.FS);
+    assert !fsd.getFSNamesystem().hasWriteLock(RwLockMode.FS);
     if (ezKeyName == null) {
       return null;
     }
@@ -383,7 +383,7 @@ final class FSDirEncryptionZoneOp {
    */
   static void saveFileXAttrsForBatch(FSDirectory fsd,
       List<FileEdekInfo> batch) {
-    assert fsd.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
+    assert fsd.getFSNamesystem().hasWriteLock(RwLockMode.FS);
     if (batch != null && !batch.isEmpty()) {
       for (FileEdekInfo entry : batch) {
         final INode inode = fsd.getInode(entry.getInodeId());
@@ -657,13 +657,13 @@ final class FSDirEncryptionZoneOp {
     Preconditions.checkNotNull(ezKeyName);
 
     // Generate EDEK while not holding the fsn lock.
-    fsn.writeUnlock(FSNamesystemLockMode.FS, "getEncryptionKeyInfo");
+    fsn.writeUnlock(RwLockMode.FS, "getEncryptionKeyInfo");
     try {
       EncryptionFaultInjector.getInstance().startFileBeforeGenerateKey();
       return new EncryptionKeyInfo(protocolVersion, suite, ezKeyName,
           generateEncryptedDataEncryptionKey(fsd, ezKeyName));
     } finally {
-      fsn.writeLock(FSNamesystemLockMode.FS);
+      fsn.writeLock(RwLockMode.FS);
       EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
     }
   }
@@ -728,13 +728,13 @@ final class FSDirEncryptionZoneOp {
       final FSPermissionChecker pc, final String zone) throws IOException {
     assert dir.getProvider() != null;
     final INodesInPath iip;
-    dir.getFSNamesystem().readLock(FSNamesystemLockMode.FS);
+    dir.getFSNamesystem().readLock(RwLockMode.FS);
     try {
       iip = dir.resolvePath(pc, zone, DirOp.READ);
       dir.ezManager.checkEncryptionZoneRoot(iip.getLastINode(), zone);
       return dir.ezManager.getKeyName(iip);
     } finally {
-      dir.getFSNamesystem().readUnlock(FSNamesystemLockMode.FS, "getKeyNameForZone");
+      dir.getFSNamesystem().readUnlock(RwLockMode.FS, "getKeyNameForZone");
     }
   }
 }
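
The getEncryptionKeyInfo hunk inverts the usual pattern: the method is entered with the FS write lock held, releases it around the expensive key-provider call, and retakes it in the finally block so the caller still holds the lock on return. Reduced to its shape as a sketch:

// Entered holding the FS write lock; the caller expects it back on exit.
fsn.writeUnlock(RwLockMode.FS, "getEncryptionKeyInfo");
try {
  return new EncryptionKeyInfo(protocolVersion, suite, ezKeyName,
      generateEncryptedDataEncryptionKey(fsd, ezKeyName));
} finally {
  fsn.writeLock(RwLockMode.FS); // retake before control leaves the method
}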

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.NoECPolicySetException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.erasurecode.CodecRegistry;
@@ -72,7 +72,7 @@ final class FSDirErasureCodingOp {
    */
   static ErasureCodingPolicy getEnabledErasureCodingPolicyByName(
       final FSNamesystem fsn, final String ecPolicyName) throws IOException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsn.hasReadLock(RwLockMode.FS);
     ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
         .getEnabledPolicyByName(ecPolicyName);
     if (ecPolicy == null) {
@@ -104,7 +104,7 @@ final class FSDirErasureCodingOp {
    */
   static ErasureCodingPolicy getErasureCodingPolicyByName(
       final FSNamesystem fsn, final String ecPolicyName) throws IOException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsn.hasReadLock(RwLockMode.FS);
     ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
         .getErasureCodingPolicyByName(ecPolicyName);
     if (ecPolicy == null) {
@@ -133,7 +133,7 @@ final class FSDirErasureCodingOp {
       final String srcArg, final String ecPolicyName,
       final FSPermissionChecker pc, final boolean logRetryCache)
       throws IOException, AccessControlException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.FS);
+    assert fsn.hasWriteLock(RwLockMode.FS);
 
     String src = srcArg;
     FSDirectory fsd = fsn.getFSDirectory();
@@ -210,7 +210,7 @@ final class FSDirErasureCodingOp {
   static FileStatus unsetErasureCodingPolicy(final FSNamesystem fsn,
       final String srcArg, final FSPermissionChecker pc,
       final boolean logRetryCache) throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.FS);
+    assert fsn.hasWriteLock(RwLockMode.FS);
 
     String src = srcArg;
     FSDirectory fsd = fsn.getFSDirectory();
@@ -354,7 +354,7 @@ final class FSDirErasureCodingOp {
   static ErasureCodingPolicy getErasureCodingPolicy(final FSNamesystem fsn,
       final String src, FSPermissionChecker pc)
       throws IOException, AccessControlException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsn.hasReadLock(RwLockMode.FS);
 
     if (FSDirectory.isExactReservedName(src)) {
       return null;
@@ -417,7 +417,7 @@ final class FSDirErasureCodingOp {
    */
   static ErasureCodingPolicy unprotectedGetErasureCodingPolicy(
       final FSNamesystem fsn, final INodesInPath iip) throws IOException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsn.hasReadLock(RwLockMode.FS);
 
     return getErasureCodingPolicyForPath(fsn.getFSDirectory(), iip);
   }
@@ -430,7 +430,7 @@ final class FSDirErasureCodingOp {
    */
   static ErasureCodingPolicyInfo[] getErasureCodingPolicies(
       final FSNamesystem fsn) throws IOException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsn.hasReadLock(RwLockMode.FS);
     return fsn.getErasureCodingPolicyManager().getPolicies();
   }
 
@@ -442,7 +442,7 @@ final class FSDirErasureCodingOp {
    */
   static Map<String, String> getErasureCodingCodecs(final FSNamesystem fsn)
       throws IOException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsn.hasReadLock(RwLockMode.FS);
     return CodecRegistry.getInstance().getCodec2CoderCompactMap();
   }
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java

@@ -31,8 +31,8 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Lists;
 
 /**
@@ -65,7 +65,7 @@ final class FSDirSatisfyStoragePolicyOp {
   static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src, boolean logRetryCache) throws IOException {
 
-    assert fsd.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
+    assert fsd.getFSNamesystem().hasWriteLock(RwLockMode.FS);
     FSPermissionChecker pc = fsd.getPermissionChecker();
     INodesInPath iip;
     fsd.writeLock();

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java

@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 
 import org.apache.hadoop.fs.ContentSummary;
@@ -42,6 +41,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.security.AccessControlException;
 
 import java.io.FileNotFoundException;
@@ -447,7 +447,7 @@ class FSDirStatAndListingOp {
       }
       // ComputeFileSize and needLocation need BM lock.
       if (needLocation) {
-        fsd.getFSNamesystem().readLock(FSNamesystemLockMode.BM);
+        fsd.getFSNamesystem().readLock(RwLockMode.BM);
         try {
           final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
           final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
@@ -460,7 +460,7 @@ class FSDirStatAndListingOp {
             loc = new LocatedBlocks();
           }
         } finally {
-          fsd.getFSNamesystem().readUnlock(FSNamesystemLockMode.BM, "createFileStatus");
+          fsd.getFSNamesystem().readUnlock(RwLockMode.BM, "createFileStatus");
         }
       }
     } else if (node.isDirectory()) {

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java

@@ -38,9 +38,9 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 
 /**
  * Helper class to perform truncate operation.
@@ -72,7 +72,7 @@ final class FSDirTruncateOp {
       final String clientMachine, final long mtime,
       final BlocksMapUpdateInfo toRemoveBlocks, final FSPermissionChecker pc)
       throws IOException, UnresolvedLinkException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
 
     FSDirectory fsd = fsn.getFSDirectory();
     final String src;
@@ -176,7 +176,7 @@ final class FSDirTruncateOp {
       final long newLength, final long mtime, final Block truncateBlock)
       throws UnresolvedLinkException, QuotaExceededException,
       SnapshotAccessControlException, IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
 
     FSDirectory fsd = fsn.getFSDirectory();
     INodeFile file = iip.getLastINode().asFile();
@@ -220,7 +220,7 @@ final class FSDirTruncateOp {
   static Block prepareFileForTruncate(FSNamesystem fsn, INodesInPath iip,
       String leaseHolder, String clientMachine, long lastBlockDelta,
       Block newBlock) throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
 
     INodeFile file = iip.getLastINode().asFile();
     assert !file.isStriped();
@@ -304,7 +304,7 @@ final class FSDirTruncateOp {
   private static boolean unprotectedTruncate(FSNamesystem fsn,
       INodesInPath iip, long newLength, BlocksMapUpdateInfo collectedBlocks,
       long mtime, QuotaCounts delta) throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
 
     INodeFile file = iip.getLastINode().asFile();
     int latestSnapshot = iip.getLatestSnapshotId();

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -53,6 +52,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
@@ -105,7 +105,7 @@ class FSDirWriteFileOp {
    */
   static void persistBlocks(
       FSDirectory fsd, String path, INodeFile file, boolean logRetryCache) {
-    assert fsd.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
+    assert fsd.getFSNamesystem().hasWriteLock(RwLockMode.FS);
     Preconditions.checkArgument(file.isUnderConstruction());
     fsd.getEditLog().logUpdateBlocks(path, file, logRetryCache);
     if(NameNode.stateChangeLog.isDebugEnabled()) {
@@ -364,7 +364,7 @@ class FSDirWriteFileOp {
       boolean shouldReplicate, String ecPolicyName, String storagePolicy,
       boolean logRetryEntry)
       throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.FS);
+    assert fsn.hasWriteLock(RwLockMode.FS);
     boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
     boolean isLazyPersist = flag.contains(CreateFlag.LAZY_PERSIST);
 
@@ -372,7 +372,7 @@ class FSDirWriteFileOp {
     FSDirectory fsd = fsn.getFSDirectory();
 
     if (iip.getLastINode() != null) {
-      fsn.writeLock(FSNamesystemLockMode.BM);
+      fsn.writeLock(RwLockMode.BM);
       try {
         if (overwrite) {
           List<INode> toRemoveINodes = new ChunkedArrayList<>();
@@ -392,7 +392,7 @@ class FSDirWriteFileOp {
               clientMachine + " already exists");
         }
       } finally {
-        fsn.writeUnlock(FSNamesystemLockMode.BM, "create");
+        fsn.writeUnlock(RwLockMode.BM, "create");
       }
     }
     fsn.checkFsObjectLimit();
@@ -602,7 +602,7 @@ class FSDirWriteFileOp {
       FSNamesystem fsn, INodesInPath iip, long fileId, String clientName,
       ExtendedBlock previous, LocatedBlock[] onRetryBlock)
       throws IOException {
-    assert fsn.hasReadLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasReadLock(RwLockMode.GLOBAL);
     String src = iip.getPath();
     checkBlock(fsn, previous);
     onRetryBlock[0] = null;
@@ -700,7 +700,7 @@ class FSDirWriteFileOp {
       FSNamesystem fsn, INodesInPath iip,
       String holder, Block last, long fileId)
       throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
     final String src = iip.getPath();
     final INodeFile pendingFile;
     INode inode = null;
@@ -784,7 +784,7 @@ class FSDirWriteFileOp {
   static void saveAllocatedBlock(FSNamesystem fsn, String src,
       INodesInPath inodesInPath, Block newBlock, DatanodeStorageInfo[] targets,
       BlockType blockType) throws IOException {
-    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsn.hasWriteLock(RwLockMode.GLOBAL);
     BlockInfo b = addBlock(fsn.dir, src, inodesInPath, newBlock, targets,
         blockType);
     logAllocatedBlock(src, b);

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.util.StringUtils;
 
@@ -64,6 +63,7 @@ import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
 import org.apache.hadoop.hdfs.util.ByteArray;
 import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
@@ -263,31 +263,31 @@ public class FSDirectory implements Closeable {
    * remain as placeholders only
    */
   void readLock() {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.FS) :
+    assert namesystem.hasReadLock(RwLockMode.FS) :
         "Should hold read lock of namesystem FSLock";
   }
 
   void readUnlock() {
-    assert namesystem.hasReadLock(FSNamesystemLockMode.FS) :
+    assert namesystem.hasReadLock(RwLockMode.FS) :
         "Should hold read lock of namesystem FSLock";
   }
 
   void writeLock() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS) :
+    assert namesystem.hasWriteLock(RwLockMode.FS) :
         "Should hold write lock of namesystem FSLock";
   }
 
   void writeUnlock() {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.FS) :
+    assert namesystem.hasWriteLock(RwLockMode.FS) :
         "Should hold write lock of namesystem FSLock";
   }
 
   boolean hasWriteLock() {
-    return namesystem.hasWriteLock(FSNamesystemLockMode.FS);
+    return namesystem.hasWriteLock(RwLockMode.FS);
   }
 
   boolean hasReadLock() {
-    return namesystem.hasReadLock(FSNamesystemLockMode.FS);
+    return namesystem.hasReadLock(RwLockMode.FS);
   }
 
   public int getListLimit() {
@@ -1106,7 +1106,7 @@ public class FSDirectory implements Closeable {
    */
   public void updateSpaceForCompleteBlock(BlockInfo completeBlk,
       INodesInPath inodes) throws IOException {
-    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert namesystem.hasWriteLock(RwLockMode.GLOBAL);
     INodesInPath iip = inodes != null ? inodes :
         INodesInPath.fromINode(namesystem.getBlockCollection(completeBlk));
     INodeFile fileINode = iip.getLastINode().asFile();
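
The lock-method hunk above is the most structural change in this file: under fine-grained locking, FSDirectory's readLock/writeLock family "remain as placeholders only" and merely assert that the corresponding namesystem FS lock is already held. A caller therefore locks once at the namesystem, and the directory calls become no-op checks, roughly (the unlock tag is illustrative):

fsn.writeLock(RwLockMode.FS);
try {
  fsd.writeLock();    // assertion only: verifies hasWriteLock(RwLockMode.FS)
  try {
    // ... mutate the directory tree ...
  } finally {
    fsd.writeUnlock(); // assertion only as well
  }
} finally {
  fsn.writeUnlock(RwLockMode.FS, "example");
}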

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -27,7 +27,6 @@ import java.util.EnumMap;
 import java.util.EnumSet;
 import java.util.List;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -113,6 +112,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.util.Holder;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.log.LogThrottlingHelper;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.Timer;
@@ -172,7 +172,7 @@ public class FSEditLogLoader {
     StartupProgress prog = NameNode.getStartupProgress();
     Step step = createStartupProgressStep(edits);
     prog.beginStep(Phase.LOADING_EDITS, step);
-    fsNamesys.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsNamesys.writeLock(RwLockMode.GLOBAL);
     try {
       long startTime = timer.monotonicNow();
       LogAction preLogAction = LOAD_EDITS_LOG_HELPER.record("pre", startTime);
@@ -197,7 +197,7 @@ public class FSEditLogLoader {
       return numEdits;
     } finally {
       edits.close();
-      fsNamesys.writeUnlock(FSNamesystemLockMode.GLOBAL, "loadFSEdits");
+      fsNamesys.writeUnlock(RwLockMode.GLOBAL, "loadFSEdits");
       prog.endStep(Phase.LOADING_EDITS, step);
     }
   }
@@ -219,7 +219,7 @@ public class FSEditLogLoader {
       LOG.trace("Acquiring write lock to replay edit log");
     }
 
-    fsNamesys.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsNamesys.writeLock(RwLockMode.GLOBAL);
     FSDirectory fsDir = fsNamesys.dir;
     fsDir.writeLock();
 
@@ -343,7 +343,7 @@ public class FSEditLogLoader {
         in.close();
       }
       fsDir.writeUnlock();
-      fsNamesys.writeUnlock(FSNamesystemLockMode.GLOBAL, "loadEditRecords");
+      fsNamesys.writeUnlock(RwLockMode.GLOBAL, "loadEditRecords");
 
       if (LOG.isTraceEnabled()) {
         LOG.trace("replaying edit log finished");

File diff suppressed because it is too large
+ 123 - 123
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java


+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java

@@ -25,7 +25,6 @@ import java.util.Optional;
 import java.util.Stack;
 import java.util.function.LongFunction;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.util.Time;
@@ -42,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AuthorizationContext;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -50,7 +50,7 @@ import org.apache.hadoop.security.UserGroupInformation;
  * The state of this class need not be synchronized as it has data structures that
  * are read-only.
  * 
- * Some of the helper methods are guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}.
+ * Some of the helper methods are guarded by {@link FSNamesystem#readLock(RwLockMode)}.
  */
 public class FSPermissionChecker implements AccessControlEnforcer {
   static final Logger LOG = LoggerFactory.getLogger(UserGroupInformation.class);
@@ -342,7 +342,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
    * @param ignoreEmptyDir Ignore permission checking for empty directory?
    * @throws AccessControlException
    * 
-   * Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}
+   * Guarded by {@link FSNamesystem#readLock(RwLockMode)}
    * Caller of this method must hold that lock.
    */
   void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner,
@@ -555,7 +555,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     return inodeAttrs;
   }
 
-  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}. */
+  /** Guarded by {@link FSNamesystem#readLock(RwLockMode)}. */
   private void checkOwner(INodeAttributes[] inodes, byte[][] components, int i)
       throws AccessControlException {
     if (getUser().equals(inodes[i].getUserName())) {
@@ -566,7 +566,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
         " is not the owner of inode=" + getPath(components, 0, i));
   }
 
-  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}.
+  /** Guarded by {@link FSNamesystem#readLock(RwLockMode)}.
    * @throws AccessControlException
    * @throws ParentNotDirectoryException
    * @throws UnresolvedPathException
@@ -580,7 +580,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     }
   }
 
-  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}. */
+  /** Guarded by {@link FSNamesystem#readLock(RwLockMode)}. */
   private void checkSubAccess(byte[][] components, int pathIdx,
       INode inode, int snapshotId, FsAction access, boolean ignoreEmptyDir)
       throws AccessControlException {
@@ -654,7 +654,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     }
   }
 
-  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}. */
+  /** Guarded by {@link FSNamesystem#readLock(RwLockMode)}. */
   private void check(INodeAttributes[] inodes, byte[][] components, int i,
       FsAction access) throws AccessControlException {
     INodeAttributes inode = (i >= 0) ? inodes[i] : null;
@@ -768,7 +768,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     return !foundMatch && mode.getOtherAction().implies(access);
   }
 
-  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}. */
+  /** Guarded by {@link FSNamesystem#readLock(RwLockMode)}. */
   private void checkStickyBit(INodeAttributes[] inodes, byte[][] components,
       int index) throws AccessControlException {
     INodeAttributes parent = inodes[index];

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java

@@ -28,9 +28,9 @@ import java.util.List;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -128,7 +128,7 @@ public abstract class FSTreeTraverser {
       List<byte[]> startAfters, final TraverseInfo traverseInfo)
       throws IOException, InterruptedException {
     assert dir.hasReadLock();
-    assert dir.getFSNamesystem().hasReadLock(FSNamesystemLockMode.FS);
+    assert dir.getFSNamesystem().hasReadLock(RwLockMode.FS);
     long lockStartTime = timer.monotonicNow();
     Preconditions.checkNotNull(curr, "Current inode can't be null");
     checkINodeReady(startId);
@@ -262,13 +262,13 @@ public abstract class FSTreeTraverser {
   }
 
   protected void readLock() {
-    dir.getFSNamesystem().readLock(FSNamesystemLockMode.FS);
+    dir.getFSNamesystem().readLock(RwLockMode.FS);
     dir.readLock();
   }
 
   protected void readUnlock() {
     dir.readUnlock();
-    dir.getFSNamesystem().readUnlock(FSNamesystemLockMode.FS, "FSTreeTraverser");
+    dir.getFSNamesystem().readUnlock(RwLockMode.FS, "FSTreeTraverser");
   }
 
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java

@@ -26,13 +26,13 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
 import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor;
 import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor.Counts;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
@@ -274,14 +274,14 @@ public class FsImageValidation {
 
       final FSImageFormat.LoaderDelegator loader
           = FSImageFormat.newLoader(conf, namesystem);
-      namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+      namesystem.writeLock(RwLockMode.GLOBAL);
       namesystem.getFSDirectory().writeLock();
       try {
         loader.load(fsImageFile, false);
         fsImage.setLastAppliedTxId(loader);
       } finally {
         namesystem.getFSDirectory().writeUnlock();
-        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "loadImage");
+        namesystem.writeUnlock(RwLockMode.GLOBAL, "loadImage");
       }
     }
     t.cancel();

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java

@@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
@@ -136,7 +136,7 @@ public class LeaseManager {
    * calling this method.
    */
   synchronized long getNumUnderConstructionBlocks() {
-    assert this.fsnamesystem.hasReadLock(FSNamesystemLockMode.GLOBAL) :
+    assert this.fsnamesystem.hasReadLock(RwLockMode.GLOBAL) :
         "The FSNamesystem read lock wasn't acquired before counting under construction blocks";
     long numUCBlocks = 0;
     for (Long id : getINodeIdWithLeases()) {
@@ -208,7 +208,7 @@ public class LeaseManager {
    */
   public Set<INodesInPath> getINodeWithLeases(final INodeDirectory
       ancestorDir) throws IOException {
-    assert fsnamesystem.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsnamesystem.hasReadLock(RwLockMode.FS);
     final long startTimeMs = Time.monotonicNow();
     Set<INodesInPath> iipSet = new HashSet<>();
     final INode[] inodes = getINodesWithLease();
@@ -285,7 +285,7 @@ public class LeaseManager {
    */
   public BatchedListEntries<OpenFileEntry> getUnderConstructionFiles(
       final long prevId, final String path) throws IOException {
-    assert fsnamesystem.hasReadLock(FSNamesystemLockMode.FS);
+    assert fsnamesystem.hasReadLock(RwLockMode.FS);
     SortedMap<Long, Lease> remainingLeases;
     synchronized (this) {
       remainingLeases = leasesById.tailMap(prevId, false);
@@ -543,13 +543,13 @@ public class LeaseManager {
             continue;
           }
 
-          fsnamesystem.writeLockInterruptibly(FSNamesystemLockMode.GLOBAL);
+          fsnamesystem.writeLockInterruptibly(RwLockMode.GLOBAL);
           try {
             if (!fsnamesystem.isInSafeMode()) {
               needSync = checkLeases(candidates);
             }
           } finally {
-            fsnamesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "leaseManager");
+            fsnamesystem.writeUnlock(RwLockMode.GLOBAL, "leaseManager");
            // lease reassignments should be sync'ed.
             if (needSync) {
               fsnamesystem.getEditLog().logSync();
@@ -574,7 +574,7 @@ public class LeaseManager {
 
   private synchronized boolean checkLeases(Collection<Lease> leasesToCheck) {
     boolean needSync = false;
-    assert fsnamesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    assert fsnamesystem.hasWriteLock(RwLockMode.GLOBAL);
 
     long start = monotonicNow();
     for (Lease leaseToCheck : leasesToCheck) {
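
Two details in the LeaseManager hunks are easy to miss: the monitor takes the GLOBAL write lock interruptibly, so shutdown can cancel a thread waiting on the lock, and the edit-log sync runs in the finally block only after the lock is released. As a sketch, with a hypothetical wrapper method name:

private void checkLeasesOnce(Collection<Lease> candidates)
    throws InterruptedException {
  boolean needSync = false;
  fsnamesystem.writeLockInterruptibly(RwLockMode.GLOBAL); // cancellable wait
  try {
    if (!fsnamesystem.isInSafeMode()) {
      needSync = checkLeases(candidates);
    }
  } finally {
    fsnamesystem.writeUnlock(RwLockMode.GLOBAL, "leaseManager");
    if (needSync) {
      fsnamesystem.getEditLog().logSync(); // flush outside the write lock
    }
  }
}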

+ 13 - 13
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.util.Preconditions;
@@ -73,6 +72,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.ipc.ExternalCall;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
@@ -2239,14 +2239,14 @@ public class NameNode extends ReconfigurableBase implements
     
     @Override
     public void writeLock() {
-      namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+      namesystem.writeLock(RwLockMode.GLOBAL);
       namesystem.lockRetryCache();
     }
     
     @Override
     public void writeUnlock() {
       namesystem.unlockRetryCache();
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "HAState");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "HAState");
     }
     
     /** Check if an operation of given category is allowed */
@@ -2397,7 +2397,7 @@ public class NameNode extends ReconfigurableBase implements
       final String property) throws ReconfigurationException {
     BlockManager bm = namesystem.getBlockManager();
     int newSetting;
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       if (property.equals(DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY)) {
         bm.setMaxReplicationStreams(
@@ -2435,7 +2435,7 @@ public class NameNode extends ReconfigurableBase implements
       throw new ReconfigurationException(property, newVal, getConf().get(
           property), e);
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfReplicationParameters");
+      namesystem.writeUnlock(RwLockMode.BM, "reconfReplicationParameters");
     }
   }
 
@@ -2455,7 +2455,7 @@ public class NameNode extends ReconfigurableBase implements
   private String reconfHeartbeatInterval(final DatanodeManager datanodeManager,
       final String property, final String newVal)
       throws ReconfigurationException {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       if (newVal == null) {
         // set to default
@@ -2472,7 +2472,7 @@ public class NameNode extends ReconfigurableBase implements
       throw new ReconfigurationException(property, newVal, getConf().get(
           property), nfe);
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfHeartbeatInterval");
+      namesystem.writeUnlock(RwLockMode.BM, "reconfHeartbeatInterval");
       LOG.info("RECONFIGURE* changed heartbeatInterval to "
           + datanodeManager.getHeartbeatInterval());
     }
@@ -2481,7 +2481,7 @@ public class NameNode extends ReconfigurableBase implements
   private String reconfHeartbeatRecheckInterval(
       final DatanodeManager datanodeManager, final String property,
       final String newVal) throws ReconfigurationException {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       if (newVal == null) {
         // set to default
@@ -2496,7 +2496,7 @@ public class NameNode extends ReconfigurableBase implements
       throw new ReconfigurationException(property, newVal, getConf().get(
           property), nfe);
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfHeartbeatRecheckInterval");
+      namesystem.writeUnlock(RwLockMode.BM, "reconfHeartbeatRecheckInterval");
       LOG.info("RECONFIGURE* changed heartbeatRecheckInterval to "
           + datanodeManager.getHeartbeatRecheckInterval());
     }
@@ -2621,7 +2621,7 @@ public class NameNode extends ReconfigurableBase implements
   String reconfigureSlowNodesParameters(final DatanodeManager datanodeManager,
       final String property, final String newVal) throws ReconfigurationException {
     BlockManager bm = namesystem.getBlockManager();
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     String result;
     try {
       switch (property) {
@@ -2698,13 +2698,13 @@ public class NameNode extends ReconfigurableBase implements
       throw new ReconfigurationException(property, newVal, getConf().get(
           property), e);
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfigureSlowNodesParameters");
+      namesystem.writeUnlock(RwLockMode.BM, "reconfigureSlowNodesParameters");
     }
   }
 
   private String reconfigureBlockInvalidateLimit(final DatanodeManager datanodeManager,
       final String property, final String newVal) throws ReconfigurationException {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       if (newVal == null) {
         datanodeManager.setBlockInvalidateLimit(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
@@ -2718,7 +2718,7 @@ public class NameNode extends ReconfigurableBase implements
     } catch (NumberFormatException e) {
       throw new ReconfigurationException(property, newVal, getConf().get(property), e);
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfigureBlockInvalidateLimit");
+      namesystem.writeUnlock(RwLockMode.BM, "reconfigureBlockInvalidateLimit");
     }
   }
 

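Every reconfiguration handler above now takes only the block-manager (BM) write lock instead of the global one. A self-contained, pure-JDK sketch of why the narrower scope matters under the fine-grained manager shown further down (the two ReentrantReadWriteLocks are stand-ins for the real fsLock/bmLock fields, and the scenario is illustrative):

  import java.util.concurrent.locks.ReentrantReadWriteLock;

  public class BmScopeDemo {
    public static void main(String[] args) throws InterruptedException {
      ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(true);
      ReentrantReadWriteLock bmLock = new ReentrantReadWriteLock(true);

      bmLock.writeLock().lock();  // a reconfiguration holds only the BM lock
      try {
        // Another thread's namespace read is not blocked; with one global
        // lock its tryLock would fail while the reconfiguration ran.
        Thread reader = new Thread(() -> {
          boolean ok = fsLock.readLock().tryLock();
          System.out.println("FS read acquired during BM reconfig: " + ok);
          if (ok) {
            fsLock.readLock().unlock();
          }
        });
        reader.start();
        reader.join();
      } finally {
        bmLock.writeLock().unlock();
      }
    }
  }
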
+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java

@@ -38,7 +38,6 @@ import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -80,6 +79,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NodeBase;
@@ -291,7 +291,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     }
 
     // TODO: Just hold the BM read lock.
-    namenode.getNamesystem().readLock(FSNamesystemLockMode.GLOBAL);
+    namenode.getNamesystem().readLock(RwLockMode.GLOBAL);
     try {
       //get blockInfo
       Block block = new Block(Block.getBlockId(blockId));
@@ -355,7 +355,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       out.print("\n\n" + errMsg);
       LOG.warn("Error in looking up block", e);
     } finally {
-      namenode.getNamesystem().readUnlock(FSNamesystemLockMode.GLOBAL, "fsck");
+      namenode.getNamesystem().readUnlock(RwLockMode.GLOBAL, "fsck");
     }
   }
 
@@ -587,7 +587,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     final String operationName = "fsckGetBlockLocations";
     FSPermissionChecker.setOperationType(operationName);
     FSPermissionChecker pc = fsn.getPermissionChecker();
-    fsn.readLock(FSNamesystemLockMode.GLOBAL);
+    fsn.readLock(RwLockMode.GLOBAL);
     try {
       blocks = FSDirStatAndListingOp.getBlockLocations(
           fsn.getFSDirectory(), pc,
@@ -596,7 +596,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     } catch (FileNotFoundException fnfe) {
       blocks = null;
     } finally {
-      fsn.readUnlock(FSNamesystemLockMode.GLOBAL, operationName);
+      fsn.readUnlock(RwLockMode.GLOBAL, operationName);
     }
     return blocks;
   }

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
@@ -35,6 +34,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser.TraverseInfo;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.FileEdekInfo;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.ReencryptionTask;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.ZoneSubmissionTracker;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StopWatch;
@@ -339,7 +339,7 @@ public class ReencryptionHandler implements Runnable {
       }
 
       final Long zoneId;
-      dir.getFSNamesystem().readLock(FSNamesystemLockMode.FS);
+      dir.getFSNamesystem().readLock(RwLockMode.FS);
       try {
         zoneId = getReencryptionStatus().getNextUnprocessedZone();
         if (zoneId == null) {
@@ -351,7 +351,7 @@ public class ReencryptionHandler implements Runnable {
         getReencryptionStatus().markZoneStarted(zoneId);
         resetSubmissionTracker(zoneId);
       } finally {
-        dir.getFSNamesystem().readUnlock(FSNamesystemLockMode.FS, "reEncryptThread");
+        dir.getFSNamesystem().readUnlock(RwLockMode.FS, "reEncryptThread");
       }
 
       try {
@@ -443,7 +443,7 @@ public class ReencryptionHandler implements Runnable {
 
   List<XAttr> completeReencryption(final INode zoneNode) throws IOException {
     assert dir.hasWriteLock();
-    assert dir.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
+    assert dir.getFSNamesystem().hasWriteLock(RwLockMode.FS);
     final Long zoneId = zoneNode.getId();
     ZoneReencryptionStatus zs = getReencryptionStatus().getZoneStatus(zoneId);
     assert zs != null;
@@ -614,7 +614,7 @@ public class ReencryptionHandler implements Runnable {
     protected void checkPauseForTesting()
         throws InterruptedException {
       assert !dir.hasReadLock();
-      assert !dir.getFSNamesystem().hasReadLock(FSNamesystemLockMode.FS);
+      assert !dir.getFSNamesystem().hasReadLock(RwLockMode.FS);
       while (shouldPauseForTesting) {
         LOG.info("Sleeping in the re-encrypt handler for unit test.");
         synchronized (reencryptionHandler) {
@@ -748,7 +748,7 @@ public class ReencryptionHandler implements Runnable {
     @Override
     protected void throttle() throws InterruptedException {
       assert !dir.hasReadLock();
-      assert !dir.getFSNamesystem().hasReadLock(FSNamesystemLockMode.FS);
+      assert !dir.getFSNamesystem().hasReadLock(RwLockMode.FS);
       final int numCores = Runtime.getRuntime().availableProcessors();
       if (taskQueue.size() >= numCores) {
         LOG.debug("Re-encryption handler throttling because queue size {} is"

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java

@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.server.namenode.ReencryptionHandler.ReencryptionBatch;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.StopWatch;
@@ -435,7 +435,7 @@ public final class ReencryptionUpdater implements Runnable {
 
     boolean shouldRetry;
     do {
-      dir.getFSNamesystem().writeLock(FSNamesystemLockMode.FS);
+      dir.getFSNamesystem().writeLock(RwLockMode.FS);
       try {
         throttleTimerLocked.start();
         processTask(task);
@@ -453,7 +453,7 @@ public final class ReencryptionUpdater implements Runnable {
         task.processed = true;
         shouldRetry = false;
       } finally {
-        dir.getFSNamesystem().writeUnlock(FSNamesystemLockMode.FS, "reencryptUpdater");
+        dir.getFSNamesystem().writeUnlock(RwLockMode.FS, "reencryptUpdater");
         throttleTimerLocked.stop();
       }
       // logSync regardless, to prevent edit log buffer overflow triggering
@@ -501,7 +501,7 @@ public final class ReencryptionUpdater implements Runnable {
 
   private synchronized void checkPauseForTesting() throws InterruptedException {
     assert !dir.hasWriteLock();
-    assert !dir.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
+    assert !dir.getFSNamesystem().hasWriteLock(RwLockMode.FS);
     if (pauseAfterNthCheckpoint != 0) {
       ZoneSubmissionTracker tracker =
           handler.unprotectedGetTracker(pauseZoneId);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -37,7 +37,6 @@ import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -63,6 +62,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.util.Canceler;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.MD5Hash;
@@ -1095,11 +1095,11 @@ public class SecondaryNameNode implements Runnable,
             sig.mostRecentCheckpointTxId + " even though it should have " +
             "just been downloaded");
       }
-      dstNamesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+      dstNamesystem.writeLock(RwLockMode.GLOBAL);
       try {
         dstImage.reloadFromImageFile(file, dstNamesystem);
       } finally {
-        dstNamesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "reloadFromImageFile");
+        dstNamesystem.writeUnlock(RwLockMode.GLOBAL, "reloadFromImageFile");
       }
       dstNamesystem.imageLoadComplete();
     }

+ 17 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/FSNLockManager.java

@@ -22,26 +22,28 @@ import org.apache.hadoop.classification.VisibleForTesting;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.Supplier;
 
+import org.apache.hadoop.hdfs.util.RwLockMode;
+
 public interface FSNLockManager {
 
   /**
    * Acquire read lock for an operation according to the lock mode.
    * @param lockMode locking mode
    */
-  void readLock(FSNamesystemLockMode lockMode);
+  void readLock(RwLockMode lockMode);
 
   /**
    * Acquire read lock according to the lock mode, unless interrupted while waiting.
    * @param lockMode locking mode
    */
-  void readLockInterruptibly(FSNamesystemLockMode lockMode) throws InterruptedException;
+  void readLockInterruptibly(RwLockMode lockMode) throws InterruptedException;
 
   /**
    * Release read lock for the operation according to the lock mode.
    * @param lockMode locking mode
    * @param opName operation name
    */
-  void readUnlock(FSNamesystemLockMode lockMode, String opName);
+  void readUnlock(RwLockMode lockMode, String opName);
 
   /**
    * Release read lock for the operation according to the lock mode.
@@ -49,21 +51,21 @@ public interface FSNLockManager {
    * @param opName operation name
    * @param lockReportInfoSupplier supplier used to report some information for this lock.
    */
-  void readUnlock(FSNamesystemLockMode lockMode, String opName,
+  void readUnlock(RwLockMode lockMode, String opName,
       Supplier<String> lockReportInfoSupplier);
 
   /**
    * Acquire write lock for an operation according to the lock mode.
    * @param lockMode locking mode
    */
-  void writeLock(FSNamesystemLockMode lockMode);
+  void writeLock(RwLockMode lockMode);
 
   /**
    * Release write lock for the operation according to the lock mode.
    * @param lockMode locking mode
    * @param opName operation name
    */
-  void writeUnlock(FSNamesystemLockMode lockMode, String opName);
+  void writeUnlock(RwLockMode lockMode, String opName);
 
   /**
    * Release write lock for the operation according to the lock mode.
@@ -72,7 +74,7 @@ public interface FSNLockManager {
    * @param suppressWriteLockReport When false, an event of the write lock being held
    * for a long time will be logged and recorded in metrics.
    */
-  void writeUnlock(FSNamesystemLockMode lockMode, String opName,
+  void writeUnlock(RwLockMode lockMode, String opName,
       boolean suppressWriteLockReport);
 
   /**
@@ -81,24 +83,24 @@ public interface FSNLockManager {
    * @param opName operation name
    * @param lockReportInfoSupplier supplier used to report information for this lock.
    */
-  void writeUnlock(FSNamesystemLockMode lockMode, String opName,
+  void writeUnlock(RwLockMode lockMode, String opName,
       Supplier<String> lockReportInfoSupplier);
 
-  void writeLockInterruptibly(FSNamesystemLockMode lockMode) throws InterruptedException;
+  void writeLockInterruptibly(RwLockMode lockMode) throws InterruptedException;
 
   /**
    * Check if the current thread holds write lock according to the lock mode.
    * @param lockMode locking mode
    * @return true if the current thread is holding the write-lock, else false.
    */
-  boolean hasWriteLock(FSNamesystemLockMode lockMode);
+  boolean hasWriteLock(RwLockMode lockMode);
 
   /**
    * Check if the current thread holds read lock according to the lock mode.
    * @param lockMode locking mode
    * @return true if the current thread is holding the read-lock, else false.
    */
-  boolean hasReadLock(FSNamesystemLockMode lockMode);
+  boolean hasReadLock(RwLockMode lockMode);
 
   /**
    * Queries the number of reentrant read holds on this lock by the
@@ -109,7 +111,7 @@ public interface FSNLockManager {
    * @return the number of holds on the read lock by the current thread,
    *         or zero if the read lock is not held by the current thread
    */
-  int getReadHoldCount(FSNamesystemLockMode lockMode);
+  int getReadHoldCount(RwLockMode lockMode);
 
   /**
    * Returns the QueueLength of waiting threads.
@@ -118,7 +120,7 @@ public interface FSNLockManager {
    * @param lockMode locking mode
    * @return int - Number of threads waiting on this lock
    */
-  int getQueueLength(FSNamesystemLockMode lockMode);
+  int getQueueLength(RwLockMode lockMode);
 
   /**
    * Returns the number of times the read lock
@@ -128,7 +130,7 @@ public interface FSNLockManager {
    * @return long - Number of times the read lock
    * has been held longer than the threshold.
    */
-  long getNumOfReadLockLongHold(FSNamesystemLockMode lockMode);
+  long getNumOfReadLockLongHold(RwLockMode lockMode);
 
   /**
    * Returns the number of times the write-lock
@@ -138,7 +140,7 @@ public interface FSNLockManager {
    * @return long - Number of times the write-lock
    * has been held longer than the threshold.
    */
-  long getNumOfWriteLockLongHold(FSNamesystemLockMode lockMode);
+  long getNumOfWriteLockLongHold(RwLockMode lockMode);
 
   /**
    * Check if the metrics are enabled.

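Every method on this interface is now keyed by RwLockMode, so a caller picks the narrowest scope that covers the state it touches: BM for block-manager-only work, FS for namespace-only work, and GLOBAL when both are involved. A hedged sketch of a BM-scoped query against the interface above (the helper method is illustrative, not part of the commit):

  import org.apache.hadoop.hdfs.server.namenode.fgl.FSNLockManager;
  import org.apache.hadoop.hdfs.util.RwLockMode;

  final class LockQueries {
    // A block-manager-only query: takes the BM read lock and never touches
    // the FS lock, so namespace writers are not stalled.
    static boolean hasWaiters(FSNLockManager lm) {
      lm.readLock(RwLockMode.BM);
      try {
        return lm.getQueueLength(RwLockMode.BM) > 0;
      } finally {
        lm.readUnlock(RwLockMode.BM, "hasWaiters");
      }
    }
  }
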
+ 62 - 61
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/FineGrainedFSNamesystemLock.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.fgl;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystemLock;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -40,19 +41,19 @@ public class FineGrainedFSNamesystemLock implements FSNLockManager {
   }
 
   @Override
-  public void readLock(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public void readLock(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.fsLock.readLock();
       this.bmLock.readLock();
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.readLock();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.readLock();
     }
   }
 
-  public void readLockInterruptibly(FSNamesystemLockMode lockMode) throws InterruptedException  {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public void readLockInterruptibly(RwLockMode lockMode) throws InterruptedException  {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.fsLock.readLockInterruptibly();
       try {
         this.bmLock.readLockInterruptibly();
@@ -62,90 +63,90 @@ public class FineGrainedFSNamesystemLock implements FSNLockManager {
         this.fsLock.readUnlock("BMReadLockInterruptiblyFailed");
         throw e;
       }
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.readLockInterruptibly();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.readLockInterruptibly();
     }
   }
 
   @Override
-  public void readUnlock(FSNamesystemLockMode lockMode, String opName) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public void readUnlock(RwLockMode lockMode, String opName) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.bmLock.readUnlock(opName);
       this.fsLock.readUnlock(opName);
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.readUnlock(opName);
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.readUnlock(opName);
     }
   }
 
-  public void readUnlock(FSNamesystemLockMode lockMode, String opName,
+  public void readUnlock(RwLockMode lockMode, String opName,
       Supplier<String> lockReportInfoSupplier) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.bmLock.readUnlock(opName, lockReportInfoSupplier);
       this.fsLock.readUnlock(opName, lockReportInfoSupplier);
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.readUnlock(opName, lockReportInfoSupplier);
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.readUnlock(opName, lockReportInfoSupplier);
     }
   }
 
   @Override
-  public void writeLock(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public void writeLock(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.fsLock.writeLock();
       this.bmLock.writeLock();
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.writeLock();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.writeLock();
     }
   }
 
   @Override
-  public void writeUnlock(FSNamesystemLockMode lockMode, String opName) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public void writeUnlock(RwLockMode lockMode, String opName) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.bmLock.writeUnlock(opName);
       this.fsLock.writeUnlock(opName);
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.writeUnlock(opName);
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.writeUnlock(opName);
     }
   }
 
   @Override
-  public void writeUnlock(FSNamesystemLockMode lockMode, String opName,
+  public void writeUnlock(RwLockMode lockMode, String opName,
       boolean suppressWriteLockReport) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.bmLock.writeUnlock(opName, suppressWriteLockReport);
       this.fsLock.writeUnlock(opName, suppressWriteLockReport);
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.writeUnlock(opName, suppressWriteLockReport);
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.writeUnlock(opName, suppressWriteLockReport);
     }
   }
 
-  public void writeUnlock(FSNamesystemLockMode lockMode, String opName,
+  public void writeUnlock(RwLockMode lockMode, String opName,
       Supplier<String> lockReportInfoSupplier) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.bmLock.writeUnlock(opName, lockReportInfoSupplier);
       this.fsLock.writeUnlock(opName, lockReportInfoSupplier);
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.writeUnlock(opName, lockReportInfoSupplier);
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.writeUnlock(opName, lockReportInfoSupplier);
     }
   }
 
   @Override
-  public void writeLockInterruptibly(FSNamesystemLockMode lockMode)
+  public void writeLockInterruptibly(RwLockMode lockMode)
       throws InterruptedException {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       this.fsLock.writeLockInterruptibly();
       try {
         this.bmLock.writeLockInterruptibly();
@@ -155,16 +156,16 @@ public class FineGrainedFSNamesystemLock implements FSNLockManager {
         this.fsLock.writeUnlock("BMWriteLockInterruptiblyFailed");
         throw e;
       }
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       this.fsLock.writeLockInterruptibly();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       this.bmLock.writeLockInterruptibly();
     }
   }
 
   @Override
-  public boolean hasWriteLock(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public boolean hasWriteLock(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       if (this.fsLock.isWriteLockedByCurrentThread()) {
         // The bm writeLock should be held by the current thread.
         assert this.bmLock.isWriteLockedByCurrentThread();
@@ -174,18 +175,18 @@ public class FineGrainedFSNamesystemLock implements FSNLockManager {
         assert !this.bmLock.isWriteLockedByCurrentThread();
         return false;
       }
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       return this.fsLock.isWriteLockedByCurrentThread();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       return this.bmLock.isWriteLockedByCurrentThread();
     }
     return false;
   }
 
   @Override
-  public boolean hasReadLock(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
-      if (hasWriteLock(FSNamesystemLockMode.GLOBAL)) {
+  public boolean hasReadLock(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
+      if (hasWriteLock(RwLockMode.GLOBAL)) {
         return true;
       } else if (this.fsLock.getReadHoldCount() > 0) {
         // The bm readLock should be held by the current thread.
@@ -196,9 +197,9 @@ public class FineGrainedFSNamesystemLock implements FSNLockManager {
         assert this.bmLock.getReadHoldCount() <= 0;
         return false;
       }
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       return this.fsLock.getReadHoldCount() > 0 || this.fsLock.isWriteLockedByCurrentThread();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       return this.bmLock.getReadHoldCount() > 0 || this.bmLock.isWriteLockedByCurrentThread();
     }
     return false;
@@ -209,48 +210,48 @@ public class FineGrainedFSNamesystemLock implements FSNLockManager {
    * This method is only used for ComputeDirectoryContentSummary.
    * For the GLOBAL mode, just return the FSLock's ReadHoldCount.
    */
-  public int getReadHoldCount(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public int getReadHoldCount(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       return this.fsLock.getReadHoldCount();
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       return this.fsLock.getReadHoldCount();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       return this.bmLock.getReadHoldCount();
     }
     return -1;
   }
 
   @Override
-  public int getQueueLength(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public int getQueueLength(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       return -1;
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       return this.fsLock.getQueueLength();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       return this.bmLock.getQueueLength();
     }
     return -1;
   }
 
   @Override
-  public long getNumOfReadLockLongHold(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public long getNumOfReadLockLongHold(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       return -1;
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       return this.fsLock.getNumOfReadLockLongHold();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       return this.bmLock.getNumOfReadLockLongHold();
     }
     return -1;
   }
 
   @Override
-  public long getNumOfWriteLockLongHold(FSNamesystemLockMode lockMode) {
-    if (lockMode.equals(FSNamesystemLockMode.GLOBAL)) {
+  public long getNumOfWriteLockLongHold(RwLockMode lockMode) {
+    if (lockMode.equals(RwLockMode.GLOBAL)) {
       return -1;
-    } else if (lockMode.equals(FSNamesystemLockMode.FS)) {
+    } else if (lockMode.equals(RwLockMode.FS)) {
       return this.fsLock.getNumOfWriteLockLongHold();
-    } else if (lockMode.equals(FSNamesystemLockMode.BM)) {
+    } else if (lockMode.equals(RwLockMode.BM)) {
       return this.bmLock.getNumOfWriteLockLongHold();
     }
     return -1;

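The GLOBAL branches above encode a strict ordering discipline: the FS lock is always acquired before the BM lock and released after it, and an interrupted BM acquire rolls the FS lock back before rethrowing, so no thread is ever left parked holding half of the pair. A self-contained, pure-JDK sketch of that discipline (field names are stand-ins for the real fsLock/bmLock):

  import java.util.concurrent.locks.ReentrantReadWriteLock;

  final class OrderedPairLock {
    private final ReentrantReadWriteLock fs = new ReentrantReadWriteLock(true);
    private final ReentrantReadWriteLock bm = new ReentrantReadWriteLock(true);

    void writeLockGlobalInterruptibly() throws InterruptedException {
      fs.writeLock().lockInterruptibly();      // FS first...
      try {
        bm.writeLock().lockInterruptibly();    // ...then BM
      } catch (InterruptedException e) {
        fs.writeLock().unlock();               // roll back the partial acquire
        throw e;
      }
    }

    void writeUnlockGlobal() {
      bm.writeLock().unlock();                 // release in reverse order
      fs.writeLock().unlock();
    }
  }

Acquiring in one fixed order and releasing in reverse is what rules out an FS/BM deadlock between two threads taking GLOBAL concurrently.
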
+ 16 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/GlobalFSNamesystemLock.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.fgl;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystemLock;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -33,78 +34,78 @@ public class GlobalFSNamesystemLock implements FSNLockManager {
   }
 
   @Override
-  public void readLock(FSNamesystemLockMode lockMode) {
+  public void readLock(RwLockMode lockMode) {
     this.lock.readLock();
   }
 
-  public void readLockInterruptibly(FSNamesystemLockMode lockMode) throws InterruptedException  {
+  public void readLockInterruptibly(RwLockMode lockMode) throws InterruptedException  {
     this.lock.readLockInterruptibly();
   }
 
   @Override
-  public void readUnlock(FSNamesystemLockMode lockMode, String opName) {
+  public void readUnlock(RwLockMode lockMode, String opName) {
     this.lock.readUnlock(opName);
   }
 
-  public void readUnlock(FSNamesystemLockMode lockMode, String opName,
+  public void readUnlock(RwLockMode lockMode, String opName,
       Supplier<String> lockReportInfoSupplier) {
     this.lock.readUnlock(opName, lockReportInfoSupplier);
   }
 
   @Override
-  public void writeLock(FSNamesystemLockMode lockMode) {
+  public void writeLock(RwLockMode lockMode) {
     this.lock.writeLock();
   }
 
   @Override
-  public void writeUnlock(FSNamesystemLockMode lockMode, String opName) {
+  public void writeUnlock(RwLockMode lockMode, String opName) {
     this.lock.writeUnlock(opName);
   }
 
   @Override
-  public void writeUnlock(FSNamesystemLockMode lockMode, String opName,
+  public void writeUnlock(RwLockMode lockMode, String opName,
       boolean suppressWriteLockReport) {
     this.lock.writeUnlock(opName, suppressWriteLockReport);
   }
 
-  public void writeUnlock(FSNamesystemLockMode lockMode, String opName,
+  public void writeUnlock(RwLockMode lockMode, String opName,
       Supplier<String> lockReportInfoSupplier) {
     this.lock.writeUnlock(opName, lockReportInfoSupplier);
   }
 
   @Override
-  public void writeLockInterruptibly(FSNamesystemLockMode lockMode)
+  public void writeLockInterruptibly(RwLockMode lockMode)
       throws InterruptedException {
     this.lock.writeLockInterruptibly();
   }
 
   @Override
-  public boolean hasWriteLock(FSNamesystemLockMode lockMode) {
+  public boolean hasWriteLock(RwLockMode lockMode) {
     return this.lock.isWriteLockedByCurrentThread();
   }
 
   @Override
-  public boolean hasReadLock(FSNamesystemLockMode lockMode) {
+  public boolean hasReadLock(RwLockMode lockMode) {
     return this.lock.getReadHoldCount() > 0 || hasWriteLock(lockMode);
   }
 
   @Override
-  public int getReadHoldCount(FSNamesystemLockMode lockMode) {
+  public int getReadHoldCount(RwLockMode lockMode) {
     return this.lock.getReadHoldCount();
   }
 
   @Override
-  public int getQueueLength(FSNamesystemLockMode lockMode) {
+  public int getQueueLength(RwLockMode lockMode) {
     return this.lock.getQueueLength();
   }
 
   @Override
-  public long getNumOfReadLockLongHold(FSNamesystemLockMode lockMode) {
+  public long getNumOfReadLockLongHold(RwLockMode lockMode) {
     return this.lock.getNumOfReadLockLongHold();
   }
 
   @Override
-  public long getNumOfWriteLockLongHold(FSNamesystemLockMode lockMode) {
+  public long getNumOfWriteLockLongHold(RwLockMode lockMode) {
     return this.lock.getNumOfWriteLockLongHold();
   }
 

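By contrast with the fine-grained implementation above, GlobalFSNamesystemLock accepts the mode purely for interface compatibility and routes every call to the single FSNamesystemLock, so GLOBAL, FS, and BM behave identically. A pure-JDK sketch of that shape (names are stand-ins):

  import java.util.concurrent.locks.ReentrantReadWriteLock;

  final class SingleLockManager {
    enum Mode { GLOBAL, FS, BM }

    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);

    void readLock(Mode ignored)   { lock.readLock().lock(); }    // mode is
    void readUnlock(Mode ignored) { lock.readLock().unlock(); }  // ignored
  }

Keeping the mode parameter on both implementations lets call sites be migrated once, while the actual lock granularity remains a separate choice of FSNLockManager implementation.
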
+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java

@@ -34,7 +34,6 @@ import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.util.Timer;
@@ -54,6 +53,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.SecurityUtil;
 
@@ -356,7 +356,7 @@ public class EditLogTailer {
     // transitionToActive RPC takes the write lock before calling
     // tailer.stop() -- so if we're not interruptible, it will
     // deadlock.
-    namesystem.writeLockInterruptibly(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLockInterruptibly(RwLockMode.GLOBAL);
     try {
       long currentLastTxnId = image.getLastAppliedTxId();
       if (lastTxnId != currentLastTxnId) {
@@ -387,7 +387,7 @@ public class EditLogTailer {
       lastLoadedTxnId = image.getLastAppliedTxId();
       return editsLoaded;
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "doTailEdits");
+      namesystem.writeUnlock(RwLockMode.GLOBAL, "doTailEdits");
     }
   }
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDeletionGc.java

@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -73,14 +73,14 @@ public class SnapshotDeletionGc {
 
   private void gcDeletedSnapshot(String name) {
     final Snapshot.Root deleted;
-    namesystem.readLock(FSNamesystemLockMode.FS);
+    namesystem.readLock(RwLockMode.FS);
     try {
       deleted = namesystem.getSnapshotManager().chooseDeletedSnapshot();
     } catch (Throwable e) {
       LOG.error("Failed to chooseDeletedSnapshot", e);
       throw e;
     } finally {
-      namesystem.readUnlock(FSNamesystemLockMode.FS, "gcDeletedSnapshot");
+      namesystem.readUnlock(RwLockMode.FS, "gcDeletedSnapshot");
     }
     if (deleted == null) {
       LOG.trace("{}: no snapshots are marked as deleted.", name);

+ 18 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java

@@ -17,29 +17,27 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
-
 /** Read-write lock interface for FSNamesystem. */
 public interface RwLock {
   /** Acquire read lock. */
   default void readLock() {
-    readLock(FSNamesystemLockMode.GLOBAL);
+    readLock(RwLockMode.GLOBAL);
   }
 
   /** Acquire read lock. */
-  void readLock(FSNamesystemLockMode lockMode);
+  void readLock(RwLockMode lockMode);
 
   /** Acquire read lock, unless interrupted while waiting.  */
   default void readLockInterruptibly() throws InterruptedException {
-    readLockInterruptibly(FSNamesystemLockMode.GLOBAL);
+    readLockInterruptibly(RwLockMode.GLOBAL);
   }
 
   /** Acquire read lock, unless interrupted while waiting.  */
-  void readLockInterruptibly(FSNamesystemLockMode lockMode) throws InterruptedException;
+  void readLockInterruptibly(RwLockMode lockMode) throws InterruptedException;
 
   /** Release read lock. */
   default void readUnlock() {
-    readUnlock(FSNamesystemLockMode.GLOBAL, "OTHER");
+    readUnlock(RwLockMode.GLOBAL, "OTHER");
   }
 
   /**
@@ -47,42 +45,42 @@ public interface RwLock {
    * @param opName Operation name.
    */
   default void readUnlock(String opName) {
-    readUnlock(FSNamesystemLockMode.GLOBAL, opName);
+    readUnlock(RwLockMode.GLOBAL, opName);
   }
 
   /**
    * Release read lock with operation name.
    * @param opName Operation name.
    */
-  void readUnlock(FSNamesystemLockMode lockMode, String opName);
+  void readUnlock(RwLockMode lockMode, String opName);
 
   /** Check if the current thread holds read lock. */
   default boolean hasReadLock() {
-    return hasReadLock(FSNamesystemLockMode.GLOBAL);
+    return hasReadLock(RwLockMode.GLOBAL);
   }
 
   /** Check if the current thread holds read lock. */
-  boolean hasReadLock(FSNamesystemLockMode lockMode);
+  boolean hasReadLock(RwLockMode lockMode);
 
   /** Acquire write lock. */
   default void writeLock() {
-    writeLock(FSNamesystemLockMode.GLOBAL);
+    writeLock(RwLockMode.GLOBAL);
   }
 
   /** Acquire write lock. */
-  void writeLock(FSNamesystemLockMode lockMode);
+  void writeLock(RwLockMode lockMode);
   
   /** Acquire write lock, unless interrupted while waiting.  */
   default void writeLockInterruptibly() throws InterruptedException {
-    writeLockInterruptibly(FSNamesystemLockMode.GLOBAL);
+    writeLockInterruptibly(RwLockMode.GLOBAL);
   }
 
   /** Acquire write lock, unless interrupted while waiting.  */
-  void writeLockInterruptibly(FSNamesystemLockMode lockMode) throws InterruptedException;
+  void writeLockInterruptibly(RwLockMode lockMode) throws InterruptedException;
 
   /** Release write lock. */
   default void writeUnlock() {
-    writeUnlock(FSNamesystemLockMode.GLOBAL, "OTHER");
+    writeUnlock(RwLockMode.GLOBAL, "OTHER");
   }
 
   /**
@@ -90,20 +88,20 @@ public interface RwLock {
    * @param opName Operation name.
    */
   default void writeUnlock(String opName) {
-    writeUnlock(FSNamesystemLockMode.GLOBAL, opName);
+    writeUnlock(RwLockMode.GLOBAL, opName);
   }
 
   /**
    * Release write lock with operation name.
    * @param opName Operation name.
    */
-  void writeUnlock(FSNamesystemLockMode lockMode, String opName);
+  void writeUnlock(RwLockMode lockMode, String opName);
 
   /** Check if the current thread holds write lock. */
   default boolean hasWriteLock() {
-    return hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    return hasWriteLock(RwLockMode.GLOBAL);
   }
 
   /** Check if the current thread holds write lock. */
-  boolean hasWriteLock(FSNamesystemLockMode lockMode);
+  boolean hasWriteLock(RwLockMode lockMode);
 }

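The default methods above are the compatibility bridge for this move: existing no-arg callers compile unchanged and implicitly get GLOBAL scope, while migrated callers pass an explicit mode. Relocating the enum into org.apache.hadoop.hdfs.util also lets this util-package interface drop its import from the higher-level namenode.fgl package. A minimal pure-JDK sketch of the overload pattern (stand-in names):

  interface MiniRwLock {
    enum Mode { GLOBAL, FS, BM }

    // Legacy entry point: old call sites stay source-compatible and
    // conservatively take the widest scope.
    default void readLock() {
      readLock(Mode.GLOBAL);
    }

    // New entry point: callers opt into a narrower scope explicitly.
    void readLock(Mode mode);
  }
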
+ 6 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/fgl/FSNamesystemLockMode.java → hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLockMode.java

@@ -15,10 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode.fgl;
+package org.apache.hadoop.hdfs.util;
 
-public enum FSNamesystemLockMode {
+/**
+ * Lock modes used for fine-grained locking (FGL).
+ */
+public enum RwLockMode {
   GLOBAL,
   FS,
   BM
-}
+}

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java

@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.After;
 import org.junit.Test;
 
@@ -176,7 +176,7 @@ public class TestBlocksScheduledCounter {
           .getBlockLocations(cluster.getNameNode(), filePath.toString(), 0, 1)
           .get(0);
       DatanodeInfo[] locs = block.getLocations();
-      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+      cluster.getNamesystem().writeLock(RwLockMode.BM);
       try {
         bm.findAndMarkBlockAsCorrupt(block.getBlock(), locs[0], "STORAGE_ID",
             "TEST");
@@ -186,7 +186,7 @@ public class TestBlocksScheduledCounter {
         BlockManagerTestUtil.updateState(bm);
         assertEquals(1L, bm.getPendingReconstructionBlocksCount());
       } finally {
-        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+        cluster.getNamesystem().writeUnlock(RwLockMode.BM,
             "findAndMarkBlockAsCorrupt");
       }
 
@@ -240,13 +240,13 @@ public class TestBlocksScheduledCounter {
         DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
       }
 
-      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+      cluster.getNamesystem().writeLock(RwLockMode.BM);
       try {
         BlockManagerTestUtil.computeAllPendingWork(bm);
         BlockManagerTestUtil.updateState(bm);
         assertEquals(1L, bm.getPendingReconstructionBlocksCount());
       } finally {
-        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+        cluster.getNamesystem().writeUnlock(RwLockMode.BM,
             "testBlocksScheduledCounterOnTruncate");
       }
 

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java

@@ -51,9 +51,9 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
@@ -160,13 +160,13 @@ public class TestFileCorruption {
       DatanodeRegistration dnR = InternalDataNodeTestUtils.
         getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
       FSNamesystem ns = cluster.getNamesystem();
-      ns.writeLock(FSNamesystemLockMode.BM);
+      ns.writeLock(RwLockMode.BM);
       try {
         cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,
             new DatanodeInfoBuilder().setNodeID(dnR).build(), "TEST",
             "STORAGE_ID");
       } finally {
-        ns.writeUnlock(FSNamesystemLockMode.BM, "testArrayOutOfBoundsException");
+        ns.writeUnlock(RwLockMode.BM, "testArrayOutOfBoundsException");
       }
       
       // open the file
@@ -211,16 +211,16 @@ public class TestFileCorruption {
       FSNamesystem ns = cluster.getNamesystem();
       //fail the storage on that node which has the block
       try {
-        ns.writeLock(FSNamesystemLockMode.BM);
+        ns.writeLock(RwLockMode.BM);
         updateAllStorages(bm);
       } finally {
-        ns.writeUnlock(FSNamesystemLockMode.BM, "testCorruptionWithDiskFailure");
+        ns.writeUnlock(RwLockMode.BM, "testCorruptionWithDiskFailure");
       }
-      ns.writeLock(FSNamesystemLockMode.BM);
+      ns.writeLock(RwLockMode.BM);
       try {
         markAllBlocksAsCorrupt(bm, blk);
       } finally {
-        ns.writeUnlock(FSNamesystemLockMode.BM, "testCorruptionWithDiskFailure");
+        ns.writeUnlock(RwLockMode.BM, "testCorruptionWithDiskFailure");
       }
 
       // open the file

+ 11 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java

@@ -31,9 +31,9 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerSafeMode.BMSafeModeStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.Whitebox;
 import org.junit.Assert;
 
@@ -51,23 +51,23 @@ public class BlockManagerTestUtil {
   /** @return the datanode descriptor for the given the given storageID. */
   public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
       final String storageID) {
-    ns.readLock(FSNamesystemLockMode.BM);
+    ns.readLock(RwLockMode.BM);
     try {
       return ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
     } finally {
-      ns.readUnlock(FSNamesystemLockMode.BM, "getDatanode");
+      ns.readUnlock(RwLockMode.BM, "getDatanode");
     }
   }
 
   public static Iterator<BlockInfo> getBlockIterator(final FSNamesystem ns,
       final String storageID, final int startBlock) {
-    ns.readLock(FSNamesystemLockMode.BM);
+    ns.readLock(RwLockMode.BM);
     try {
       DatanodeDescriptor dn =
           ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
       return dn.getBlockIterator(startBlock);
     } finally {
-      ns.readUnlock(FSNamesystemLockMode.BM, "getBlockIterator");
+      ns.readUnlock(RwLockMode.BM, "getBlockIterator");
     }
   }
 
@@ -89,7 +89,7 @@ public class BlockManagerTestUtil {
    */
   public static int[] getReplicaInfo(final FSNamesystem namesystem, final Block b) {
     final BlockManager bm = namesystem.getBlockManager();
-    namesystem.readLock(FSNamesystemLockMode.BM);
+    namesystem.readLock(RwLockMode.BM);
     try {
       final BlockInfo storedBlock = bm.getStoredBlock(b);
       return new int[]{getNumberOfRacks(bm, b),
@@ -97,7 +97,7 @@ public class BlockManagerTestUtil {
           bm.neededReconstruction.contains(storedBlock) ? 1 : 0,
           getNumberOfDomains(bm, b)};
     } finally {
-      namesystem.readUnlock(FSNamesystemLockMode.BM, "getReplicaInfo");
+      namesystem.readUnlock(RwLockMode.BM, "getReplicaInfo");
     }
   }
 
@@ -248,7 +248,7 @@ public class BlockManagerTestUtil {
    */
   public static void noticeDeadDatanode(NameNode nn, String dnName) {
     FSNamesystem namesystem = nn.getNamesystem();
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
       HeartbeatManager hbm = dnm.getHeartbeatManager();
@@ -266,7 +266,7 @@ public class BlockManagerTestUtil {
         hbm.heartbeatCheck();
       }
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "noticeDeadDatanode");
+      namesystem.writeUnlock(RwLockMode.BM, "noticeDeadDatanode");
     }
   }
   
@@ -303,12 +303,12 @@ public class BlockManagerTestUtil {
    */
   public static int checkHeartbeatAndGetUnderReplicatedBlocksCount(
       FSNamesystem namesystem, BlockManager bm) {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
       return bm.getUnderReplicatedNotMissingBlocks();
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM,
+      namesystem.writeUnlock(RwLockMode.BM,
           "checkHeartbeatAndGetUnderReplicatedBlocksCount");
     }
   }

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

@@ -21,7 +21,6 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.LinkedListMultimap;
@@ -76,6 +75,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.ECSchema;
@@ -167,10 +167,10 @@ public class TestBlockManager {
     fsn = Mockito.mock(FSNamesystem.class);
     Mockito.doReturn(true).when(fsn).hasWriteLock();
     Mockito.doReturn(true).when(fsn).hasReadLock();
-    Mockito.doReturn(true).when(fsn).hasWriteLock(FSNamesystemLockMode.GLOBAL);
-    Mockito.doReturn(true).when(fsn).hasReadLock(FSNamesystemLockMode.GLOBAL);
-    Mockito.doReturn(true).when(fsn).hasWriteLock(FSNamesystemLockMode.BM);
-    Mockito.doReturn(true).when(fsn).hasReadLock(FSNamesystemLockMode.BM);
+    Mockito.doReturn(true).when(fsn).hasWriteLock(RwLockMode.GLOBAL);
+    Mockito.doReturn(true).when(fsn).hasReadLock(RwLockMode.GLOBAL);
+    Mockito.doReturn(true).when(fsn).hasWriteLock(RwLockMode.BM);
+    Mockito.doReturn(true).when(fsn).hasReadLock(RwLockMode.BM);
     Mockito.doReturn(true).when(fsn).isRunning();
     // Make shouldPopulateReplQueues return true
     HAContext haContext = Mockito.mock(HAContext.class);
@@ -1624,7 +1624,7 @@ public class TestBlockManager {
       }
       failedStorageDataNode.updateHeartbeat(reports.toArray(StorageReport
           .EMPTY_ARRAY), 0L, 0L, 0, 0, null);
-      ns.writeLock(FSNamesystemLockMode.BM);
+      ns.writeLock(RwLockMode.BM);
       DatanodeStorageInfo corruptStorageInfo= null;
       for(int i=0; i<corruptStorageDataNode.getStorageInfos().length; i++) {
         corruptStorageInfo = corruptStorageDataNode.getStorageInfos()[i];
@@ -1638,16 +1638,16 @@ public class TestBlockManager {
       blockManager.findAndMarkBlockAsCorrupt(blk, corruptStorageDataNode,
           corruptStorageInfo.getStorageID(),
           CorruptReplicasMap.Reason.ANY.toString());
-      ns.writeUnlock(FSNamesystemLockMode.BM, "testBlockManagerMachinesArray");
+      ns.writeUnlock(RwLockMode.BM, "testBlockManagerMachinesArray");
       BlockInfo[] blockInfos = new BlockInfo[] {blockInfo};
-      ns.readLock(FSNamesystemLockMode.BM);
+      ns.readLock(RwLockMode.BM);
       LocatedBlocks locatedBlocks =
           blockManager.createLocatedBlocks(blockInfos, 3L, false, 0L, 3L,
               false, false, null, null);
       assertTrue("Located Blocks should exclude corrupt" +
               "replicas and failed storages",
           locatedBlocks.getLocatedBlocks().size() == 1);
-      ns.readUnlock(FSNamesystemLockMode.BM, "open");
+      ns.readUnlock(RwLockMode.BM, "open");
     } finally {
       if (cluster != null) {
         cluster.shutdown();

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerSafeMode.BMSafe
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
 
@@ -96,8 +96,8 @@ public class TestBlockManagerSafeMode {
     fsn = mock(FSNamesystem.class);
     doReturn(true).when(fsn).hasWriteLock();
     doReturn(true).when(fsn).hasReadLock();
-    doReturn(true).when(fsn).hasWriteLock(FSNamesystemLockMode.BM);
-    doReturn(true).when(fsn).hasReadLock(FSNamesystemLockMode.BM);
+    doReturn(true).when(fsn).hasWriteLock(RwLockMode.BM);
+    doReturn(true).when(fsn).hasReadLock(RwLockMode.BM);
     doReturn(true).when(fsn).isRunning();
     NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
 

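A note on the stubbing above: a Mockito mock does not route one overload through another, so stubbing hasWriteLock() alone still leaves hasWriteLock(RwLockMode.BM) returning the default false. Tests therefore stub every overload the code under test may assert, as in this fragment mirroring the fixture above (mock/doReturn are the usual org.mockito.Mockito static imports; fsn is the mocked FSNamesystem):

  FSNamesystem fsn = mock(FSNamesystem.class);
  doReturn(true).when(fsn).hasWriteLock();               // legacy no-arg overload
  doReturn(true).when(fsn).hasWriteLock(RwLockMode.BM);  // mode-specific overload
  doReturn(true).when(fsn).hasReadLock(RwLockMode.BM);
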
+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java

@@ -23,7 +23,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -41,6 +40,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 import org.slf4j.event.Level;
@@ -603,11 +603,11 @@ public class TestBlocksWithNotEnoughRacks {
 
   static BlockReconstructionWork scheduleReconstruction(
       FSNamesystem fsn, BlockInfo block, int priority) {
-    fsn.writeLock(FSNamesystemLockMode.BM);
+    fsn.writeLock(RwLockMode.BM);
     try {
       return fsn.getBlockManager().scheduleReconstruction(block, priority);
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.BM, "scheduleReconstruction");
+      fsn.writeUnlock(RwLockMode.BM, "scheduleReconstruction");
     }
   }
 

+ 13 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java

@@ -40,8 +40,8 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.VersionInfo;
 import org.junit.After;
@@ -132,7 +132,7 @@ public class TestComputeInvalidateWork {
   public void testComputeInvalidateReplicas() throws Exception {
     final int blockInvalidateLimit = bm.getDatanodeManager()
         .getBlockInvalidateLimit();
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       for (int i=0; i<nodes.length; i++) {
         for(int j=0; j<3*blockInvalidateLimit+1; j++) {
@@ -143,7 +143,7 @@ public class TestComputeInvalidateWork {
       }
       verifyInvalidationWorkCounts(blockInvalidateLimit);
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testComputeInvalidateReplicas");
+      namesystem.writeUnlock(RwLockMode.BM, "testComputeInvalidateReplicas");
     }
   }
 
@@ -155,7 +155,7 @@ public class TestComputeInvalidateWork {
   public void testComputeInvalidateStripedBlockGroups() throws Exception {
     final int blockInvalidateLimit =
         bm.getDatanodeManager().getBlockInvalidateLimit();
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       int nodeCount = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
       for (int i = 0; i < nodeCount; i++) {
@@ -168,7 +168,7 @@ public class TestComputeInvalidateWork {
       }
       verifyInvalidationWorkCounts(blockInvalidateLimit);
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testComputeInvalidateStripedBlockGroups");
+      namesystem.writeUnlock(RwLockMode.BM, "testComputeInvalidateStripedBlockGroups");
     }
   }
 
@@ -182,7 +182,7 @@ public class TestComputeInvalidateWork {
     final int blockInvalidateLimit =
         bm.getDatanodeManager().getBlockInvalidateLimit();
     final Random random = new Random(System.currentTimeMillis());
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       int nodeCount = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
       for (int i = 0; i < nodeCount; i++) {
@@ -202,7 +202,7 @@ public class TestComputeInvalidateWork {
       }
       verifyInvalidationWorkCounts(blockInvalidateLimit);
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testComputeInvalidate");
+      namesystem.writeUnlock(RwLockMode.BM, "testComputeInvalidate");
     }
   }
 
@@ -213,7 +213,7 @@ public class TestComputeInvalidateWork {
    */
   @Test(timeout=120000)
   public void testDatanodeReformat() throws Exception {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     try {
       // Change the datanode UUID to emulate a reformat
       String poolId = cluster.getNamesystem().getBlockPoolId();
@@ -235,7 +235,7 @@ public class TestComputeInvalidateWork {
       assertEquals(0, bm.computeInvalidateWork(1));
       assertEquals(0, bm.getPendingDeletionBlocksCount());
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testDatanodeReformat");
+      namesystem.writeUnlock(RwLockMode.BM, "testDatanodeReformat");
     }
   }
 
@@ -256,7 +256,7 @@ public class TestComputeInvalidateWork {
     dfs.delete(ecFile, false);
     BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty(
         cluster.getNamesystem(0).getBlockManager());
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     InvalidateBlocks invalidateBlocks;
     int totalStripedDataBlocks = totalBlockGroups * (ecPolicy.getNumDataUnits()
         + ecPolicy.getNumParityUnits());
@@ -273,7 +273,7 @@ public class TestComputeInvalidateWork {
       assertEquals("Unexpected invalidate count for striped block groups!",
           totalStripedDataBlocks, invalidateBlocks.getECBlocks());
     } finally {
-      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testDatanodeReRegistration");
+      namesystem.writeUnlock(RwLockMode.BM, "testDatanodeReRegistration");
     }
     // Re-register each DN and see that it wipes the invalidation work
     int totalBlockGroupsPerDataNode = totalBlockGroups;
@@ -285,14 +285,14 @@ public class TestComputeInvalidateWork {
           new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
           new ExportedBlockKeys(),
           VersionInfo.getVersion());
-      namesystem.writeLock(FSNamesystemLockMode.BM);
+      namesystem.writeLock(RwLockMode.BM);
       try {
         bm.getDatanodeManager().registerDatanode(reg);
         expected -= (totalReplicasPerDataNode + totalBlockGroupsPerDataNode);
         assertEquals("Expected number of invalidate blocks to decrease",
             (long) expected, invalidateBlocks.numBlocks());
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.BM, "testDatanodeReRegistration");
+        namesystem.writeUnlock(RwLockMode.BM, "testDatanodeReRegistration");
       }
     }
   }

+ 13 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java

@@ -36,7 +36,6 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
 
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -65,6 +64,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.util.Shell;
@@ -120,7 +120,7 @@ public class TestDatanodeManager {
     //Create the DatanodeManager which will be tested
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 0);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 10);
@@ -156,7 +156,7 @@ public class TestDatanodeManager {
     //Create the DatanodeManager which will be tested
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     Configuration conf = new Configuration();
     DatanodeManager dm = mockDatanodeManager(fsn, conf);
 
@@ -187,7 +187,7 @@ public class TestDatanodeManager {
     //Create the DatanodeManager which will be tested
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
 
     //Seed the RNG with a known value so test failures are easier to reproduce
@@ -287,7 +287,7 @@ public class TestDatanodeManager {
     //Create the DatanodeManager which will be tested
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
 
     Configuration conf = new Configuration();
     
@@ -406,7 +406,7 @@ public class TestDatanodeManager {
     Configuration conf = new Configuration();
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     if (scriptFileName != null && !scriptFileName.isEmpty()) {
       URL shellScript = getClass().getResource(scriptFileName);
       Path resourcePath = Paths.get(shellScript.toURI());
@@ -505,7 +505,7 @@ public class TestDatanodeManager {
     Configuration conf = new Configuration();
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     URL shellScript = getClass().getResource(
         "/" + Shell.appendScriptExtension("topology-script"));
     Path resourcePath = Paths.get(shellScript.toURI());
@@ -655,7 +655,7 @@ public class TestDatanodeManager {
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     URL shellScript = getClass()
         .getResource("/" + Shell.appendScriptExtension("topology-script"));
     Path resourcePath = Paths.get(shellScript.toURI());
@@ -723,7 +723,7 @@ public class TestDatanodeManager {
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     URL shellScript = getClass()
         .getResource("/" + Shell.appendScriptExtension("topology-script"));
     Path resourcePath = Paths.get(shellScript.toURI());
@@ -810,7 +810,7 @@ public class TestDatanodeManager {
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     URL shellScript = getClass()
         .getResource("/" + Shell.appendScriptExtension("topology-script"));
     Path resourcePath = Paths.get(shellScript.toURI());
@@ -900,7 +900,7 @@ public class TestDatanodeManager {
 
     // Set the write lock so that the DatanodeManager can start
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
 
     DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
     HostFileManager hm = new HostFileManager();
@@ -999,7 +999,7 @@ public class TestDatanodeManager {
       throws IOException {
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, maxTransfers);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
@@ -1154,7 +1154,7 @@ public class TestDatanodeManager {
       throws IOException {
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, maxTransfers);
     DatanodeManager dm = Mockito.spy(mockDatanodeManager(fsn, conf));

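Unit tests that drive DatanodeManager against a mocked FSNamesystem have to stub both the legacy no-argument lock checks and the new per-mode overloads, as the repeated Mockito lines above show. A condensed sketch of that setup follows; the helper name is hypothetical, but the stubbed signatures are exactly the ones in the diff.

```java
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.util.RwLockMode;
import org.mockito.Mockito;

public final class MockedLockStateSketch {
  /** Returns a namesystem mock that claims to hold every relevant lock. */
  static FSNamesystem mockLockedNamesystem() {
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    // legacy whole-namesystem checks, still consulted by unconverted code
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
    Mockito.when(fsn.hasReadLock()).thenReturn(true);
    // per-mode checks added by the fine-grained-lock (FGL) work
    Mockito.when(fsn.hasWriteLock(RwLockMode.BM)).thenReturn(true);
    Mockito.when(fsn.hasReadLock(RwLockMode.BM)).thenReturn(true);
    return fsn;
  }
}
```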
+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java

@@ -36,13 +36,13 @@ import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
@@ -92,7 +92,7 @@ public class TestHeartbeatHandling {
       final DatanodeStorageInfo[] ONE_TARGET = {dd.getStorageInfo(storageID)};
 
       try {
-        namesystem.writeLock(FSNamesystemLockMode.BM);
+        namesystem.writeLock(RwLockMode.BM);
         synchronized(hm) {
           for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
             dd.addBlockToBeReplicated(
@@ -137,7 +137,7 @@ public class TestHeartbeatHandling {
           assertEquals(0, cmds.length);
         }
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.BM, "testHeartbeat");
+        namesystem.writeUnlock(RwLockMode.BM, "testHeartbeat");
       }
     } finally {
       cluster.shutdown();
@@ -177,7 +177,7 @@ public class TestHeartbeatHandling {
       dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
 
       try {
-        namesystem.writeLock(FSNamesystemLockMode.BM);
+        namesystem.writeLock(RwLockMode.BM);
         synchronized(hm) {
           NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem);
           NameNodeAdapter.sendHeartBeat(nodeReg2, dd2, namesystem);
@@ -256,7 +256,7 @@ public class TestHeartbeatHandling {
           assertEquals(recoveringNodes[2], dd3);
         }
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.BM, "testHeartbeat");
+        namesystem.writeUnlock(RwLockMode.BM, "testHeartbeat");
       }
     } finally {
       cluster.shutdown();

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java

@@ -39,12 +39,12 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
@@ -185,7 +185,7 @@ public class TestNameNodePrunesMissingStorages {
         DataNodeTestUtils.triggerBlockReport(dn);
       }
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/foo1"));
-      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+      cluster.getNamesystem().writeLock(RwLockMode.BM);
       final String storageIdToRemove;
       String datanodeUuid;
       // Find the first storage which this block is in.
@@ -201,7 +201,7 @@ public class TestNameNodePrunesMissingStorages {
         storageIdToRemove = info.getStorageID();
         datanodeUuid = info.getDatanodeDescriptor().getDatanodeUuid();
       } finally {
-        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+        cluster.getNamesystem().writeUnlock(RwLockMode.BM,
             "testRemovingStorageDoesNotProduceZombies");
       }
       // Find the DataNode which holds that first storage.
@@ -347,7 +347,7 @@ public class TestNameNodePrunesMissingStorages {
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
         @Override
         public Boolean get() {
-          cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+          cluster.getNamesystem().writeLock(RwLockMode.BM);
           try {
             Iterator<DatanodeStorageInfo> storageInfoIter =
                 cluster.getNamesystem().getBlockManager().
@@ -369,7 +369,7 @@ public class TestNameNodePrunesMissingStorages {
             LOG.info("Successfully found " + block.getBlockName() + " in " +
                 "be in storage id " + newStorageId);
           } finally {
-            cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testRenamingStorageIds");
+            cluster.getNamesystem().writeUnlock(RwLockMode.BM, "testRenamingStorageIds");
           }
           return true;
         }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
@@ -175,14 +175,14 @@ public class TestNodeCount {
   /* threadsafe read of the replication counts for this block */
   NumberReplicas countNodes(Block block, FSNamesystem namesystem) {
     BlockManager blockManager = namesystem.getBlockManager();
-    namesystem.readLock(FSNamesystemLockMode.BM);
+    namesystem.readLock(RwLockMode.BM);
     try {
       lastBlock = block;
       lastNum = blockManager.countNodes(blockManager.getStoredBlock(block));
       return lastNum;
     }
     finally {
-      namesystem.readUnlock(FSNamesystemLockMode.BM, "countNodes");
+      namesystem.readUnlock(RwLockMode.BM, "countNodes");
     }
   }
 }

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java

@@ -39,8 +39,8 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.Test;
 
 public class TestOverReplicatedBlocks {
@@ -96,7 +96,7 @@ public class TestOverReplicatedBlocks {
       final BlockManager bm = namesystem.getBlockManager();
       final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
       try {
-        namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+        namesystem.writeLock(RwLockMode.GLOBAL);
         synchronized(hm) {
           // set live datanode's remaining space to be 0 
           // so they will be chosen to be deleted when over-replication occurs
@@ -119,7 +119,7 @@ public class TestOverReplicatedBlocks {
               bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
         }
       } finally {
-        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "testProcesOverReplicateBlock");
+        namesystem.writeUnlock(RwLockMode.GLOBAL, "testProcesOverReplicateBlock");
       }
       
     } finally {
@@ -182,11 +182,11 @@ public class TestOverReplicatedBlocks {
 
       // All replicas for deletion should be scheduled on lastDN.
       // And should not actually be deleted, because lastDN does not heartbeat.
-      namesystem.readLock(FSNamesystemLockMode.BM);
+      namesystem.readLock(RwLockMode.BM);
       final int dnBlocks = bm.getExcessSize4Testing(dnReg.getDatanodeUuid());
       assertEquals("Replicas on node " + lastDNid + " should have been deleted",
           SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks);
-      namesystem.readUnlock(FSNamesystemLockMode.BM, "excessSize4Testing");
+      namesystem.readUnlock(RwLockMode.BM, "excessSize4Testing");
       for(BlockLocation location : locs)
         assertEquals("Block should still have 4 replicas",
             4, location.getNames().length);

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java

@@ -51,12 +51,12 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
@@ -289,13 +289,13 @@ public class TestPendingReconstruction {
 
       // A received IBR processing calls addBlock(). If the gen stamp in the
       // report is not the same, it should stay in pending.
-      fsn.writeLock(FSNamesystemLockMode.BM);
+      fsn.writeLock(RwLockMode.BM);
       try {
         // Use a wrong gen stamp.
         blkManager.addBlock(desc[0].getStorageInfos()[0],
             new Block(1, 1, 0), null);
       } finally {
-        fsn.writeUnlock(FSNamesystemLockMode.BM, "testProcessPendingReconstructions");
+        fsn.writeUnlock(RwLockMode.BM, "testProcessPendingReconstructions");
       }
 
       // The block should still be pending
@@ -304,12 +304,12 @@ public class TestPendingReconstruction {
 
       // A block report with the correct gen stamp should remove the record
       // from the pending queue.
-      fsn.writeLock(FSNamesystemLockMode.BM);
+      fsn.writeLock(RwLockMode.BM);
       try {
         blkManager.addBlock(desc[0].getStorageInfos()[0],
             new Block(1, 1, 1), null);
       } finally {
-        fsn.writeUnlock(FSNamesystemLockMode.BM, "testProcessPendingReconstructions");
+        fsn.writeUnlock(RwLockMode.BM, "testProcessPendingReconstructions");
       }
 
       GenericTestUtils.waitFor(() -> pendingReconstruction.size() == 0, 500,
@@ -460,7 +460,7 @@ public class TestPendingReconstruction {
       // 3. mark a couple of blocks as corrupt
       LocatedBlock block = NameNodeAdapter.getBlockLocations(
           cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
-      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+      cluster.getNamesystem().writeLock(RwLockMode.BM);
       try {
         bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
             "STORAGE_ID", "TEST");
@@ -472,7 +472,7 @@ public class TestPendingReconstruction {
         BlockInfo storedBlock = bm.getStoredBlock(block.getBlock().getLocalBlock());
         assertEquals(bm.pendingReconstruction.getNumReplicas(storedBlock), 2);
       } finally {
-        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testPendingAndInvalidate");
+        cluster.getNamesystem().writeUnlock(RwLockMode.BM, "testPendingAndInvalidate");
       }
 
       // 4. delete the file
@@ -508,7 +508,7 @@ public class TestPendingReconstruction {
         DATANODE_COUNT).build();
     tmpCluster.waitActive();
     FSNamesystem fsn = tmpCluster.getNamesystem(0);
-    fsn.writeLock(FSNamesystemLockMode.BM);
+    fsn.writeLock(RwLockMode.BM);
 
     try {
       BlockManager bm = fsn.getBlockManager();
@@ -564,7 +564,7 @@ public class TestPendingReconstruction {
       }, 100, 60000);
     } finally {
       tmpCluster.shutdown();
-      fsn.writeUnlock(FSNamesystemLockMode.BM, "testReplicationCounter");
+      fsn.writeUnlock(RwLockMode.BM, "testReplicationCounter");
     }
   }
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java

@@ -24,9 +24,9 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestProvidedImpl;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.util.RwLock;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.Before;
 import org.junit.Test;
 import java.io.IOException;
@@ -88,7 +88,7 @@ public class TestProvidedStorageMap {
     DatanodeStorage dn1DiskStorage = new DatanodeStorage(
         "sid-1", DatanodeStorage.State.NORMAL, StorageType.DISK);
 
-    when(nameSystemLock.hasWriteLock(FSNamesystemLockMode.GLOBAL)).thenReturn(true);
+    when(nameSystemLock.hasWriteLock(RwLockMode.GLOBAL)).thenReturn(true);
     DatanodeStorageInfo dns1Provided =
         providedMap.getStorage(dn1, dn1ProvidedStorage);
     DatanodeStorageInfo dns1Disk = providedMap.getStorage(dn1, dn1DiskStorage);

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
@@ -197,11 +197,11 @@ public class TestReconstructStripedBlocksWithRackAwareness {
       DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
     }
 
-    fsn.writeLock(FSNamesystemLockMode.BM);
+    fsn.writeLock(RwLockMode.BM);
     try {
       bm.processMisReplicatedBlocks();
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.BM, "testReconstructForNotEnoughRacks");
+      fsn.writeUnlock(RwLockMode.BM, "testReconstructForNotEnoughRacks");
     }
 
     // check if redundancy monitor correctly schedule the reconstruction work.
@@ -343,12 +343,12 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     final DatanodeAdminManager decomManager =
         (DatanodeAdminManager) Whitebox.getInternalState(
             dm, "datanodeAdminManager");
-    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().writeLock(RwLockMode.BM);
     try {
       dn9.stopDecommission();
       decomManager.startDecommission(dn9);
     } finally {
-      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      cluster.getNamesystem().writeUnlock(RwLockMode.BM,
           "testReconstructionWithDecommission");
     }
 

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java

@@ -66,8 +66,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.log4j.Level;
@@ -1407,12 +1407,12 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     FSNamesystem mockNS = mock(FSNamesystem.class);
     when(mockNS.hasWriteLock()).thenReturn(true);
     when(mockNS.hasReadLock()).thenReturn(true);
-    when(mockNS.hasWriteLock(FSNamesystemLockMode.GLOBAL)).thenReturn(true);
-    when(mockNS.hasReadLock(FSNamesystemLockMode.GLOBAL)).thenReturn(true);
-    when(mockNS.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
-    when(mockNS.hasReadLock(FSNamesystemLockMode.BM)).thenReturn(true);
-    when(mockNS.hasWriteLock(FSNamesystemLockMode.FS)).thenReturn(true);
-    when(mockNS.hasReadLock(FSNamesystemLockMode.FS)).thenReturn(true);
+    when(mockNS.hasWriteLock(RwLockMode.GLOBAL)).thenReturn(true);
+    when(mockNS.hasReadLock(RwLockMode.GLOBAL)).thenReturn(true);
+    when(mockNS.hasWriteLock(RwLockMode.BM)).thenReturn(true);
+    when(mockNS.hasReadLock(RwLockMode.BM)).thenReturn(true);
+    when(mockNS.hasWriteLock(RwLockMode.FS)).thenReturn(true);
+    when(mockNS.hasReadLock(RwLockMode.FS)).thenReturn(true);
     BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
     LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;
 
@@ -1462,7 +1462,7 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
           throws IOException {
     Namesystem mockNS = mock(Namesystem.class);
     when(mockNS.hasWriteLock()).thenReturn(true);
-    when(mockNS.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+    when(mockNS.hasWriteLock(RwLockMode.BM)).thenReturn(true);
 
     BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
     LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java

@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -73,7 +73,7 @@ public class TestReplicationPolicyConsiderLoad
    */
   @Test
   public void testChooseTargetWithDecomNodes() throws IOException {
-    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    namenode.getNamesystem().writeLock(RwLockMode.BM);
     try {
       dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[3],
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
@@ -125,7 +125,7 @@ public class TestReplicationPolicyConsiderLoad
       dataNodes[0].stopDecommission();
       dataNodes[1].stopDecommission();
       dataNodes[2].stopDecommission();
-      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      namenode.getNamesystem().writeUnlock(RwLockMode.BM,
           "testChooseTargetWithDecomNodes");
     }
     NameNode.LOG.info("Done working on it");
@@ -133,7 +133,7 @@ public class TestReplicationPolicyConsiderLoad
 
   @Test
   public void testConsiderLoadFactor() throws IOException {
-    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    namenode.getNamesystem().writeLock(RwLockMode.BM);
     try {
       dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[0],
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[0]),
@@ -180,7 +180,7 @@ public class TestReplicationPolicyConsiderLoad
             info.getDatanodeDescriptor().getXceiverCount() <= (load/6)*1.2);
       }
     } finally {
-      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testConsiderLoadFactor");
+      namenode.getNamesystem().writeUnlock(RwLockMode.BM, "testConsiderLoadFactor");
     }
   }
 }

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java

@@ -22,8 +22,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.OutlierMetrics;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
@@ -86,7 +86,7 @@ public class TestReplicationPolicyExcludeSlowNodes
    */
   @Test
   public void testChooseTargetExcludeSlowNodes() throws Exception {
-    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    namenode.getNamesystem().writeLock(RwLockMode.BM);
     try {
       // add nodes
       for (int i = 0; i < dataNodes.length; i++) {
@@ -136,7 +136,7 @@ public class TestReplicationPolicyExcludeSlowNodes
             .getDatanodeUuid()));
       }
     } finally {
-      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      namenode.getNamesystem().writeUnlock(RwLockMode.BM,
           "testChooseTargetExcludeSlowNodes");
     }
     NameNode.LOG.info("Done working on it");
@@ -144,7 +144,7 @@ public class TestReplicationPolicyExcludeSlowNodes
 
   @Test
   public void testSlowPeerTrackerEnabledClearSlowNodes() throws Exception {
-    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    namenode.getNamesystem().writeLock(RwLockMode.BM);
     try {
       // add nodes
       for (DatanodeDescriptor dataNode : dataNodes) {
@@ -174,7 +174,7 @@ public class TestReplicationPolicyExcludeSlowNodes
       assertTrue(dnManager.isSlowPeerCollectorInitialized());
       assertEquals(0, DatanodeManager.getSlowNodesUuidSet().size());
     } finally {
-      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      namenode.getNamesystem().writeUnlock(RwLockMode.BM,
           "testSlowPeerTrackerEnabledClearSlowNodes");
     }
   }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java

@@ -22,8 +22,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.Test;
 
 import java.util.ArrayList;
@@ -92,7 +92,7 @@ public class TestReplicationPolicyRatioConsiderLoadWithStorage
    */
   @Test
   public void testChooseTargetWithRatioConsiderLoad() {
-    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    namenode.getNamesystem().writeLock(RwLockMode.BM);
     try {
       // After heartbeat has been processed, the total load should be 200.
       // And average load per node should be 40. The max load should be 2 * 40;
@@ -164,7 +164,7 @@ public class TestReplicationPolicyRatioConsiderLoadWithStorage
       assertTrue(targetSet.contains(dataNodes[3]));
       assertTrue(targetSet.contains(dataNodes[4]));
     } finally {
-      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      namenode.getNamesystem().writeUnlock(RwLockMode.BM,
           "testChooseTargetWithRatioConsiderLoad");
     }
   }

+ 9 - 16
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 
 
@@ -47,19 +46,13 @@ import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.test.Whitebox;
-import org.mockito.ArgumentMatcher;
-import org.mockito.ArgumentMatchers;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
 
 import static org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer.FSIMAGE_ATTRIBUTE_KEY;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.spy;
 
 /**
  * This is a utility class to expose NameNode functionality for unit tests.
@@ -90,13 +83,13 @@ public class NameNodeAdapter {
     // consistent with FSNamesystem#getFileInfo()
     final String operationName = needBlockToken ? "open" : "getfileinfo";
     FSPermissionChecker.setOperationType(operationName);
-    namenode.getNamesystem().readLock(FSNamesystemLockMode.FS);
+    namenode.getNamesystem().readLock(RwLockMode.FS);
     try {
       return FSDirStatAndListingOp.getFileInfo(namenode.getNamesystem()
           .getFSDirectory(), pc, src, resolveLink, needLocation,
           needBlockToken);
     } finally {
-      namenode.getNamesystem().readUnlock(FSNamesystemLockMode.FS, "getFileInfo");
+      namenode.getNamesystem().readUnlock(RwLockMode.FS, "getFileInfo");
     }
   }
   
@@ -209,11 +202,11 @@ public class NameNodeAdapter {
    */
   public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
       DatanodeID id) throws IOException {
-    ns.readLock(FSNamesystemLockMode.BM);
+    ns.readLock(RwLockMode.BM);
     try {
       return ns.getBlockManager().getDatanodeManager().getDatanode(id);
     } finally {
-      ns.readUnlock(FSNamesystemLockMode.BM, "getDatanode");
+      ns.readUnlock(RwLockMode.BM, "getDatanode");
     }
   }
   
@@ -237,7 +230,7 @@ public class NameNodeAdapter {
   public static BlockInfo addBlockNoJournal(final FSNamesystem fsn,
       final String src, final DatanodeStorageInfo[] targets)
       throws IOException {
-    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsn.writeLock(RwLockMode.GLOBAL);
     try {
       INodeFile file = (INodeFile)fsn.getFSDirectory().getINode(src);
       Block newBlock = fsn.createNewBlock(BlockType.CONTIGUOUS);
@@ -246,17 +239,17 @@ public class NameNodeAdapter {
           fsn, src, inodesInPath, newBlock, targets, BlockType.CONTIGUOUS);
       return file.getLastBlock();
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "addBlockNoJournal");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "addBlockNoJournal");
     }
   }
 
   public static void persistBlocks(final FSNamesystem fsn,
       final String src, final INodeFile file) throws IOException {
-    fsn.writeLock(FSNamesystemLockMode.FS);
+    fsn.writeLock(RwLockMode.FS);
     try {
       FSDirWriteFileOp.persistBlocks(fsn.getFSDirectory(), src, file, true);
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.FS, "persistBlocks");
+      fsn.writeUnlock(RwLockMode.FS, "persistBlocks");
     }
   }
 

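The NameNodeAdapter changes above illustrate how callers pick the narrowest lock mode: RwLockMode.FS for namespace-only work (getFileInfo, persistBlocks), RwLockMode.BM for datanode lookups (getDatanode), and RwLockMode.GLOBAL when an operation spans both (addBlockNoJournal). A sketch of that selection rule, with illustrative placeholder method names and bodies:

```java
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.util.RwLockMode;

public final class LockModeSelectionSketch {
  /** FS mode: touches only the namespace (inodes, paths). */
  static void namespaceOnlyRead(FSNamesystem fsn) {
    fsn.readLock(RwLockMode.FS);
    try {
      // e.g. resolve a path and read inode attributes
    } finally {
      fsn.readUnlock(RwLockMode.FS, "namespaceOnlyRead");
    }
  }

  /** GLOBAL mode: touches both namespace and block/datanode state. */
  static void crossCuttingWrite(FSNamesystem fsn) {
    fsn.writeLock(RwLockMode.GLOBAL);
    try {
      // e.g. allocate a block (BM state) and attach it to a file (FS state)
    } finally {
      fsn.writeUnlock(RwLockMode.GLOBAL, "crossCuttingWrite");
    }
  }
}
```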
+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java

@@ -35,8 +35,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.junit.After;
 import org.junit.Before;
@@ -93,7 +93,7 @@ public class TestAddBlockRetry {
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
     LocatedBlock[] onRetryBlock = new LocatedBlock[1];
-    ns.readLock(FSNamesystemLockMode.GLOBAL);
+    ns.readLock(RwLockMode.GLOBAL);
     FSDirWriteFileOp.ValidateAddBlockResult r;
     FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
     try {
@@ -101,7 +101,7 @@ public class TestAddBlockRetry {
                                             HdfsConstants.GRANDFATHER_INODE_ID,
                                             "clientName", null, onRetryBlock);
     } finally {
-      ns.readUnlock(FSNamesystemLockMode.GLOBAL, "validateAddBlock");
+      ns.readUnlock(RwLockMode.GLOBAL, "validateAddBlock");
     }
     DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
         ns.getBlockManager(), src, null, null, null, r);
@@ -119,13 +119,13 @@ public class TestAddBlockRetry {
     assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
 
     // continue first addBlock()
-    ns.writeLock(FSNamesystemLockMode.GLOBAL);
+    ns.writeLock(RwLockMode.GLOBAL);
     LocatedBlock newBlock;
     try {
       newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src,
           HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
     } finally {
-      ns.writeUnlock(FSNamesystemLockMode.GLOBAL, "testRetryAddBlockWhileInChooseTarget");
+      ns.writeUnlock(RwLockMode.GLOBAL, "testRetryAddBlockWhileInChooseTarget");
     }
     assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
 
@@ -139,11 +139,11 @@ public class TestAddBlockRetry {
 
   boolean checkFileProgress(String src, boolean checkall) throws IOException {
     final FSNamesystem ns = cluster.getNamesystem();
-    ns.readLock(FSNamesystemLockMode.GLOBAL);
+    ns.readLock(RwLockMode.GLOBAL);
     try {
       return ns.checkFileProgress(src, ns.dir.getINode(src).asFile(), checkall);
     } finally {
-      ns.readUnlock(FSNamesystemLockMode.GLOBAL, "checkFileProgress");
+      ns.readUnlock(RwLockMode.GLOBAL, "checkFileProgress");
     }
   }
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java

@@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -205,12 +205,12 @@ public class TestAddOverReplicatedStripedBlocks {
     BlockManager bm = cluster.getNamesystem().getBlockManager();
     List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
     List<String> storages = Arrays.asList(bg.getStorageIDs());
-    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().writeLock(RwLockMode.BM);
     try {
       bm.findAndMarkBlockAsCorrupt(lbs.getLastLocatedBlock().getBlock(),
           infos.get(0), storages.get(0), "TEST");
     } finally {
-      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      cluster.getNamesystem().writeUnlock(RwLockMode.BM,
           "testProcessOverReplicatedAndCorruptStripedBlock");
     }
     assertEquals(1, bm.countNodes(bm.getStoredBlock(blockInfo))

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java

@@ -37,8 +37,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
@@ -254,11 +254,11 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
 
     //test if decommission succeeded
     DatanodeDescriptor dnd3 = dnm.getDatanode(cluster.getDataNodes().get(3).getDatanodeId());
-    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().writeLock(RwLockMode.BM);
     try {
       dm.getDatanodeAdminManager().startDecommission(dnd3);
     } finally {
-      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+      cluster.getNamesystem().writeUnlock(RwLockMode.BM,
           "testPlacementWithOnlyOneNodeInRackDecommission");
     }
 

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java

@@ -44,7 +44,6 @@ import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.commons.lang3.time.DateUtils;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -85,6 +84,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
@@ -763,7 +763,7 @@ public class TestCacheDirectives {
       @Override
       public Boolean get() {
         int numCachedBlocks = 0, numCachedReplicas = 0;
-        namesystem.readLock(FSNamesystemLockMode.BM);
+        namesystem.readLock(RwLockMode.BM);
         try {
           GSet<CachedBlock, CachedBlock> cachedBlocks =
               cacheManager.getCachedBlocks();
@@ -776,7 +776,7 @@ public class TestCacheDirectives {
             }
           }
         } finally {
-          namesystem.readUnlock(FSNamesystemLockMode.BM, "checkBlocks");
+          namesystem.readUnlock(RwLockMode.BM, "checkBlocks");
         }
 
         LOG.info(logString + " cached blocks: have " + numCachedBlocks +
@@ -1507,7 +1507,7 @@ public class TestCacheDirectives {
   private void checkPendingCachedEmpty(MiniDFSCluster cluster)
       throws Exception {
     Thread.sleep(1000);
-    cluster.getNamesystem().readLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().readLock(RwLockMode.BM);
     try {
       final DatanodeManager datanodeManager =
           cluster.getNamesystem().getBlockManager().getDatanodeManager();
@@ -1520,7 +1520,7 @@ public class TestCacheDirectives {
             descriptor.getPendingCached().isEmpty());
       }
     } finally {
-      cluster.getNamesystem().readUnlock(FSNamesystemLockMode.BM, "checkPendingCachedEmpty");
+      cluster.getNamesystem().readUnlock(RwLockMode.BM, "checkPendingCachedEmpty");
     }
   }
 
@@ -1667,9 +1667,9 @@ public class TestCacheDirectives {
     HATestUtil.waitForStandbyToCatchUp(ann, sbn);
     GenericTestUtils.waitFor(() -> {
       boolean isConsistence = false;
-      ann.getNamesystem().readLock(FSNamesystemLockMode.FS);
+      ann.getNamesystem().readLock(RwLockMode.FS);
       try {
-        sbn.getNamesystem().readLock(FSNamesystemLockMode.FS);
+        sbn.getNamesystem().readLock(RwLockMode.FS);
         try {
           Iterator<CacheDirective> annDirectivesIt = annCachemanager.
               getCacheDirectives().iterator();
@@ -1684,10 +1684,10 @@ public class TestCacheDirectives {
             }
           }
         } finally {
-          sbn.getNamesystem().readUnlock(FSNamesystemLockMode.FS, "expiryTimeConsistency");
+          sbn.getNamesystem().readUnlock(RwLockMode.FS, "expiryTimeConsistency");
         }
       } finally {
-        ann.getNamesystem().readUnlock(FSNamesystemLockMode.FS, "expiryTimeConsistency");
+        ann.getNamesystem().readUnlock(RwLockMode.FS, "expiryTimeConsistency");
       }
       if (!isConsistence) {
         LOG.info("testEexpiryTimeConsistency:"

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java

@@ -52,8 +52,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -472,7 +472,7 @@ public class TestDeleteRace {
         } catch (InterruptedException e) {
         }
       });
-      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+      fsn.writeLock(RwLockMode.GLOBAL);
       open.start();
       openSem.acquire();
       Thread.yield();
@@ -480,7 +480,7 @@ public class TestDeleteRace {
       rename.start();
       renameSem.acquire();
       Thread.yield();
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testOpenRenameRace");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "testOpenRenameRace");
 
       // wait open and rename threads finish.
       open.join();

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java

@@ -45,8 +45,8 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -393,11 +393,11 @@ public class TestDiskspaceQuotaUpdate {
 
   private void updateCountForQuota(int i) {
     FSNamesystem fsn = cluster.getNamesystem();
-    fsn.writeLock(FSNamesystemLockMode.FS);
+    fsn.writeLock(RwLockMode.FS);
     try {
       getFSDirectory().updateCountForQuota(i);
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.FS, "updateCountForQuota");
+      fsn.writeUnlock(RwLockMode.FS, "updateCountForQuota");
     }
   }
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java

@@ -56,12 +56,12 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -525,11 +525,11 @@ public class TestEditLogRace {
         public void run() {
           try {
             LOG.info("Starting setOwner");
-            namesystem.writeLock(FSNamesystemLockMode.FS);
+            namesystem.writeLock(RwLockMode.FS);
             try {
               editLog.logSetOwner("/","test","test");
             } finally {
-              namesystem.writeUnlock(FSNamesystemLockMode.FS, "testSaveRightBeforeSync");
+              namesystem.writeUnlock(RwLockMode.FS, "testSaveRightBeforeSync");
             }
             sleepingBeforeSync.countDown();
             LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java

@@ -32,13 +32,13 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.namenode.visitor.NamespacePrintVisitor;
 import org.apache.hadoop.hdfs.util.Canceler;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -152,11 +152,11 @@ public class TestFSImageWithSnapshot {
         conf);
     FSImageCompression compression = FSImageCompression.createCompression(conf);
     File imageFile = getImageFile(testDir, txid);
-    fsn.readLock(FSNamesystemLockMode.GLOBAL);
+    fsn.readLock(RwLockMode.GLOBAL);
     try {
       saver.save(imageFile, compression);
     } finally {
-      fsn.readUnlock(FSNamesystemLockMode.GLOBAL, "saveFSImage");
+      fsn.readUnlock(RwLockMode.GLOBAL, "saveFSImage");
     }
     return imageFile;
   }
@@ -164,14 +164,14 @@ public class TestFSImageWithSnapshot {
   /** Load the fsimage from a temp file */
   private void loadFSImageFromTempFile(File imageFile) throws IOException {
     FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, fsn);
-    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsn.writeLock(RwLockMode.GLOBAL);
     fsn.getFSDirectory().writeLock();
     try {
       loader.load(imageFile, false);
       fsn.getFSDirectory().updateCountForQuota();
     } finally {
       fsn.getFSDirectory().writeUnlock();
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "loadFSImageFromTempFile");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "loadFSImageFromTempFile");
     }
   }
   

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java

@@ -41,12 +41,12 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.Whitebox;
 import org.junit.After;
 import org.junit.Test;
@@ -195,12 +195,12 @@ public class TestFSNamesystem {
   }
 
   private void clearNamesystem(FSNamesystem fsn) {
-    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsn.writeLock(RwLockMode.GLOBAL);
     try {
       fsn.clear();
       assertFalse(fsn.isImageLoaded());
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "clearNamesystem");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "clearNamesystem");
     }
   }
 

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java

@@ -34,10 +34,10 @@ import javax.management.ObjectName;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.metrics2.impl.ConfigBuilder;
 import org.apache.hadoop.metrics2.impl.TestMetricsConfig;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -154,7 +154,7 @@ public class TestFSNamesystemMBean {
       cluster.waitActive();
 
       fsn = cluster.getNameNode().namesystem;
-      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+      fsn.writeLock(RwLockMode.GLOBAL);
       Thread.sleep(jmxCachePeriod * 1000);
 
       MBeanClient client = new MBeanClient();
@@ -164,8 +164,8 @@ public class TestFSNamesystemMBean {
           "is owned by another thread", client.succeeded);
       client.interrupt();
     } finally {
-      if (fsn != null && fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL)) {
-        fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testWithFSNamesystemWriteLock");
+      if (fsn != null && fsn.hasWriteLock(RwLockMode.GLOBAL)) {
+        fsn.writeUnlock(RwLockMode.GLOBAL, "testWithFSNamesystemWriteLock");
       }
       if (cluster != null) {
         cluster.shutdown();

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java

@@ -36,7 +36,6 @@ import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.slf4j.Logger;
@@ -66,6 +65,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -1084,7 +1084,7 @@ public class TestFileTruncate {
     INodeFile file = iip.getLastINode().asFile();
     long initialGenStamp = file.getLastBlock().getGenerationStamp();
     // Test that prepareFileForTruncate sets up in-place truncate.
-    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsn.writeLock(RwLockMode.GLOBAL);
     try {
       Block oldBlock = file.getLastBlock();
       Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
@@ -1104,7 +1104,7 @@ public class TestFileTruncate {
       fsn.getEditLog().logTruncate(
           src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testTruncateRecovery");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "testTruncateRecovery");
     }
 
     // Re-create file and ensure we are ready to copy on truncate
@@ -1118,7 +1118,7 @@ public class TestFileTruncate {
         (BlockInfoContiguous) file.getLastBlock()), is(true));
     initialGenStamp = file.getLastBlock().getGenerationStamp();
     // Test that prepareFileForTruncate sets up copy-on-write truncate
-    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsn.writeLock(RwLockMode.GLOBAL);
     try {
       Block oldBlock = file.getLastBlock();
       Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
@@ -1138,7 +1138,7 @@ public class TestFileTruncate {
       fsn.getEditLog().logTruncate(
           src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testTruncateRecovery");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "testTruncateRecovery");
     }
     checkBlockRecovery(srcPath);
     fs.deleteSnapshot(parent, "ss0");

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -105,12 +105,12 @@ import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.AccessControlException;
@@ -1512,11 +1512,11 @@ public class TestFsck {
     ExtendedBlock eb = util.getFirstBlock(dfs, path);
     BlockCollection bc = null;
     try {
-      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+      fsn.writeLock(RwLockMode.GLOBAL);
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
       bc = fsn.getBlockCollection(bi);
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testBlockIdCKDecommission");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "testBlockIdCKDecommission");
     }
     DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
     bm.getDatanodeManager().getDatanodeAdminManager().startDecommission(dn);
@@ -1954,11 +1954,11 @@ public class TestFsck {
     ExtendedBlock eb = util.getFirstBlock(dfs, path);
     BlockCollection bc = null;
     try {
-      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+      fsn.writeLock(RwLockMode.GLOBAL);
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
       bc = fsn.getBlockCollection(bi);
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testFsckWithDecommissionedReplicas");
+      fsn.writeUnlock(RwLockMode.GLOBAL, "testFsckWithDecommissionedReplicas");
     }
     DatanodeDescriptor dn = bc.getBlocks()[0]
         .getDatanode(0);

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java

@@ -23,7 +23,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -73,14 +73,14 @@ public class TestGetBlockLocations {
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
         if(!deleted[0]) {
-          fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+          fsn.writeLock(RwLockMode.GLOBAL);
           try {
             INodesInPath iip = fsd.getINodesInPath(FILE_PATH, DirOp.READ);
             FSDirDeleteOp.delete(fsd, iip, new INode.BlocksMapUpdateInfo(),
                                  new ArrayList<INode>(), new ArrayList<Long>(),
                                  now());
           } finally {
-            fsn.writeUnlock(FSNamesystemLockMode.GLOBAL,
+            fsn.writeUnlock(RwLockMode.GLOBAL,
                 "testGetBlockLocationsRacingWithDelete");
           }
           deleted[0] = true;
@@ -108,14 +108,14 @@ public class TestGetBlockLocations {
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
         if (!renamed[0]) {
-          fsn.writeLock(FSNamesystemLockMode.FS);
+          fsn.writeLock(RwLockMode.FS);
           try {
             FSDirRenameOp.renameTo(fsd, fsd.getPermissionChecker(), FILE_PATH,
                                    DST_PATH, new INode.BlocksMapUpdateInfo(),
                                    false);
             renamed[0] = true;
           } finally {
-            fsn.writeUnlock(FSNamesystemLockMode.FS, "testGetBlockLocationsRacingWithRename");
+            fsn.writeUnlock(RwLockMode.FS, "testGetBlockLocationsRacingWithRename");
           }
         }
         invocation.callRealMethod();
@@ -144,13 +144,13 @@ public class TestGetBlockLocations {
         perm, 1, 1, new BlockInfo[] {}, (short) 1,
         DFS_BLOCK_SIZE_DEFAULT);
 
-    fsn.writeLock(FSNamesystemLockMode.FS);
+    fsn.writeLock(RwLockMode.FS);
     try {
       final FSDirectory fsd = fsn.getFSDirectory();
       INodesInPath iip = fsd.getINodesInPath("/", DirOp.READ);
       fsd.addINode(iip, file, null);
     } finally {
-      fsn.writeUnlock(FSNamesystemLockMode.FS, "setupFileSystem");
+      fsn.writeUnlock(RwLockMode.FS, "setupFileSystem");
     }
     return fsn;
   }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java

@@ -21,12 +21,12 @@ import java.io.IOException;
 import java.util.Random;
 
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -121,11 +121,11 @@ public class TestLargeDirectoryDelete {
           try {
             int blockcount = getBlockCount();
             if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
-              mc.getNamesystem().writeLock(FSNamesystemLockMode.GLOBAL);
+              mc.getNamesystem().writeLock(RwLockMode.GLOBAL);
               try {
                 lockOps++;
               } finally {
-                mc.getNamesystem().writeUnlock(FSNamesystemLockMode.GLOBAL, "runThreads");
+                mc.getNamesystem().writeUnlock(RwLockMode.GLOBAL, "runThreads");
               }
               Thread.sleep(1);
             }

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java

@@ -35,8 +35,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.util.Lists;
 
 import org.junit.Rule;
@@ -467,10 +467,10 @@ public class TestLeaseManager {
     when(fsn.isRunning()).thenReturn(true);
     when(fsn.hasReadLock()).thenReturn(true);
     when(fsn.hasWriteLock()).thenReturn(true);
-    when(fsn.hasReadLock(FSNamesystemLockMode.FS)).thenReturn(true);
-    when(fsn.hasWriteLock(FSNamesystemLockMode.FS)).thenReturn(true);
-    when(fsn.hasReadLock(FSNamesystemLockMode.GLOBAL)).thenReturn(true);
-    when(fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL)).thenReturn(true);
+    when(fsn.hasReadLock(RwLockMode.FS)).thenReturn(true);
+    when(fsn.hasWriteLock(RwLockMode.FS)).thenReturn(true);
+    when(fsn.hasReadLock(RwLockMode.GLOBAL)).thenReturn(true);
+    when(fsn.hasWriteLock(RwLockMode.GLOBAL)).thenReturn(true);
     when(fsn.getFSDirectory()).thenReturn(dir);
     when(fsn.getMaxLockHoldToReleaseLeaseMs()).thenReturn(maxLockHoldToReleaseLeaseMs);
     return fsn;

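The TestLeaseManager hunk above keeps the legacy no-argument hasReadLock()/hasWriteLock() stubs alongside the new mode-aware overloads, so callers on either side of the migration see a namesystem that reports holding its locks. A minimal Mockito sketch of that stubbing (the factory name mockLockedNamesystem is hypothetical; the stubbed methods are taken from the hunk):

  import static org.mockito.Mockito.mock;
  import static org.mockito.Mockito.when;

  import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
  import org.apache.hadoop.hdfs.util.RwLockMode;

  final class LockStubSketch {
    // Build a mock FSNamesystem that claims to hold every lock mode, mirroring
    // the stubbing pattern in the TestLeaseManager hunk above.
    static FSNamesystem mockLockedNamesystem() {
      FSNamesystem fsn = mock(FSNamesystem.class);
      when(fsn.hasReadLock()).thenReturn(true);               // legacy checks
      when(fsn.hasWriteLock()).thenReturn(true);
      when(fsn.hasReadLock(RwLockMode.FS)).thenReturn(true);  // mode-aware checks
      when(fsn.hasWriteLock(RwLockMode.FS)).thenReturn(true);
      when(fsn.hasReadLock(RwLockMode.GLOBAL)).thenReturn(true);
      when(fsn.hasWriteLock(RwLockMode.GLOBAL)).thenReturn(true);
      return fsn;
    }
  }
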
+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java

@@ -47,9 +47,9 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -339,7 +339,7 @@ public class TestListOpenFiles {
     FSDirectory dir = fsNamesystem.getFSDirectory();
     List<INode> removedINodes = new ChunkedArrayList<>();
     removedINodes.add(dir.getINode(path));
-    fsNamesystem.writeLock(FSNamesystemLockMode.FS);
+    fsNamesystem.writeLock(RwLockMode.FS);
     try {
       dir.removeFromInodeMap(removedINodes);
       openFileEntryBatchedEntries = nnRpc
@@ -350,7 +350,7 @@ public class TestListOpenFiles {
     } catch (NullPointerException e) {
       Assert.fail("Should not throw NPE when the file is deleted but has lease!");
     } finally {
-      fsNamesystem.writeUnlock(FSNamesystemLockMode.FS, "testListOpenFilesWithDeletedPath");
+      fsNamesystem.writeUnlock(RwLockMode.FS, "testListOpenFilesWithDeletedPath");
     }
   }
 }

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import java.util.function.Supplier;
@@ -96,13 +96,13 @@ public class TestNameNodeMetadataConsistency {
 
     // Simulate Namenode forgetting a Block
     cluster.restartNameNode(true);
-    cluster.getNameNode().getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNameNode().getNamesystem().writeLock(RwLockMode.BM);
     BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
         .getStoredBlock(block.getLocalBlock());
     bInfo.delete();
     cluster.getNameNode().getNamesystem().getBlockManager()
         .removeBlock(bInfo);
-    cluster.getNameNode().getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+    cluster.getNameNode().getNamesystem().writeUnlock(RwLockMode.BM,
         "testGenerationStampInFuture");
 
     // we also need to tell block manager that we are in the startup path
@@ -147,11 +147,11 @@ public class TestNameNodeMetadataConsistency {
     cluster.restartNameNode(true);
     BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager
         ().getStoredBlock(block.getLocalBlock());
-    cluster.getNameNode().getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNameNode().getNamesystem().writeLock(RwLockMode.BM);
     bInfo.delete();
     cluster.getNameNode().getNamesystem().getBlockManager()
         .removeBlock(bInfo);
-    cluster.getNameNode().getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+    cluster.getNameNode().getNamesystem().writeUnlock(RwLockMode.BM,
         "testEnsureGenStampsIsStartupOnly");
 
     cluster.restartDataNode(dnProps);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java

@@ -44,10 +44,10 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
@@ -339,13 +339,13 @@ public class TestReconstructStripedBlocks {
       boolean reconstructed = false;
       for (int i = 0; i < 5; i++) {
         NumberReplicas num = null;
-        fsn.readLock(FSNamesystemLockMode.GLOBAL);
+        fsn.readLock(RwLockMode.GLOBAL);
         try {
           BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory()
               .getINode4Write(filePath.toString()).asFile().getLastBlock();
           num = bm.countNodes(blockInfo);
         } finally {
-          fsn.readUnlock(FSNamesystemLockMode.GLOBAL, "testCountLiveReplicas");
+          fsn.readUnlock(RwLockMode.GLOBAL, "testCountLiveReplicas");
         }
         if (num.liveReplicas() >= groupSize) {
           reconstructed = true;

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java

@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -197,8 +197,8 @@ public class TestSecurityTokenEditLog {
         @Override
         public Void answer(InvocationOnMock invocation) throws Throwable {
           // fsn claims read lock if either read or write locked.
-          Assert.assertTrue(fsnRef.get().hasReadLock(FSNamesystemLockMode.FS));
-          Assert.assertFalse(fsnRef.get().hasWriteLock(FSNamesystemLockMode.FS));
+          Assert.assertTrue(fsnRef.get().hasReadLock(RwLockMode.FS));
+          Assert.assertFalse(fsnRef.get().hasWriteLock(RwLockMode.FS));
           return null;
         }
       }

+ 17 - 16
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/fgl/TestFineGrainedFSNamesystemLock.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.fgl;
 
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.junit.Test;
@@ -63,7 +64,7 @@ public class TestFineGrainedFSNamesystemLock {
       if (index == 0) { // Test the global write lock via multiple threads.
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            writeLock(fsn, FSNamesystemLockMode.GLOBAL, opName, globalCount);
+            writeLock(fsn, RwLockMode.GLOBAL, opName, globalCount);
             globalNumber.incrementAndGet();
           }
           return true;
@@ -71,7 +72,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 1) { // Test the fs write lock via multiple threads.
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            writeLock(fsn, FSNamesystemLockMode.FS, opName, fsCount);
+            writeLock(fsn, RwLockMode.FS, opName, fsCount);
             fsNumber.incrementAndGet();
           }
           return true;
@@ -79,7 +80,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 2) { // Test the bm write lock via multiple threads.
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            writeLock(fsn, FSNamesystemLockMode.BM, opName, bmCount);
+            writeLock(fsn, RwLockMode.BM, opName, bmCount);
             bmNumber.incrementAndGet();
           }
           return true;
@@ -87,7 +88,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 3) { // Test the bm read lock via multiple threads.
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            readLock(fsn, FSNamesystemLockMode.BM, opName, bmCount);
+            readLock(fsn, RwLockMode.BM, opName, bmCount);
             bmNumber.incrementAndGet();
           }
           return true;
@@ -95,7 +96,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 4) { // Test the fs read lock via multiple threads.
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            readLock(fsn, FSNamesystemLockMode.FS, opName, fsCount);
+            readLock(fsn, RwLockMode.FS, opName, fsCount);
             fsNumber.incrementAndGet();
           }
           return true;
@@ -103,7 +104,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 5) { // Test the global read lock via multiple threads.
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            readLock(fsn, FSNamesystemLockMode.GLOBAL, opName, globalCount);
+            readLock(fsn, RwLockMode.GLOBAL, opName, globalCount);
             globalNumber.incrementAndGet();
           }
           return true;
@@ -111,7 +112,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 6) { // Test the global interruptable write lock via multiple threads.
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            writeLockInterruptibly(fsn, FSNamesystemLockMode.GLOBAL, opName, globalCount);
+            writeLockInterruptibly(fsn, RwLockMode.GLOBAL, opName, globalCount);
             globalNumber.incrementAndGet();
           }
           return true;
@@ -119,7 +120,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 7) { // Test the fs interruptable write lock via multiple threads.
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            writeLockInterruptibly(fsn, FSNamesystemLockMode.FS, opName, fsCount);
+            writeLockInterruptibly(fsn, RwLockMode.FS, opName, fsCount);
             fsNumber.incrementAndGet();
           }
           return true;
@@ -127,7 +128,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 8) { // Test the bm interruptable write lock via multiple threads.
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            writeLockInterruptibly(fsn, FSNamesystemLockMode.BM, opName, bmCount);
+            writeLockInterruptibly(fsn, RwLockMode.BM, opName, bmCount);
             bmNumber.incrementAndGet();
           }
           return true;
@@ -135,7 +136,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 9) { // Test the bm interruptable read lock via multiple threads.
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            readLockInterruptibly(fsn, FSNamesystemLockMode.BM, opName, bmCount);
+            readLockInterruptibly(fsn, RwLockMode.BM, opName, bmCount);
             bmNumber.incrementAndGet();
           }
           return true;
@@ -143,7 +144,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else if (index == 10) { // Test the fs interruptable read lock via multiple threads.
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            readLockInterruptibly(fsn, FSNamesystemLockMode.FS, opName, fsCount);
+            readLockInterruptibly(fsn, RwLockMode.FS, opName, fsCount);
             fsNumber.incrementAndGet();
           }
           return true;
@@ -151,7 +152,7 @@ public class TestFineGrainedFSNamesystemLock {
       } else { // Test the global interruptable read lock via multiple threads.
         callableList.add(() -> {
           for (int startIndex = 0; startIndex < getLoopNumber(); startIndex++) {
-            readLockInterruptibly(fsn, FSNamesystemLockMode.GLOBAL, opName, globalCount);
+            readLockInterruptibly(fsn, RwLockMode.GLOBAL, opName, globalCount);
             globalNumber.incrementAndGet();
           }
           return true;
@@ -177,7 +178,7 @@ public class TestFineGrainedFSNamesystemLock {
    * @param opName operation name
    * @param counter counter to trace this lock mode
    */
-  private void writeLock(FSNLockManager fsn, FSNamesystemLockMode mode,
+  private void writeLock(FSNLockManager fsn, RwLockMode mode,
       String opName, AtomicLong counter)  {
     fsn.writeLock(mode);
     try {
@@ -200,7 +201,7 @@ public class TestFineGrainedFSNamesystemLock {
    * @param opName operation name
    * @param counter counter to trace this lock mode
    */
-  private void readLock(FSNLockManager fsn, FSNamesystemLockMode mode,
+  private void readLock(FSNLockManager fsn, RwLockMode mode,
       String opName, AtomicLong counter)  {
     fsn.readLock(mode);
     try {
@@ -217,7 +218,7 @@ public class TestFineGrainedFSNamesystemLock {
    * @param opName operation name
    * @param counter counter to trace this lock mode
    */
-  private void writeLockInterruptibly(FSNLockManager fsn, FSNamesystemLockMode mode,
+  private void writeLockInterruptibly(FSNLockManager fsn, RwLockMode mode,
       String opName, AtomicLong counter)  {
     boolean success = false;
     try {
@@ -257,7 +258,7 @@ public class TestFineGrainedFSNamesystemLock {
    * @param opName operation name
    * @param counter counter to trace this lock mode
    */
-  private void readLockInterruptibly(FSNLockManager fsn, FSNamesystemLockMode mode,
+  private void readLockInterruptibly(FSNLockManager fsn, RwLockMode mode,
       String opName, AtomicLong counter)  {
     try {
       fsn.readLockInterruptibly(mode);

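The interruptible helpers exercised above pair readLockInterruptibly/writeLockInterruptibly with a guarded unlock. A minimal sketch of one safe shape, under two stated assumptions: that FSNLockManager lives in org.apache.hadoop.hdfs.server.namenode.fgl (the package of the test above), and that it exposes a readUnlock(mode, opName) overload matching the readUnlock calls seen elsewhere in this diff:

  import org.apache.hadoop.hdfs.server.namenode.fgl.FSNLockManager;
  import org.apache.hadoop.hdfs.util.RwLockMode;

  final class InterruptibleLockSketch {
    // Hypothetical helper: run an action under an interruptible BM read lock.
    // Acquisition happens before the try block, so if it is interrupted,
    // nothing is unlocked and the exception propagates to the caller.
    static void underBmReadLock(FSNLockManager fsn, String opName, Runnable action)
        throws InterruptedException {
      fsn.readLockInterruptibly(RwLockMode.BM);
      try {
        action.run();
      } finally {
        fsn.readUnlock(RwLockMode.BM, opName);
      }
    }
  }
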
+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java

@@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
@@ -569,13 +569,13 @@ public class TestDNFencing {
   }
 
   private void doMetasave(NameNode nn2) {
-    nn2.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    nn2.getNamesystem().writeLock(RwLockMode.BM);
     try {
       PrintWriter pw = new PrintWriter(System.err);
       nn2.getNamesystem().getBlockManager().metaSave(pw);
       pw.flush();
     } finally {
-      nn2.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "metaSave");
+      nn2.getNamesystem().writeUnlock(RwLockMode.BM, "metaSave");
     }
   }
 

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

@@ -50,7 +50,6 @@ import java.util.List;
 import java.util.Random;
 
 import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 
@@ -88,6 +87,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -495,12 +495,12 @@ public class TestNameNodeMetrics {
     // Corrupt first replica of the block
     LocatedBlock block = NameNodeAdapter.getBlockLocations(
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
-    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().writeLock(RwLockMode.BM);
     try {
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
           "STORAGE_ID", "TEST");
     } finally {
-      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testCorruptBlock");
+      cluster.getNamesystem().writeUnlock(RwLockMode.BM, "testCorruptBlock");
     }
 
     BlockManagerTestUtil.updateState(bm);
@@ -589,12 +589,12 @@ public class TestNameNodeMetrics {
     assert lbs.get(0) instanceof LocatedStripedBlock;
     LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
 
-    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().writeLock(RwLockMode.BM);
     try {
       bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
           "STORAGE_ID", "TEST");
     } finally {
-      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testStripedFileCorruptBlocks");
+      cluster.getNamesystem().writeUnlock(RwLockMode.BM, "testStripedFileCorruptBlocks");
     }
 
     BlockManagerTestUtil.updateState(bm);
@@ -688,12 +688,12 @@ public class TestNameNodeMetrics {
     // Corrupt the only replica of the block to result in a missing block
     LocatedBlock block = NameNodeAdapter.getBlockLocations(
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
-    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+    cluster.getNamesystem().writeLock(RwLockMode.BM);
     try {
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
           "STORAGE_ID", "TEST");
     } finally {
-      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testMissingBlock");
+      cluster.getNamesystem().writeUnlock(RwLockMode.BM, "testMissingBlock");
     }
     Thread.sleep(1000); // Wait for block to be marked corrupt
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java

@@ -46,8 +46,8 @@ import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
 import org.junit.After;
@@ -298,10 +298,10 @@ public class TestINodeFileUnderConstructionWithSnapshot {
       hdfs.delete(foo, true);
       Thread.sleep(1000);
       try {
-        fsn.writeLock(FSNamesystemLockMode.GLOBAL);
+        fsn.writeLock(RwLockMode.GLOBAL);
         NameNodeAdapter.getLeaseManager(fsn).runLeaseChecks();
       } finally {
-        fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testLease");
+        fsn.writeUnlock(RwLockMode.GLOBAL, "testLease");
       }
     } finally {
       NameNodeAdapter.setLeasePeriod(fsn, HdfsConstants.LEASE_SOFTLIMIT_PERIOD,

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java

@@ -81,7 +81,7 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.AccessControlException;
@@ -784,14 +784,14 @@ public class TestDFSAdmin {
       LocatedStripedBlock bg =
           (LocatedStripedBlock)(lbs.get(0));
 
-      miniCluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
+      miniCluster.getNamesystem().writeLock(RwLockMode.BM);
       try {
         BlockManager bm = miniCluster.getNamesystem().getBlockManager();
         bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
             "STORAGE_ID", "TEST");
         BlockManagerTestUtil.updateState(bm);
       } finally {
-        miniCluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testReportCommand");
+        miniCluster.getNamesystem().writeUnlock(RwLockMode.BM, "testReportCommand");
       }
       waitForCorruptBlock(miniCluster, client, file);
 

+ 7 - 7
hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java

@@ -84,9 +84,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
-import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.RwLockMode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
@@ -1095,26 +1095,26 @@ public class ITestProvidedImplementation {
 
   private void startDecommission(FSNamesystem namesystem, DatanodeManager dnm,
       int dnIndex) throws Exception {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     dnm.getDatanodeAdminManager().startDecommission(dnDesc);
-    namesystem.writeUnlock(FSNamesystemLockMode.BM, "startDecommission");
+    namesystem.writeUnlock(RwLockMode.BM, "startDecommission");
   }
 
   private void startMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
       int dnIndex) throws Exception {
-    namesystem.writeLock(FSNamesystemLockMode.BM);
+    namesystem.writeLock(RwLockMode.BM);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     dnm.getDatanodeAdminManager().startMaintenance(dnDesc, Long.MAX_VALUE);
-    namesystem.writeUnlock(FSNamesystemLockMode.BM, "startMaintenance");
+    namesystem.writeUnlock(RwLockMode.BM, "startMaintenance");
   }
 
   private void stopMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
       int dnIndex) throws Exception {
-    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
+    namesystem.writeLock(RwLockMode.GLOBAL);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     dnm.getDatanodeAdminManager().stopMaintenance(dnDesc);
-    namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "stopMaintenance");
+    namesystem.writeUnlock(RwLockMode.GLOBAL, "stopMaintenance");
   }
 
   @Test

Some files were not shown because too many files changed in this diff