
HDFS-17457. [FGL] UTs support fine-grained locking (#6741)

Author: ZanderXu, 1 year ago
Commit: 14153f07aa
40 changed files, with 220 insertions and 165 deletions
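
Reviewer note: the change is mechanical across all 40 test files. Every bare FSNamesystem lock call is replaced by the fine-grained-locking (FGL) variant that names the lock it needs (FSNamesystemLockMode.BM for block-manager state in almost every hunk below; FSNamesystemLockMode.GLOBAL in the one TestOverReplicatedBlocks hunk that touches more than block-manager state), and every unlock now also passes an operation-name string. A minimal sketch of the before/after shape; the helper runUnderBMWriteLock is illustrative only, not part of the commit:

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

public final class FglPatternSketch {
  // Illustrative helper showing the pattern applied throughout this commit.
  static void runUnderBMWriteLock(FSNamesystem ns, Runnable body) {
    // Before: ns.writeLock(); ... ns.writeUnlock();
    // After: declare the lock needed (BM = block manager) and tag the
    // unlock with an operation name used when reporting lock usage.
    ns.writeLock(FSNamesystemLockMode.BM);
    try {
      body.run(); // e.g. bm.findAndMarkBlockAsCorrupt(...)
    } finally {
      ns.writeUnlock(FSNamesystemLockMode.BM, "runUnderBMWriteLock");
    }
  }
}

The read-lock calls change the same way: readLock(mode) is paired with readUnlock(mode, opName).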
  1. +3 -2    hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
  2. +7 -4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
  3. +7 -6    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
  4. +12 -10  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
  5. +4 -4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
  6. +3 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
  7. +13 -12  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
  8. +5 -4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
  9. +6 -4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
 10. +3 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
 11. +5 -4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
 12. +9 -8    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
 13. +6 -4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
 14. +6 -4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
 15. +7 -4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java
 16. +4 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java
 17. +13 -12  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
 18. +8 -6    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
 19. +4 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
 20. +4 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
 21. +9 -8    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
 22. +3 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
 23. +3 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
 24. +4 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
 25. +5 -4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
 26. +3 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
 27. +4 -3    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
 28. +5 -4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
 29. +5 -4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
 30. +8 -6    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
 31. +3 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
 32. +3 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
 33. +7 -4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
 34. +3 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
 35. +3 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
 36. +3 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
 37. +7 -6    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
 38. +3 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
 39. +3 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 40. +7 -6    hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java

@@ -121,6 +121,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
@@ -1660,10 +1661,10 @@ public class TestRouterRpc {
       // mark a replica as corrupt
       LocatedBlock block = NameNodeAdapter
           .getBlockLocations(nameNode, testFile, 0, 1024).get(0);
-      namesystem.writeLock();
+      namesystem.writeLock(FSNamesystemLockMode.BM);
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
           "STORAGE_ID", "TEST");
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "findAndMarkBlockAsCorrupt");
       BlockManagerTestUtil.updateState(bm);
       DFSTestUtil.waitCorruptReplicas(fileSystem, namesystem,
           new Path(testFile), block.getBlock(), 1);

+ 7 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.junit.After;
 import org.junit.Test;
 
@@ -175,7 +176,7 @@ public class TestBlocksScheduledCounter {
           .getBlockLocations(cluster.getNameNode(), filePath.toString(), 0, 1)
           .get(0);
       DatanodeInfo[] locs = block.getLocations();
-      cluster.getNamesystem().writeLock();
+      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
       try {
         bm.findAndMarkBlockAsCorrupt(block.getBlock(), locs[0], "STORAGE_ID",
             "TEST");
@@ -185,7 +186,8 @@
         BlockManagerTestUtil.updateState(bm);
         assertEquals(1L, bm.getPendingReconstructionBlocksCount());
       } finally {
-        cluster.getNamesystem().writeUnlock();
+        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+            "findAndMarkBlockAsCorrupt");
       }
 
       // 4. delete the file
@@ -238,13 +240,14 @@
         DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
       }
 
-      cluster.getNamesystem().writeLock();
+      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
       try {
         BlockManagerTestUtil.computeAllPendingWork(bm);
         BlockManagerTestUtil.updateState(bm);
         assertEquals(1L, bm.getPendingReconstructionBlocksCount());
       } finally {
-        cluster.getNamesystem().writeUnlock();
+        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+            "testBlocksScheduledCounterOnTruncate");
       }
 
       // 5.truncate the file whose block exists in pending reconstruction

+ 7 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java

@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -159,13 +160,13 @@ public class TestFileCorruption {
       DatanodeRegistration dnR = InternalDataNodeTestUtils.
         getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
       FSNamesystem ns = cluster.getNamesystem();
-      ns.writeLock();
+      ns.writeLock(FSNamesystemLockMode.BM);
       try {
         cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,
             new DatanodeInfoBuilder().setNodeID(dnR).build(), "TEST",
             "STORAGE_ID");
       } finally {
-        ns.writeUnlock();
+        ns.writeUnlock(FSNamesystemLockMode.BM, "testArrayOutOfBoundsException");
       }
 
       // open the file
@@ -210,16 +211,16 @@
       FSNamesystem ns = cluster.getNamesystem();
       //fail the storage on that node which has the block
       try {
-        ns.writeLock();
+        ns.writeLock(FSNamesystemLockMode.BM);
         updateAllStorages(bm);
       } finally {
-        ns.writeUnlock();
+        ns.writeUnlock(FSNamesystemLockMode.BM, "testCorruptionWithDiskFailure");
       }
-      ns.writeLock();
+      ns.writeLock(FSNamesystemLockMode.BM);
       try {
         markAllBlocksAsCorrupt(bm, blk);
       } finally {
-        ns.writeUnlock();
+        ns.writeUnlock(FSNamesystemLockMode.BM, "testCorruptionWithDiskFailure");
       }
 
       // open the file

+ 12 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerSafeMode.BMSafeModeStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.Whitebox;
@@ -50,23 +51,23 @@ public class BlockManagerTestUtil {
   /** @return the datanode descriptor for the given the given storageID. */
   public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
       final String storageID) {
-    ns.readLock();
+    ns.readLock(FSNamesystemLockMode.BM);
     try {
       return ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
     } finally {
-      ns.readUnlock();
+      ns.readUnlock(FSNamesystemLockMode.BM, "getDatanode");
     }
   }
 
   public static Iterator<BlockInfo> getBlockIterator(final FSNamesystem ns,
       final String storageID, final int startBlock) {
-    ns.readLock();
+    ns.readLock(FSNamesystemLockMode.BM);
     try {
       DatanodeDescriptor dn =
           ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
       return dn.getBlockIterator(startBlock);
     } finally {
-      ns.readUnlock();
+      ns.readUnlock(FSNamesystemLockMode.BM, "getBlockIterator");
     }
   }
 
@@ -88,7 +89,7 @@
    */
   public static int[] getReplicaInfo(final FSNamesystem namesystem, final Block b) {
     final BlockManager bm = namesystem.getBlockManager();
-    namesystem.readLock();
+    namesystem.readLock(FSNamesystemLockMode.BM);
     try {
       final BlockInfo storedBlock = bm.getStoredBlock(b);
       return new int[]{getNumberOfRacks(bm, b),
@@ -96,7 +97,7 @@
           bm.neededReconstruction.contains(storedBlock) ? 1 : 0,
           getNumberOfDomains(bm, b)};
     } finally {
-      namesystem.readUnlock();
+      namesystem.readUnlock(FSNamesystemLockMode.BM, "getReplicaInfo");
     }
   }
 
@@ -247,7 +248,7 @@
    */
   public static void noticeDeadDatanode(NameNode nn, String dnName) {
     FSNamesystem namesystem = nn.getNamesystem();
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
       HeartbeatManager hbm = dnm.getHeartbeatManager();
@@ -265,7 +266,7 @@
         hbm.heartbeatCheck();
       }
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "noticeDeadDatanode");
    }
   }
 
@@ -302,12 +303,13 @@
    */
   public static int checkHeartbeatAndGetUnderReplicatedBlocksCount(
       FSNamesystem namesystem, BlockManager bm) {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
       return bm.getUnderReplicatedNotMissingBlocks();
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM,
+          "checkHeartbeatAndGetUnderReplicatedBlocksCount");
     }
   }
 

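Reviewer note: every hunk in BlockManagerTestUtil above has the same acquire/try/finally shape. A small wrapper could cut the repetition; withBMReadLock below is a hypothetical helper sketched for illustration, not something this commit adds:

import java.util.function.Supplier;

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

final class LockedReads {
  // Hypothetical utility: evaluate a supplier under the BM read lock and
  // tag the unlock with the given operation name.
  static <T> T withBMReadLock(FSNamesystem ns, String opName, Supplier<T> body) {
    ns.readLock(FSNamesystemLockMode.BM);
    try {
      return body.get();
    } finally {
      ns.readUnlock(FSNamesystemLockMode.BM, opName);
    }
  }
}

With it, getDatanode above would collapse to: return withBMReadLock(ns, "getDatanode", () -> ns.getBlockManager().getDatanodeManager().getDatanode(storageID));
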
+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

@@ -1624,7 +1624,7 @@ public class TestBlockManager {
       }
       failedStorageDataNode.updateHeartbeat(reports.toArray(StorageReport
           .EMPTY_ARRAY), 0L, 0L, 0, 0, null);
-      ns.writeLock();
+      ns.writeLock(FSNamesystemLockMode.BM);
       DatanodeStorageInfo corruptStorageInfo= null;
       for(int i=0; i<corruptStorageDataNode.getStorageInfos().length; i++) {
         corruptStorageInfo = corruptStorageDataNode.getStorageInfos()[i];
@@ -1638,16 +1638,16 @@
       blockManager.findAndMarkBlockAsCorrupt(blk, corruptStorageDataNode,
           corruptStorageInfo.getStorageID(),
           CorruptReplicasMap.Reason.ANY.toString());
-      ns.writeUnlock();
+      ns.writeUnlock(FSNamesystemLockMode.BM, "testBlockManagerMachinesArray");
       BlockInfo[] blockInfos = new BlockInfo[] {blockInfo};
-      ns.readLock();
+      ns.readLock(FSNamesystemLockMode.BM);
       LocatedBlocks locatedBlocks =
           blockManager.createLocatedBlocks(blockInfos, 3L, false, 0L, 3L,
               false, false, null, null);
       assertTrue("Located Blocks should exclude corrupt" +
               "replicas and failed storages",
           locatedBlocks.getLocatedBlocks().size() == 1);
-      ns.readUnlock();
+      ns.readUnlock(FSNamesystemLockMode.BM, "open");
     } finally {
       if (cluster != null) {
         cluster.shutdown();

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java

@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -602,11 +603,11 @@
 
   static BlockReconstructionWork scheduleReconstruction(
       FSNamesystem fsn, BlockInfo block, int priority) {
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.BM);
     try {
       return fsn.getBlockManager().scheduleReconstruction(block, priority);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.BM, "scheduleReconstruction");
     }
   }
 

+ 13 - 12
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.VersionInfo;
@@ -131,7 +132,7 @@ public class TestComputeInvalidateWork {
   public void testComputeInvalidateReplicas() throws Exception {
     final int blockInvalidateLimit = bm.getDatanodeManager()
         .getBlockInvalidateLimit();
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       for (int i=0; i<nodes.length; i++) {
         for(int j=0; j<3*blockInvalidateLimit+1; j++) {
@@ -142,7 +143,7 @@
       }
       verifyInvalidationWorkCounts(blockInvalidateLimit);
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testComputeInvalidateReplicas");
     }
   }
 
@@ -154,7 +155,7 @@
   public void testComputeInvalidateStripedBlockGroups() throws Exception {
     final int blockInvalidateLimit =
         bm.getDatanodeManager().getBlockInvalidateLimit();
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       int nodeCount = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
       for (int i = 0; i < nodeCount; i++) {
@@ -167,7 +168,7 @@
       }
       verifyInvalidationWorkCounts(blockInvalidateLimit);
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testComputeInvalidateStripedBlockGroups");
     }
   }
 
@@ -181,7 +182,7 @@
     final int blockInvalidateLimit =
         bm.getDatanodeManager().getBlockInvalidateLimit();
     final Random random = new Random(System.currentTimeMillis());
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       int nodeCount = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
       for (int i = 0; i < nodeCount; i++) {
@@ -201,7 +202,7 @@
       }
       verifyInvalidationWorkCounts(blockInvalidateLimit);
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testComputeInvalidate");
     }
   }
 
@@ -212,7 +213,7 @@
    */
   @Test(timeout=120000)
   public void testDatanodeReformat() throws Exception {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       // Change the datanode UUID to emulate a reformat
       String poolId = cluster.getNamesystem().getBlockPoolId();
@@ -234,7 +235,7 @@
       assertEquals(0, bm.computeInvalidateWork(1));
       assertEquals(0, bm.getPendingDeletionBlocksCount());
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testDatanodeReformat");
     }
   }
 
@@ -255,7 +256,7 @@
     dfs.delete(ecFile, false);
     BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty(
         cluster.getNamesystem(0).getBlockManager());
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     InvalidateBlocks invalidateBlocks;
     int totalStripedDataBlocks = totalBlockGroups * (ecPolicy.getNumDataUnits()
         + ecPolicy.getNumParityUnits());
@@ -272,7 +273,7 @@
       assertEquals("Unexpected invalidate count for striped block groups!",
           totalStripedDataBlocks, invalidateBlocks.getECBlocks());
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testDatanodeReRegistration");
     }
     // Re-register each DN and see that it wipes the invalidation work
     int totalBlockGroupsPerDataNode = totalBlockGroups;
@@ -284,14 +285,14 @@
           new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
           new ExportedBlockKeys(),
           VersionInfo.getVersion());
-      namesystem.writeLock();
+      namesystem.writeLock(FSNamesystemLockMode.BM);
       try {
         bm.getDatanodeManager().registerDatanode(reg);
         expected -= (totalReplicasPerDataNode + totalBlockGroupsPerDataNode);
         assertEquals("Expected number of invalidate blocks to decrease",
             (long) expected, invalidateBlocks.numBlocks());
       } finally {
-          namesystem.writeUnlock();
+        namesystem.writeUnlock(FSNamesystemLockMode.BM, "testDatanodeReRegistration");
       }
     }
   }

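Reviewer note: the unlock tag in this file is simply the enclosing test method name (testDatanodeReformat, testComputeInvalidate, and so on), while other files use the name of the operation performed ("findAndMarkBlockAsCorrupt", "open"). Judging from the diff it is a free-form label for lock-usage reporting, so nothing keeps it in sync with a renamed test. A hypothetical way a test utility could derive the label automatically; this is not part of the commit, and it assumes the BM write lock is the one held:

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

final class OpNames {
  // Hypothetical: tag the unlock with the immediate caller's method name,
  // so a renamed test cannot leave a stale label behind.
  static void writeUnlockTagged(FSNamesystem ns) {
    // Stack indices: [0] getStackTrace, [1] writeUnlockTagged, [2] caller.
    String caller = Thread.currentThread().getStackTrace()[2].getMethodName();
    ns.writeUnlock(FSNamesystemLockMode.BM, caller);
  }
}
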
+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -91,7 +92,7 @@ public class TestHeartbeatHandling {
       final DatanodeStorageInfo[] ONE_TARGET = {dd.getStorageInfo(storageID)};
 
       try {
-        namesystem.writeLock();
+        namesystem.writeLock(FSNamesystemLockMode.BM);
         synchronized(hm) {
           for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
             dd.addBlockToBeReplicated(
@@ -136,7 +137,7 @@
           assertEquals(0, cmds.length);
         }
       } finally {
-        namesystem.writeUnlock();
+        namesystem.writeUnlock(FSNamesystemLockMode.BM, "testHeartbeat");
       }
     } finally {
       cluster.shutdown();
@@ -176,7 +177,7 @@
       dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
 
       try {
-        namesystem.writeLock();
+        namesystem.writeLock(FSNamesystemLockMode.BM);
         synchronized(hm) {
           NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem);
           NameNodeAdapter.sendHeartBeat(nodeReg2, dd2, namesystem);
@@ -255,7 +256,7 @@
           assertEquals(recoveringNodes[2], dd3);
         }
       } finally {
-        namesystem.writeUnlock();
+        namesystem.writeUnlock(FSNamesystemLockMode.BM, "testHeartbeat");
       }
     } finally {
       cluster.shutdown();

+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
@@ -184,7 +185,7 @@ public class TestNameNodePrunesMissingStorages {
         DataNodeTestUtils.triggerBlockReport(dn);
       }
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/foo1"));
-      cluster.getNamesystem().writeLock();
+      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
       final String storageIdToRemove;
       String datanodeUuid;
       // Find the first storage which this block is in.
@@ -200,7 +201,8 @@
         storageIdToRemove = info.getStorageID();
         datanodeUuid = info.getDatanodeDescriptor().getDatanodeUuid();
       } finally {
-        cluster.getNamesystem().writeUnlock();
+        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+            "testRemovingStorageDoesNotProduceZombies");
       }
       // Find the DataNode which holds that first storage.
       final DataNode datanodeToRemoveStorageFrom;
@@ -345,7 +347,7 @@
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
         @Override
         public Boolean get() {
-          cluster.getNamesystem().writeLock();
+          cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
           try {
             Iterator<DatanodeStorageInfo> storageInfoIter =
                 cluster.getNamesystem().getBlockManager().
@@ -367,7 +369,7 @@
             LOG.info("Successfully found " + block.getBlockName() + " in " +
                 "be in storage id " + newStorageId);
           } finally {
-            cluster.getNamesystem().writeUnlock();
+            cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testRenamingStorageIds");
           }
           return true;
         }

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
@@ -174,14 +175,14 @@ public class TestNodeCount {
   /* threadsafe read of the replication counts for this block */
   NumberReplicas countNodes(Block block, FSNamesystem namesystem) {
     BlockManager blockManager = namesystem.getBlockManager();
-    namesystem.readLock();
+    namesystem.readLock(FSNamesystemLockMode.BM);
     try {
       lastBlock = block;
       lastNum = blockManager.countNodes(blockManager.getStoredBlock(block));
       return lastNum;
     }
     finally {
-      namesystem.readUnlock();
+      namesystem.readUnlock(FSNamesystemLockMode.BM, "countNodes");
     }
   }
 }

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.junit.Test;
 
@@ -95,7 +96,7 @@ public class TestOverReplicatedBlocks {
       final BlockManager bm = namesystem.getBlockManager();
       final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
       try {
-        namesystem.writeLock();
+        namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
         synchronized(hm) {
           // set live datanode's remaining space to be 0 
           // so they will be chosen to be deleted when over-replication occurs
@@ -118,7 +119,7 @@
               bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
         }
       } finally {
-        namesystem.writeUnlock();
+        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "testProcesOverReplicateBlock");
       }
 
     } finally {
@@ -181,11 +182,11 @@
 
       // All replicas for deletion should be scheduled on lastDN.
       // And should not actually be deleted, because lastDN does not heartbeat.
-      namesystem.readLock();
+      namesystem.readLock(FSNamesystemLockMode.BM);
       final int dnBlocks = bm.getExcessSize4Testing(dnReg.getDatanodeUuid());
       assertEquals("Replicas on node " + lastDNid + " should have been deleted",
           SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks);
-      namesystem.readUnlock();
+      namesystem.readUnlock(FSNamesystemLockMode.BM, "excessSize4Testing");
       for(BlockLocation location : locs)
         assertEquals("Block should still have 4 replicas",
             4, location.getNames().length);

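Reviewer note: this is the one file in the patch that mixes modes. The heartbeat-mutating block in testProcesOverReplicateBlock takes FSNamesystemLockMode.GLOBAL, while the later excess-replica read takes only FSNamesystemLockMode.BM. A sketch of the two shapes side by side, under the assumption (suggested by the diff, not stated in it) that GLOBAL covers both the namespace and block-manager locks:

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

final class LockModeChoice {
  static void sketch(FSNamesystem ns) {
    // State on both sides of the split lock is involved: take GLOBAL.
    ns.writeLock(FSNamesystemLockMode.GLOBAL);
    try {
      // ... adjust heartbeats, then assert on replication state ...
    } finally {
      ns.writeUnlock(FSNamesystemLockMode.GLOBAL, "sketch");
    }
    // Only block-manager state is read: the narrower BM read lock suffices.
    ns.readLock(FSNamesystemLockMode.BM);
    try {
      // ... e.g. bm.getExcessSize4Testing(datanodeUuid) ...
    } finally {
      ns.readUnlock(FSNamesystemLockMode.BM, "sketch");
    }
  }
}
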
+ 9 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java

@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
@@ -288,13 +289,13 @@ public class TestPendingReconstruction {
 
       // A received IBR processing calls addBlock(). If the gen stamp in the
       // report is not the same, it should stay in pending.
-      fsn.writeLock();
+      fsn.writeLock(FSNamesystemLockMode.BM);
       try {
         // Use a wrong gen stamp.
         blkManager.addBlock(desc[0].getStorageInfos()[0],
             new Block(1, 1, 0), null);
       } finally {
-        fsn.writeUnlock();
+        fsn.writeUnlock(FSNamesystemLockMode.BM, "testProcessPendingReconstructions");
       }
 
       // The block should still be pending
@@ -303,12 +304,12 @@
 
       // A block report with the correct gen stamp should remove the record
       // from the pending queue.
-      fsn.writeLock();
+      fsn.writeLock(FSNamesystemLockMode.BM);
       try {
         blkManager.addBlock(desc[0].getStorageInfos()[0],
             new Block(1, 1, 1), null);
       } finally {
-        fsn.writeUnlock();
+        fsn.writeUnlock(FSNamesystemLockMode.BM, "testProcessPendingReconstructions");
       }
 
       GenericTestUtils.waitFor(() -> pendingReconstruction.size() == 0, 500,
@@ -459,7 +460,7 @@
       // 3. mark a couple of blocks as corrupt
       LocatedBlock block = NameNodeAdapter.getBlockLocations(
           cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
-      cluster.getNamesystem().writeLock();
+      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
       try {
         bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
             "STORAGE_ID", "TEST");
@@ -471,7 +472,7 @@
         BlockInfo storedBlock = bm.getStoredBlock(block.getBlock().getLocalBlock());
         assertEquals(bm.pendingReconstruction.getNumReplicas(storedBlock), 2);
       } finally {
-        cluster.getNamesystem().writeUnlock();
+        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testPendingAndInvalidate");
       }
 
       // 4. delete the file
@@ -507,7 +508,7 @@
         DATANODE_COUNT).build();
     tmpCluster.waitActive();
     FSNamesystem fsn = tmpCluster.getNamesystem(0);
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.BM);
 
     try {
       BlockManager bm = fsn.getBlockManager();
@@ -563,7 +564,7 @@
       }, 100, 60000);
     } finally {
       tmpCluster.shutdown();
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.BM, "testReplicationCounter");
     }
   }
 

+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
@@ -196,11 +197,11 @@ public class TestReconstructStripedBlocksWithRackAwareness {
       DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
     }
 
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.BM);
     try {
       bm.processMisReplicatedBlocks();
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.BM, "testReconstructForNotEnoughRacks");
     }
 
     // check if redundancy monitor correctly schedule the reconstruction work.
@@ -342,12 +343,13 @@
     final DatanodeAdminManager decomManager =
         (DatanodeAdminManager) Whitebox.getInternalState(
             dm, "datanodeAdminManager");
-    cluster.getNamesystem().writeLock();
+    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       dn9.stopDecommission();
       decomManager.startDecommission(dn9);
     } finally {
-      cluster.getNamesystem().writeUnlock();
+      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testReconstructionWithDecommission");
     }
 
     // make sure the decommission finishes and the block in on 6 racks

+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -72,7 +73,7 @@ public class TestReplicationPolicyConsiderLoad
    */
   @Test
   public void testChooseTargetWithDecomNodes() throws IOException {
-    namenode.getNamesystem().writeLock();
+    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[3],
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
@@ -124,14 +125,15 @@
       dataNodes[0].stopDecommission();
       dataNodes[1].stopDecommission();
       dataNodes[2].stopDecommission();
-      namenode.getNamesystem().writeUnlock();
+      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testChooseTargetWithDecomNodes");
     }
     NameNode.LOG.info("Done working on it");
   }
 
   @Test
   public void testConsiderLoadFactor() throws IOException {
-    namenode.getNamesystem().writeLock();
+    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[0],
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[0]),
@@ -178,7 +180,7 @@
             info.getDatanodeDescriptor().getXceiverCount() <= (load/6)*1.2);
       }
     } finally {
-      namenode.getNamesystem().writeUnlock();
+      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testConsiderLoadFactor");
     }
   }
 }

+ 7 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java

@@ -22,6 +22,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.OutlierMetrics;
 
 import org.apache.hadoop.test.GenericTestUtils;
@@ -85,7 +86,7 @@ public class TestReplicationPolicyExcludeSlowNodes
    */
   @Test
   public void testChooseTargetExcludeSlowNodes() throws Exception {
-    namenode.getNamesystem().writeLock();
+    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       // add nodes
       for (int i = 0; i < dataNodes.length; i++) {
@@ -135,14 +136,15 @@
             .getDatanodeUuid()));
       }
     } finally {
-      namenode.getNamesystem().writeUnlock();
+      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testChooseTargetExcludeSlowNodes");
     }
     NameNode.LOG.info("Done working on it");
   }
 
   @Test
   public void testSlowPeerTrackerEnabledClearSlowNodes() throws Exception {
-    namenode.getNamesystem().writeLock();
+    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       // add nodes
       for (DatanodeDescriptor dataNode : dataNodes) {
@@ -172,7 +174,8 @@
       assertTrue(dnManager.isSlowPeerCollectorInitialized());
       assertEquals(0, DatanodeManager.getSlowNodesUuidSet().size());
     } finally {
-      namenode.getNamesystem().writeUnlock();
+      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testSlowPeerTrackerEnabledClearSlowNodes");
     }
   }
 

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java

@@ -22,6 +22,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.junit.Test;
 
@@ -91,7 +92,7 @@ public class TestReplicationPolicyRatioConsiderLoadWithStorage
    */
   @Test
   public void testChooseTargetWithRatioConsiderLoad() {
-    namenode.getNamesystem().writeLock();
+    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       // After heartbeat has been processed, the total load should be 200.
       // And average load per node should be 40. The max load should be 2 * 40;
@@ -163,7 +164,8 @@ public class TestReplicationPolicyRatioConsiderLoadWithStorage
       assertTrue(targetSet.contains(dataNodes[3]));
       assertTrue(targetSet.contains(dataNodes[4]));
     } finally {
-      namenode.getNamesystem().writeUnlock();
+      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testChooseTargetWithRatioConsiderLoad");
     }
   }
 }

+ 13 - 12
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 
 import static org.mockito.Mockito.doAnswer;
@@ -94,13 +95,13 @@ public class NameNodeAdapter {
     // consistent with FSNamesystem#getFileInfo()
     final String operationName = needBlockToken ? "open" : "getfileinfo";
     FSPermissionChecker.setOperationType(operationName);
-    namenode.getNamesystem().readLock();
+    namenode.getNamesystem().readLock(FSNamesystemLockMode.FS);
     try {
       return FSDirStatAndListingOp.getFileInfo(namenode.getNamesystem()
           .getFSDirectory(), pc, src, resolveLink, needLocation,
           needBlockToken);
     } finally {
-      namenode.getNamesystem().readUnlock();
+      namenode.getNamesystem().readUnlock(FSNamesystemLockMode.FS, "getFileInfo");
     }
   }
   
@@ -213,11 +214,11 @@ public class NameNodeAdapter {
    */
   public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
       DatanodeID id) throws IOException {
-    ns.readLock();
+    ns.readLock(FSNamesystemLockMode.BM);
     try {
       return ns.getBlockManager().getDatanodeManager().getDatanode(id);
     } finally {
-      ns.readUnlock();
+      ns.readUnlock(FSNamesystemLockMode.BM, "getDatanode");
     }
   }
   
@@ -241,7 +242,7 @@ public class NameNodeAdapter {
   public static BlockInfo addBlockNoJournal(final FSNamesystem fsn,
       final String src, final DatanodeStorageInfo[] targets)
       throws IOException {
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
     try {
       INodeFile file = (INodeFile)fsn.getFSDirectory().getINode(src);
       Block newBlock = fsn.createNewBlock(BlockType.CONTIGUOUS);
@@ -250,17 +251,17 @@ public class NameNodeAdapter {
           fsn, src, inodesInPath, newBlock, targets, BlockType.CONTIGUOUS);
       return file.getLastBlock();
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "addBlockNoJournal");
     }
   }
 
   public static void persistBlocks(final FSNamesystem fsn,
       final String src, final INodeFile file) throws IOException {
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.FS);
     try {
       FSDirWriteFileOp.persistBlocks(fsn.getFSDirectory(), src, file, true);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.FS, "persistBlocks");
     }
   }
 
@@ -272,8 +273,8 @@ public class NameNodeAdapter {
   public static FSNamesystem spyOnNamesystem(NameNode nn) {
     FSNamesystem fsnSpy = Mockito.spy(nn.getNamesystem());
     FSNamesystem fsnOld = nn.namesystem;
-    fsnOld.writeLock();
-    fsnSpy.writeLock();
+    fsnOld.writeLock(FSNamesystemLockMode.GLOBAL);
+    fsnSpy.writeLock(FSNamesystemLockMode.GLOBAL);
     nn.namesystem = fsnSpy;
     try {
       FieldUtils.writeDeclaredField(
@@ -291,8 +292,8 @@ public class NameNodeAdapter {
     } catch (IllegalAccessException e) {
       throw new RuntimeException("Cannot set spy FSNamesystem", e);
     } finally {
-      fsnSpy.writeUnlock();
-      fsnOld.writeUnlock();
+      fsnSpy.writeUnlock(FSNamesystemLockMode.GLOBAL, "spyOnNamesystem");
+      fsnOld.writeUnlock(FSNamesystemLockMode.GLOBAL, "spyOnNamesystem");
     }
     return fsnSpy;
   }
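
Note: NameNodeAdapter shows how the mode is chosen per helper: FS for namespace-only paths (getFileInfo, persistBlocks), BM for block-management-only paths (getDatanode), and GLOBAL when one call crosses both layers (addBlockNoJournal, spyOnNamesystem). A hedged sketch of that rule of thumb; modeFor and lookup are illustrative names, and the lookup body simply mirrors getDatanode above:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

final class LockModeChoiceSketch {
  private LockModeChoiceSketch() {
  }

  /** Rule of thumb distilled from the hunks above. */
  static FSNamesystemLockMode modeFor(boolean touchesNamespace, boolean touchesBlocks) {
    if (touchesNamespace && touchesBlocks) {
      return FSNamesystemLockMode.GLOBAL; // crosses both layers
    }
    return touchesBlocks ? FSNamesystemLockMode.BM : FSNamesystemLockMode.FS;
  }

  /** BM-only read, mirroring NameNodeAdapter#getDatanode. */
  static DatanodeDescriptor lookup(FSNamesystem ns, DatanodeID id) throws IOException {
    ns.readLock(FSNamesystemLockMode.BM);
    try {
      return ns.getBlockManager().getDatanodeManager().getDatanode(id);
    } finally {
      ns.readUnlock(FSNamesystemLockMode.BM, "lookup");
    }
  }
}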

+ 8 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java

@@ -24,6 +24,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.EnumSet;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.junit.After;
@@ -91,7 +93,7 @@ public class TestAddBlockRetry {
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
     LocatedBlock[] onRetryBlock = new LocatedBlock[1];
-    ns.readLock();
+    ns.readLock(FSNamesystemLockMode.GLOBAL);
     FSDirWriteFileOp.ValidateAddBlockResult r;
     FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
     try {
@@ -99,7 +101,7 @@ public class TestAddBlockRetry {
                                             HdfsConstants.GRANDFATHER_INODE_ID,
                                             "clientName", null, onRetryBlock);
     } finally {
-      ns.readUnlock();
+      ns.readUnlock(FSNamesystemLockMode.GLOBAL, "validateAddBlock");
     }
     DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
         ns.getBlockManager(), src, null, null, null, r);
@@ -117,13 +119,13 @@ public class TestAddBlockRetry {
     assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
 
     // continue first addBlock()
-    ns.writeLock();
+    ns.writeLock(FSNamesystemLockMode.GLOBAL);
     LocatedBlock newBlock;
     try {
       newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src,
           HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
     } finally {
-      ns.writeUnlock();
+      ns.writeUnlock(FSNamesystemLockMode.GLOBAL, "testRetryAddBlockWhileInChooseTarget");
     }
     assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
 
@@ -137,11 +139,11 @@ public class TestAddBlockRetry {
 
   boolean checkFileProgress(String src, boolean checkall) throws IOException {
     final FSNamesystem ns = cluster.getNamesystem();
-    ns.readLock();
+    ns.readLock(FSNamesystemLockMode.GLOBAL);
     try {
       return ns.checkFileProgress(src, ns.dir.getINode(src).asFile(), checkall);
     } finally {
-      ns.readUnlock();
+      ns.readUnlock(FSNamesystemLockMode.GLOBAL, "checkFileProgress");
     }
   }
 

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -204,12 +205,13 @@ public class TestAddOverReplicatedStripedBlocks {
     BlockManager bm = cluster.getNamesystem().getBlockManager();
     List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
     List<String> storages = Arrays.asList(bg.getStorageIDs());
-    cluster.getNamesystem().writeLock();
+    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       bm.findAndMarkBlockAsCorrupt(lbs.getLastLocatedBlock().getBlock(),
           infos.get(0), storages.get(0), "TEST");
     } finally {
-      cluster.getNamesystem().writeUnlock();
+      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testProcessOverReplicatedAndCorruptStripedBlock");
     }
     assertEquals(1, bm.countNodes(bm.getStoredBlock(blockInfo))
         .corruptReplicas());

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -253,11 +254,12 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
 
     //test if decommission succeeded
     DatanodeDescriptor dnd3 = dnm.getDatanode(cluster.getDataNodes().get(3).getDatanodeId());
-    cluster.getNamesystem().writeLock();
+    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       dm.getDatanodeAdminManager().startDecommission(dnd3);
     } finally {
-      cluster.getNamesystem().writeUnlock();
+      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testPlacementWithOnlyOneNodeInRackDecommission");
     }
 
     // make sure the decommission finishes and the block in on 4 racks

+ 9 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java

@@ -43,6 +43,7 @@ import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.commons.lang3.time.DateUtils;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -761,7 +762,7 @@ public class TestCacheDirectives {
       @Override
       public Boolean get() {
         int numCachedBlocks = 0, numCachedReplicas = 0;
-        namesystem.readLock();
+        namesystem.readLock(FSNamesystemLockMode.BM);
         try {
           GSet<CachedBlock, CachedBlock> cachedBlocks =
               cacheManager.getCachedBlocks();
@@ -774,7 +775,7 @@ public class TestCacheDirectives {
             }
           }
         } finally {
-          namesystem.readUnlock();
+          namesystem.readUnlock(FSNamesystemLockMode.BM, "checkBlocks");
         }
 
         LOG.info(logString + " cached blocks: have " + numCachedBlocks +
@@ -1505,7 +1506,7 @@ public class TestCacheDirectives {
   private void checkPendingCachedEmpty(MiniDFSCluster cluster)
       throws Exception {
     Thread.sleep(1000);
-    cluster.getNamesystem().readLock();
+    cluster.getNamesystem().readLock(FSNamesystemLockMode.BM);
     try {
       final DatanodeManager datanodeManager =
           cluster.getNamesystem().getBlockManager().getDatanodeManager();
@@ -1518,7 +1519,7 @@ public class TestCacheDirectives {
             descriptor.getPendingCached().isEmpty());
       }
     } finally {
-      cluster.getNamesystem().readUnlock();
+      cluster.getNamesystem().readUnlock(FSNamesystemLockMode.BM, "checkPendingCachedEmpty");
     }
   }
 
@@ -1665,9 +1666,9 @@ public class TestCacheDirectives {
     HATestUtil.waitForStandbyToCatchUp(ann, sbn);
     GenericTestUtils.waitFor(() -> {
       boolean isConsistence = false;
-      ann.getNamesystem().readLock();
+      ann.getNamesystem().readLock(FSNamesystemLockMode.FS);
       try {
-        sbn.getNamesystem().readLock();
+        sbn.getNamesystem().readLock(FSNamesystemLockMode.FS);
         try {
           Iterator<CacheDirective> annDirectivesIt = annCachemanager.
               getCacheDirectives().iterator();
@@ -1682,10 +1683,10 @@ public class TestCacheDirectives {
             }
           }
         } finally {
-          sbn.getNamesystem().readUnlock();
+          sbn.getNamesystem().readUnlock(FSNamesystemLockMode.FS, "expiryTimeConsistency");
         }
       } finally {
-        ann.getNamesystem().readUnlock();
+        ann.getNamesystem().readUnlock(FSNamesystemLockMode.FS, "expiryTimeConsistency");
       }
       if (!isConsistence) {
         LOG.info("testEexpiryTimeConsistency:"
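
Note: the last hunk nests read locks on two independent namesystems (active first, then standby) and releases them in reverse order, each unlock naming the same mode it locked. A minimal sketch of that nesting, assuming only the readLock/readUnlock overloads shown above; the method name and "compare" tag are illustrative:

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

final class NestedReadLockSketch {
  private NestedReadLockSketch() {
  }

  /** Compares state across two namesystems under matching FS read locks. */
  static void compareUnderReadLocks(FSNamesystem active, FSNamesystem standby,
      Runnable comparison) {
    active.readLock(FSNamesystemLockMode.FS);
    try {
      standby.readLock(FSNamesystemLockMode.FS);
      try {
        comparison.run();
      } finally {
        // Unlock in reverse order of acquisition, naming the same mode.
        standby.readUnlock(FSNamesystemLockMode.FS, "compare");
      }
    } finally {
      active.readUnlock(FSNamesystemLockMode.FS, "compare");
    }
  }
}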

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java

@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.Node;
@@ -471,7 +472,7 @@ public class TestDeleteRace {
         } catch (InterruptedException e) {
         }
       });
-      fsn.writeLock();
+      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
       open.start();
       openSem.acquire();
       Thread.yield();
@@ -479,7 +480,7 @@ public class TestDeleteRace {
       rename.start();
       renameSem.acquire();
       Thread.yield();
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testOpenRenameRace");
 
       // wait open and rename threads finish.
       open.join();

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java

@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
@@ -392,11 +393,11 @@ public class TestDiskspaceQuotaUpdate {
 
   private void updateCountForQuota(int i) {
     FSNamesystem fsn = cluster.getNamesystem();
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.FS);
     try {
       getFSDirectory().updateCountForQuota(i);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.FS, "updateCountForQuota");
     }
   }
 

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java

@@ -41,6 +41,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
 import java.util.function.Supplier;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
@@ -523,11 +525,11 @@ public class TestEditLogRace {
         public void run() {
           try {
             LOG.info("Starting setOwner");
-            namesystem.writeLock();
+            namesystem.writeLock(FSNamesystemLockMode.FS);
             try {
               editLog.logSetOwner("/","test","test");
             } finally {
-              namesystem.writeUnlock();
+              namesystem.writeUnlock(FSNamesystemLockMode.FS, "testSaveRightBeforeSync");
             }
             sleepingBeforeSync.countDown();
             LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -147,11 +148,11 @@ public class TestFSImageWithSnapshot {
         conf);
     FSImageCompression compression = FSImageCompression.createCompression(conf);
     File imageFile = getImageFile(testDir, txid);
-    fsn.readLock();
+    fsn.readLock(FSNamesystemLockMode.GLOBAL);
     try {
       saver.save(imageFile, compression);
     } finally {
-      fsn.readUnlock();
+      fsn.readUnlock(FSNamesystemLockMode.GLOBAL, "saveFSImage");
     }
     return imageFile;
   }
@@ -159,14 +160,14 @@ public class TestFSImageWithSnapshot {
   /** Load the fsimage from a temp file */
   private void loadFSImageFromTempFile(File imageFile) throws IOException {
     FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, fsn);
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
     fsn.getFSDirectory().writeLock();
     try {
       loader.load(imageFile, false);
       fsn.getFSDirectory().updateCountForQuota();
     } finally {
       fsn.getFSDirectory().writeUnlock();
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "loadFSImageFromTempFile");
     }
   }
   
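
Note: loadFSImageFromTempFile keeps the existing ordering convention under FGL: the FSNamesystem lock (GLOBAL here) is taken before the FSDirectory lock and released after it. A hedged sketch of that shape, assuming only the calls visible in the hunk; mutateImage is an illustrative name:

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

final class LockOrderingSketch {
  private LockOrderingSketch() {
  }

  /** Outer namesystem lock first, inner directory lock second. */
  static void mutateImage(FSNamesystem fsn, Runnable mutation) {
    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
    fsn.getFSDirectory().writeLock();
    try {
      mutation.run();
    } finally {
      // Release in reverse order: directory lock, then namesystem lock.
      fsn.getFSDirectory().writeUnlock();
      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "mutateImage");
    }
  }
}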

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -194,12 +195,12 @@ public class TestFSNamesystem {
   }
 
   private void clearNamesystem(FSNamesystem fsn) {
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
     try {
       fsn.clear();
       assertFalse(fsn.isImageLoaded());
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "clearNamesystem");
     }
   }
 

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.metrics2.impl.ConfigBuilder;
 import org.apache.hadoop.metrics2.impl.TestMetricsConfig;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -153,7 +154,7 @@ public class TestFSNamesystemMBean {
       cluster.waitActive();
 
       fsn = cluster.getNameNode().namesystem;
-      fsn.writeLock();
+      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
       Thread.sleep(jmxCachePeriod * 1000);
 
       MBeanClient client = new MBeanClient();
@@ -163,8 +164,8 @@ public class TestFSNamesystemMBean {
           "is owned by another thread", client.succeeded);
       client.interrupt();
     } finally {
-      if (fsn != null && fsn.hasWriteLock()) {
-        fsn.writeUnlock();
+      if (fsn != null && fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL)) {
+        fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testWithFSNamesystemWriteLock");
       }
       if (cluster != null) {
         cluster.shutdown();
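
Note: hasWriteLock (and hasReadLock, in TestSecurityTokenEditLog further below) now takes the same mode argument, so cleanup paths can release exactly the lock they may still hold. A small hedged sketch of that guarded-release idiom; releaseIfHeld is an illustrative name:

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

final class GuardedUnlockSketch {
  private GuardedUnlockSketch() {
  }

  /** Releases the GLOBAL write lock only if this thread still holds it. */
  static void releaseIfHeld(FSNamesystem fsn, String op) {
    if (fsn != null && fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL)) {
      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, op);
    }
  }
}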

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java

@@ -36,6 +36,7 @@ import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.slf4j.Logger;
@@ -1083,7 +1084,7 @@ public class TestFileTruncate {
     INodeFile file = iip.getLastINode().asFile();
     long initialGenStamp = file.getLastBlock().getGenerationStamp();
     // Test that prepareFileForTruncate sets up in-place truncate.
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
     try {
       Block oldBlock = file.getLastBlock();
       Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
@@ -1103,7 +1104,7 @@ public class TestFileTruncate {
       fsn.getEditLog().logTruncate(
           src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testTruncateRecovery");
     }
 
     // Re-create file and ensure we are ready to copy on truncate
@@ -1117,7 +1118,7 @@ public class TestFileTruncate {
         (BlockInfoContiguous) file.getLastBlock()), is(true));
     initialGenStamp = file.getLastBlock().getGenerationStamp();
     // Test that prepareFileForTruncate sets up copy-on-write truncate
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
     try {
       Block oldBlock = file.getLastBlock();
       Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
@@ -1137,7 +1138,7 @@ public class TestFileTruncate {
       fsn.getEditLog().logTruncate(
           src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testTruncateRecovery");
     }
     checkBlockRecovery(srcPath);
     fs.deleteSnapshot(parent, "ss0");
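
Note: truncate both rewrites the file's INode and reworks its last block, which is presumably why these hunks take the GLOBAL write lock rather than FS or BM alone. A hedged generic helper capturing that shape, assuming only the lock calls shown above; callUnderGlobalWrite is an illustrative name:

import java.util.concurrent.Callable;

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

final class GlobalWriteSketch {
  private GlobalWriteSketch() {
  }

  /** Runs a namespace-and-blocks mutation under the GLOBAL write lock. */
  static <T> T callUnderGlobalWrite(FSNamesystem fsn, String op, Callable<T> mutation)
      throws Exception {
    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
    try {
      return mutation.call();
    } finally {
      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, op);
    }
  }
}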

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -105,6 +105,7 @@ import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSck;
@@ -1511,11 +1512,11 @@ public class TestFsck {
     ExtendedBlock eb = util.getFirstBlock(dfs, path);
     BlockCollection bc = null;
     try {
-      fsn.writeLock();
+      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
       bc = fsn.getBlockCollection(bi);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testBlockIdCKDecommission");
     }
     DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
     bm.getDatanodeManager().getDatanodeAdminManager().startDecommission(dn);
@@ -1953,11 +1954,11 @@ public class TestFsck {
     ExtendedBlock eb = util.getFirstBlock(dfs, path);
     BlockCollection bc = null;
     try {
-      fsn.writeLock();
+      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
       bc = fsn.getBlockCollection(bi);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testFsckWithDecommissionedReplicas");
     }
     DatanodeDescriptor dn = bc.getBlocks()[0]
         .getDatanode(0);

+ 8 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -72,14 +73,15 @@ public class TestGetBlockLocations {
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
         if(!deleted[0]) {
-          fsn.writeLock();
+          fsn.writeLock(FSNamesystemLockMode.GLOBAL);
           try {
             INodesInPath iip = fsd.getINodesInPath(FILE_PATH, DirOp.READ);
             FSDirDeleteOp.delete(fsd, iip, new INode.BlocksMapUpdateInfo(),
                                  new ArrayList<INode>(), new ArrayList<Long>(),
                                  now());
           } finally {
-            fsn.writeUnlock();
+            fsn.writeUnlock(FSNamesystemLockMode.GLOBAL,
+                "testGetBlockLocationsRacingWithDelete");
           }
           deleted[0] = true;
         }
@@ -106,14 +108,14 @@ public class TestGetBlockLocations {
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
         if (!renamed[0]) {
-          fsn.writeLock();
+          fsn.writeLock(FSNamesystemLockMode.FS);
           try {
             FSDirRenameOp.renameTo(fsd, fsd.getPermissionChecker(), FILE_PATH,
                                    DST_PATH, new INode.BlocksMapUpdateInfo(),
                                    false);
             renamed[0] = true;
           } finally {
-            fsn.writeUnlock();
+            fsn.writeUnlock(FSNamesystemLockMode.FS, "testGetBlockLocationsRacingWithRename");
           }
         }
         invocation.callRealMethod();
@@ -142,13 +144,13 @@ public class TestGetBlockLocations {
         perm, 1, 1, new BlockInfo[] {}, (short) 1,
         DFS_BLOCK_SIZE_DEFAULT);
 
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.FS);
     try {
       final FSDirectory fsd = fsn.getFSDirectory();
       INodesInPath iip = fsd.getINodesInPath("/", DirOp.READ);
       fsd.addINode(iip, file, null);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.FS, "setupFileSystem");
     }
     return fsn;
   }
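
Note: within this single test the mode varies per operation: the racing delete runs under GLOBAL (removing a file also updates block state), while the racing rename and the iNode setup stay on FS (namespace only). A hedged side-by-side sketch; both method names are illustrative:

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

final class DeleteVsRenameSketch {
  private DeleteVsRenameSketch() {
  }

  /** Delete touches blocks too, so it takes the GLOBAL write lock. */
  static void deleteUnderLock(FSNamesystem fsn, Runnable delete) {
    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
    try {
      delete.run();
    } finally {
      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "delete");
    }
  }

  /** Rename is namespace-only, so the FS write lock suffices. */
  static void renameUnderLock(FSNamesystem fsn, Runnable rename) {
    fsn.writeLock(FSNamesystemLockMode.FS);
    try {
      rename.run();
    } finally {
      fsn.writeUnlock(FSNamesystemLockMode.FS, "rename");
    }
  }
}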

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.Random;
 
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -120,11 +121,11 @@ public class TestLargeDirectoryDelete {
           try {
             int blockcount = getBlockCount();
             if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
-              mc.getNamesystem().writeLock();
+              mc.getNamesystem().writeLock(FSNamesystemLockMode.GLOBAL);
               try {
                 lockOps++;
               } finally {
-                mc.getNamesystem().writeUnlock();
+                mc.getNamesystem().writeUnlock(FSNamesystemLockMode.GLOBAL, "runThreads");
               }
               Thread.sleep(1);
             }

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java

@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -338,7 +339,7 @@ public class TestListOpenFiles {
     FSDirectory dir = fsNamesystem.getFSDirectory();
     List<INode> removedINodes = new ChunkedArrayList<>();
     removedINodes.add(dir.getINode(path));
-    fsNamesystem.writeLock();
+    fsNamesystem.writeLock(FSNamesystemLockMode.FS);
     try {
       dir.removeFromInodeMap(removedINodes);
       openFileEntryBatchedEntries = nnRpc
@@ -349,7 +350,7 @@ public class TestListOpenFiles {
     } catch (NullPointerException e) {
       Assert.fail("Should not throw NPE when the file is deleted but has lease!");
     } finally {
-      fsNamesystem.writeUnlock();
+      fsNamesystem.writeUnlock(FSNamesystemLockMode.FS, "testListOpenFilesWithDeletedPath");
     }
   }
 }

+ 7 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import java.util.function.Supplier;
@@ -95,13 +96,14 @@ public class TestNameNodeMetadataConsistency {
 
     // Simulate Namenode forgetting a Block
     cluster.restartNameNode(true);
-    cluster.getNameNode().getNamesystem().writeLock();
+    cluster.getNameNode().getNamesystem().writeLock(FSNamesystemLockMode.BM);
     BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
         .getStoredBlock(block.getLocalBlock());
     bInfo.delete();
     cluster.getNameNode().getNamesystem().getBlockManager()
         .removeBlock(bInfo);
-    cluster.getNameNode().getNamesystem().writeUnlock();
+    cluster.getNameNode().getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+        "testGenerationStampInFuture");
 
     // we also need to tell block manager that we are in the startup path
     BlockManagerTestUtil.setStartupSafeModeForTest(
@@ -145,11 +147,12 @@ public class TestNameNodeMetadataConsistency {
     cluster.restartNameNode(true);
     BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager
         ().getStoredBlock(block.getLocalBlock());
-    cluster.getNameNode().getNamesystem().writeLock();
+    cluster.getNameNode().getNamesystem().writeLock(FSNamesystemLockMode.BM);
    bInfo.delete();
     cluster.getNameNode().getNamesystem().getBlockManager()
         .removeBlock(bInfo);
-    cluster.getNameNode().getNamesystem().writeUnlock();
+    cluster.getNameNode().getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+        "testEnsureGenStampsIsStartupOnly");
 
     cluster.restartDataNode(dnProps);
     waitForNumBytes(0);

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
@@ -338,13 +339,13 @@ public class TestReconstructStripedBlocks {
       boolean reconstructed = false;
       for (int i = 0; i < 5; i++) {
         NumberReplicas num = null;
-        fsn.readLock();
+        fsn.readLock(FSNamesystemLockMode.GLOBAL);
         try {
           BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory()
               .getINode4Write(filePath.toString()).asFile().getLastBlock();
           num = bm.countNodes(blockInfo);
         } finally {
-          fsn.readUnlock();
+          fsn.readUnlock(FSNamesystemLockMode.GLOBAL, "testCountLiveReplicas");
         }
         if (num.liveReplicas() >= groupSize) {
           reconstructed = true;

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -196,8 +197,8 @@ public class TestSecurityTokenEditLog {
         @Override
         public Void answer(InvocationOnMock invocation) throws Throwable {
           // fsn claims read lock if either read or write locked.
-          Assert.assertTrue(fsnRef.get().hasReadLock());
-          Assert.assertFalse(fsnRef.get().hasWriteLock());
+          Assert.assertTrue(fsnRef.get().hasReadLock(FSNamesystemLockMode.FS));
+          Assert.assertFalse(fsnRef.get().hasWriteLock(FSNamesystemLockMode.FS));
           return null;
         }
       }

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java

@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
@@ -568,13 +569,13 @@ public class TestDNFencing {
   }
 
   private void doMetasave(NameNode nn2) {
-    nn2.getNamesystem().writeLock();
+    nn2.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       PrintWriter pw = new PrintWriter(System.err);
       nn2.getNamesystem().getBlockManager().metaSave(pw);
       pw.flush();
     } finally {
-      nn2.getNamesystem().writeUnlock();
+      nn2.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "metaSave");
     }
   }
 

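doMetasave now scopes its write lock to the block-manager (BM) mode and names the operation on unlock. Where this pattern repeats, a small helper keeps the lock/unlock pair and the op name together; this is a hypothetical convenience (LockedExec, ModeLock, and withWriteLock are invented names, not anything the patch adds):

    import java.util.function.Supplier;

    /** Hypothetical helper: keeps the mode-qualified lock/unlock pair together. */
    final class LockedExec {
      /** Stand-in for the slice of the FSNamesystem lock surface tests use. */
      interface ModeLock<M> {
        void writeLock(M mode);
        void writeUnlock(M mode, String opName);
      }

      static <M, T> T withWriteLock(ModeLock<M> fsn, M mode, String opName,
          Supplier<T> body) {
        fsn.writeLock(mode);
        try {
          return body.get();      // run the critical section
        } finally {
          // Unlock with the same mode; the op name presumably tags
          // long-lock-hold reporting, as with the old writeUnlock(String).
          fsn.writeUnlock(mode, opName);
        }
      }
    }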
+ 7 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

@@ -50,6 +50,7 @@ import java.util.List;
 import java.util.Random;
 
 import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 
@@ -491,12 +492,12 @@ public class TestNameNodeMetrics {
     // Corrupt first replica of the block
     LocatedBlock block = NameNodeAdapter.getBlockLocations(
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
-    cluster.getNamesystem().writeLock();
+    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
           "STORAGE_ID", "TEST");
     } finally {
-      cluster.getNamesystem().writeUnlock();
+      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testCorruptBlock");
     }
 
     BlockManagerTestUtil.updateState(bm);
@@ -583,12 +584,12 @@ public class TestNameNodeMetrics {
     assert lbs.get(0) instanceof LocatedStripedBlock;
     LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
 
-    cluster.getNamesystem().writeLock();
+    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
           "STORAGE_ID", "TEST");
     } finally {
-      cluster.getNamesystem().writeUnlock();
+      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testStripedFileCorruptBlocks");
     }
 
     BlockManagerTestUtil.updateState(bm);
@@ -681,12 +682,12 @@ public class TestNameNodeMetrics {
     // Corrupt the only replica of the block to result in a missing block
     LocatedBlock block = NameNodeAdapter.getBlockLocations(
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
-    cluster.getNamesystem().writeLock();
+    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
           "STORAGE_ID", "TEST");
     } finally {
-      cluster.getNamesystem().writeUnlock();
+      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testMissingBlock");
     }
     Thread.sleep(1000); // Wait for block to be marked corrupt
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);

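The three hunks above repeat the same lock / findAndMarkBlockAsCorrupt / unlock sequence with only the op name differing. A test-local helper along these lines (hypothetical, not part of the patch; markCorruptUnderBmLock is an invented name) would keep the mode and the paired unlock in one place:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

    // Hypothetical test helper: the same sequence as the hunks above, with
    // the op name (used for lock reporting) as the only variable part.
    private static void markCorruptUnderBmLock(FSNamesystem fsn, BlockManager bm,
        LocatedBlock blk, String opName) throws IOException {
      fsn.writeLock(FSNamesystemLockMode.BM);
      try {
        // Dummy storage id and reason, matching the tests above.
        bm.findAndMarkBlockAsCorrupt(blk.getBlock(), blk.getLocations()[0],
            "STORAGE_ID", "TEST");
      } finally {
        fsn.writeUnlock(FSNamesystemLockMode.BM, opName);
      }
    }

Since LocatedStripedBlock extends LocatedBlock, the same helper covers the striped-file hunk as well.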
+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java

@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
@@ -297,10 +298,10 @@ public class TestINodeFileUnderConstructionWithSnapshot {
       hdfs.delete(foo, true);
       Thread.sleep(1000);
       try {
-        fsn.writeLock();
+        fsn.writeLock(FSNamesystemLockMode.GLOBAL);
         NameNodeAdapter.getLeaseManager(fsn).runLeaseChecks();
       } finally {
-        fsn.writeUnlock();
+        fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testLease");
       }
     } finally {
       NameNodeAdapter.setLeasePeriod(fsn, HdfsConstants.LEASE_SOFTLIMIT_PERIOD,

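Here the test takes GLOBAL rather than FS or BM alone, presumably because lease checks can cross from namespace state (files under construction) into block state. If GLOBAL is implemented by taking both underlying locks — an assumption for illustration only — a fixed acquisition order is what keeps it deadlock-free against threads taking a single mode:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    /** Sketch (assumption, not the HDFS code): GLOBAL as "FS then BM". */
    final class GlobalLockSketch {
      private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(true);
      private final ReentrantReadWriteLock bmLock = new ReentrantReadWriteLock(true);

      void globalWriteLock() {
        fsLock.writeLock().lock();   // always acquire FS first...
        bmLock.writeLock().lock();   // ...then BM, so orderings cannot cycle
      }

      void globalWriteUnlock() {
        bmLock.writeLock().unlock(); // release in reverse order
        fsLock.writeLock().unlock();
      }
    }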
+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java

@@ -81,6 +81,7 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.AccessControlException;
@@ -783,14 +784,14 @@ public class TestDFSAdmin {
       LocatedStripedBlock bg =
           (LocatedStripedBlock)(lbs.get(0));
 
-      miniCluster.getNamesystem().writeLock();
+      miniCluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
       try {
         BlockManager bm = miniCluster.getNamesystem().getBlockManager();
         bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
             "STORAGE_ID", "TEST");
         BlockManagerTestUtil.updateState(bm);
       } finally {
-        miniCluster.getNamesystem().writeUnlock();
+        miniCluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testReportCommand");
       }
       waitForCorruptBlock(miniCluster, client, file);
 

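The same BM-scoped pattern again. Because mismatching the mode between lock and unlock, or forgetting the unlock entirely, would be an easy mistake in tests, a try-with-resources wrapper is one way to make the pairing mechanical. AutoWriteLock is a hypothetical helper, not part of the patch:

    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

    /** Hypothetical wrapper: acquire on construction, release on close(). */
    final class AutoWriteLock implements AutoCloseable {
      private final FSNamesystem fsn;
      private final FSNamesystemLockMode mode;
      private final String opName;

      AutoWriteLock(FSNamesystem fsn, FSNamesystemLockMode mode, String opName) {
        this.fsn = fsn;
        this.mode = mode;
        this.opName = opName;
        fsn.writeLock(mode);
      }

      @Override
      public void close() {
        // Same mode and op name as the acquisition, guaranteed by construction.
        fsn.writeUnlock(mode, opName);
      }
    }

Usage then shrinks to:

    try (AutoWriteLock ignored = new AutoWriteLock(
        miniCluster.getNamesystem(), FSNamesystemLockMode.BM, "testReportCommand")) {
      // ... mutate block-manager state ...
    }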
+ 7 - 6
hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java

@@ -84,6 +84,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.ipc.RemoteException;
@@ -1094,26 +1095,26 @@ public class ITestProvidedImplementation {
 
   private void startDecommission(FSNamesystem namesystem, DatanodeManager dnm,
       int dnIndex) throws Exception {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     dnm.getDatanodeAdminManager().startDecommission(dnDesc);
-    namesystem.writeUnlock();
+    namesystem.writeUnlock(FSNamesystemLockMode.BM, "startDecommission");
   }
 
   private void startMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
       int dnIndex) throws Exception {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     dnm.getDatanodeAdminManager().startMaintenance(dnDesc, Long.MAX_VALUE);
-    namesystem.writeUnlock();
+    namesystem.writeUnlock(FSNamesystemLockMode.BM, "startMaintenance");
   }
 
   private void stopMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
       int dnIndex) throws Exception {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     dnm.getDatanodeAdminManager().stopMaintenance(dnDesc);
-    namesystem.writeUnlock();
+    namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "stopMaintenance");
   }
 
   @Test