
Revert "HDFS-17496. DataNode supports more fine-grained dataset lock based on…" (#7279)

This reverts commit 94d6a77c39452c82ba78cef2cd96f5c8ff4fcfa3.
slfan1989 3 months ago
parent commit f0ab1e6972
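
For orientation, here is a minimal before/after sketch of the locking this revert restores, using the DataSetLockManager API as it appears in the hunks below; bpid and storageUuid are illustrative placeholders, not values from the commit:

    // Before the revert (HDFS-17496): three tiers, the finest keyed by a
    // per-volume sub lock derived from the block id:
    //   lockManager.writeLock(LockLevel.DIR, bpid, storageUuid, subLockName);
    //
    // After the revert: two tiers again, the finest being the volume:
    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
        bpid, storageUuid)) {
      // per-volume critical section, as in the FsDatasetImpl hunks below
    }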

+ 0 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -1744,10 +1744,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean
       DFS_DATANODE_LOCKMANAGER_TRACE_DEFAULT = false;
 
-  public static final String DFS_DATANODE_DATASET_SUBLOCK_COUNT_KEY =
-      "dfs.datanode.dataset.sublock.count";
-  public static final long DFS_DATANODE_DATASET_SUBLOCK_COUNT_DEFAULT = 1000L;
-
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/DataNodeLockManager.java

@@ -29,8 +29,7 @@ public interface DataNodeLockManager<T extends AutoCloseDataSetLock> {
    */
   enum LockLevel {
     BLOCK_POOl,
-    VOLUME,
-    DIR
+    VOLUME
   }
 
   /**
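
With DIR gone, the enum is back to its two original levels (the long-standing "BLOCK_POOl" spelling, with a lowercase l, is the real identifier, not a transcription error). Registering a lock per level then takes one and two resources respectively, mirroring TestDataSetLockManager further down:

    lockManager.addLock(LockLevel.BLOCK_POOl, "BPtest");
    lockManager.addLock(LockLevel.VOLUME, "BPtest", "Volumetest");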

+ 2 - 36
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java

@@ -96,13 +96,6 @@ public class DataSetLockManager implements DataNodeLockManager<AutoCloseDataSetL
             + resources[0] + "volume lock :" + resources[1]);
       }
       return resources[0] + resources[1];
-    } else if (resources.length == 3 && level == LockLevel.DIR) {
-      if (resources[0] == null || resources[1] == null || resources[2] == null) {
-        throw new IllegalArgumentException("acquire a null dataset lock : "
-            + resources[0] + ",volume lock :" + resources[1]
-        + ",subdir lock :" + resources[2]);
-      }
-      return resources[0] + resources[1] + resources[2];
     } else {
       throw new IllegalArgumentException("lock level do not match resource");
     }
@@ -163,7 +156,7 @@ public class DataSetLockManager implements DataNodeLockManager<AutoCloseDataSetL
   public AutoCloseDataSetLock readLock(LockLevel level, String... resources) {
     if (level == LockLevel.BLOCK_POOl) {
       return getReadLock(level, resources[0]);
-    } else if (level == LockLevel.VOLUME){
+    } else {
       AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
       AutoCloseDataSetLock volLock = getReadLock(level, resources);
       volLock.setParentLock(bpLock);
@@ -172,17 +165,6 @@ public class DataSetLockManager implements DataNodeLockManager<AutoCloseDataSetL
             resources[0]);
       }
       return volLock;
-    } else {
-      AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
-      AutoCloseDataSetLock volLock = getReadLock(LockLevel.VOLUME, resources[0], resources[1]);
-      volLock.setParentLock(bpLock);
-      AutoCloseDataSetLock dirLock = getReadLock(level, resources);
-      dirLock.setParentLock(volLock);
-      if (openLockTrace) {
-        LOG.debug("Sub lock " + resources[0] + resources[1] + resources[2] + " parent lock " +
-            resources[0] + resources[1]);
-      }
-      return dirLock;
     }
   }
 
@@ -190,7 +172,7 @@ public class DataSetLockManager implements DataNodeLockManager<AutoCloseDataSetL
   public AutoCloseDataSetLock writeLock(LockLevel level, String... resources) {
     if (level == LockLevel.BLOCK_POOl) {
       return getWriteLock(level, resources[0]);
-    } else if (level == LockLevel.VOLUME) {
+    } else {
       AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
       AutoCloseDataSetLock volLock = getWriteLock(level, resources);
       volLock.setParentLock(bpLock);
@@ -199,17 +181,6 @@ public class DataSetLockManager implements DataNodeLockManager<AutoCloseDataSetL
             resources[0]);
       }
       return volLock;
-    } else {
-      AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
-      AutoCloseDataSetLock volLock = getReadLock(LockLevel.VOLUME, resources[0], resources[1]);
-      volLock.setParentLock(bpLock);
-      AutoCloseDataSetLock dirLock = getWriteLock(level, resources);
-      dirLock.setParentLock(volLock);
-      if (openLockTrace) {
-        LOG.debug("Sub lock " + resources[0] + resources[1] + resources[2] + " parent lock " +
-            resources[0] + resources[1]);
-      }
-      return dirLock;
     }
   }
 
@@ -264,13 +235,8 @@ public class DataSetLockManager implements DataNodeLockManager<AutoCloseDataSetL
     String lockName = generateLockName(level, resources);
     if (level == LockLevel.BLOCK_POOl) {
       lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair));
-    } else if (level == LockLevel.VOLUME) {
-      lockMap.addLock(resources[0], new ReentrantReadWriteLock(isFair));
-      lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair));
     } else {
       lockMap.addLock(resources[0], new ReentrantReadWriteLock(isFair));
-      lockMap.addLock(generateLockName(LockLevel.VOLUME, resources[0], resources[1]),
-          new ReentrantReadWriteLock(isFair));
       lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair));
     }
   }
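
Two details of the hunks above are easy to miss: lock names are plain concatenations of the resource strings (generateLockName returns resources[0] + resources[1] at the VOLUME level), and a VOLUME lock is chained to its BLOCK_POOl parent via setParentLock. A small sketch under those semantics, reusing the test's resource names:

    // generateLockName(LockLevel.VOLUME, "BPtest", "Volumetest")
    //   -> "BPtestVolumetest"
    try (AutoCloseDataSetLock vol =
        lockManager.writeLock(LockLevel.VOLUME, "BPtest", "Volumetest")) {
      // holds the "BPtest" block-pool read lock as parent plus the
      // "BPtestVolumetest" write lock; both are released when vol closes
    }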

+ 0 - 36
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetSubLockStrategy.java

@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.datanode;
-
-import java.util.List;
-
-/**
- * This interface is used to generate sub lock name for a blockid.
- */
-public interface DataSetSubLockStrategy {
-
-  /**
-   * Generate sub lock name for the given blockid.
-   * @param blockid the block id.
-   * @return sub lock name for the input blockid.
-   */
-  String blockIdToSubLock(long blockid);
-
-  List<String> getAllSubLockName();
-}

+ 0 - 53
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ModDataSetSubLockStrategy.java

@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.datanode;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-
-public class ModDataSetSubLockStrategy implements DataSetSubLockStrategy {
-  public static final Logger LOG = LoggerFactory.getLogger(DataSetSubLockStrategy.class);
-
-  private static final String LOCK_NAME_PERFIX = "SubLock";
-  private long modFactor;
-
-  public ModDataSetSubLockStrategy(long mod) {
-    if (mod <= 0) {
-      mod = 1L;
-    }
-    this.modFactor = mod;
-  }
-
-  @Override
-  public String blockIdToSubLock(long blockid) {
-    return LOCK_NAME_PERFIX + (blockid % modFactor);
-  }
-
-  @Override
-  public List<String> getAllSubLockName() {
-    List<String> res = new ArrayList<>();
-    for (long i = 0L; i < modFactor; i++) {
-      res.add(LOCK_NAME_PERFIX + i);
-    }
-    return res;
-  }
-}
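
For reference, the deleted default strategy sharded block ids across sub locks by simple modulo; with the default count of 1000 from the removed config key, the mapping worked out as:

    DataSetSubLockStrategy strategy = new ModDataSetSubLockStrategy(1000L);
    strategy.blockIdToSubLock(123456789L); // -> "SubLock789" (123456789 % 1000)
    strategy.getAllSubLockName();          // -> ["SubLock0", ..., "SubLock999"]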

+ 38 - 83
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -65,11 +65,9 @@ import org.apache.hadoop.hdfs.server.common.DataNodeLockManager;
 import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.datanode.DataSetLockManager;
-import org.apache.hadoop.hdfs.server.datanode.DataSetSubLockStrategy;
 import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
-import org.apache.hadoop.hdfs.server.datanode.ModDataSetSubLockStrategy;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -200,9 +198,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   @Override // FsDatasetSpi
   public Block getStoredBlock(String bpid, long blkid)
       throws IOException {
-    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.DIR,
-        bpid, getReplicaInfo(bpid, blkid).getStorageUuid(),
-        datasetSubLockStrategy.blockIdToSubLock(blkid))) {
+    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+        bpid)) {
       ReplicaInfo r = volumeMap.get(bpid, blkid);
       if (r == null) {
         return null;
@@ -291,9 +288,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   private long lastDirScannerNotifyTime;
   private volatile long lastDirScannerFinishTime;
 
-  private final DataSetSubLockStrategy datasetSubLockStrategy;
-  private final long datasetSubLockCount;
-
   /**
    * An FSDataset has a directory where it loads its data files.
    */
@@ -398,9 +392,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_MAX_NOTIFY_COUNT_KEY,
         DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_MAX_NOTIFY_COUNT_DEFAULT);
     lastDirScannerNotifyTime = System.currentTimeMillis();
-    datasetSubLockCount = conf.getLong(DFSConfigKeys.DFS_DATANODE_DATASET_SUBLOCK_COUNT_KEY,
-        DFSConfigKeys.DFS_DATANODE_DATASET_SUBLOCK_COUNT_DEFAULT);
-    this.datasetSubLockStrategy = new ModDataSetSubLockStrategy(datasetSubLockCount);
   }
 
   /**
@@ -439,12 +430,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       FsVolumeReference ref) throws IOException {
     for (String bp : volumeMap.getBlockPoolList()) {
       lockManager.addLock(LockLevel.VOLUME, bp, ref.getVolume().getStorageID());
-      List<String> allSubDirNameForDataSetLock = datasetSubLockStrategy.getAllSubLockName();
-      for (String dir : allSubDirNameForDataSetLock) {
-        lockManager.addLock(LockLevel.DIR, bp, ref.getVolume().getStorageID(), dir);
-        LOG.info("Added DIR lock for bpid:{}, volume storageid:{}, dir:{}",
-            bp, ref.getVolume().getStorageID(), dir);
-      }
     }
     DatanodeStorage dnStorage = storageMap.get(sd.getStorageUuid());
     if (dnStorage != null) {
@@ -644,12 +629,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       for (String storageUuid : storageToRemove) {
         storageMap.remove(storageUuid);
         for (String bp : volumeMap.getBlockPoolList()) {
-          List<String> allSubDirNameForDataSetLock = datasetSubLockStrategy.getAllSubLockName();
-          for (String dir : allSubDirNameForDataSetLock) {
-            lockManager.removeLock(LockLevel.DIR, bp, storageUuid, dir);
-            LOG.info("Removed DIR lock for bpid:{}, volume storageid:{}, dir:{}",
-                bp, storageUuid, dir);
-          }
           lockManager.removeLock(LockLevel.VOLUME, bp, storageUuid);
         }
       }
@@ -840,9 +819,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       long seekOffset) throws IOException {
 
     ReplicaInfo info;
-    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+        b.getBlockPoolId())) {
       info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
     }
 
@@ -936,9 +914,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   @Override // FsDatasetSpi
   public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
       long blkOffset, long metaOffset) throws IOException {
-    try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), getStorageUuidForLock(b))) {
       ReplicaInfo info = getReplicaInfo(b);
       FsVolumeReference ref = info.getVolume().obtainReference();
       try {
@@ -1403,9 +1380,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   @Override  // FsDatasetSpi
   public ReplicaHandler append(ExtendedBlock b,
       long newGS, long expectedBlockLen) throws IOException {
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), getStorageUuidForLock(b))) {
       // If the block was successfully finalized because all packets
       // were successfully processed at the Datanode but the ack for
       // some of the packets were not received by the client. The client
@@ -1457,9 +1433,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   private ReplicaInPipeline append(String bpid,
       ReplicaInfo replicaInfo, long newGS, long estimateBlockLen)
       throws IOException {
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        bpid, replicaInfo.getStorageUuid(),
-        datasetSubLockStrategy.blockIdToSubLock(replicaInfo.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        bpid, replicaInfo.getStorageUuid())) {
       // If the block is cached, start uncaching it.
       if (replicaInfo.getState() != ReplicaState.FINALIZED) {
         throw new IOException("Only a Finalized replica can be appended to; "
@@ -1555,9 +1530,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
     while (true) {
       try {
-        try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-            b.getBlockPoolId(), getStorageUuidForLock(b),
-            datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+        try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+            b.getBlockPoolId())) {
           ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
           FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
           ReplicaInPipeline replica;
@@ -1590,9 +1564,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         b, newGS, expectedBlockLen);
     while (true) {
       try {
-        try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-            b.getBlockPoolId(), getStorageUuidForLock(b),
-            datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+        try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+            b.getBlockPoolId(), getStorageUuidForLock(b))) {
           // check replica's state
           ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
           // bump the replica's GS
@@ -1677,9 +1650,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       }
 
       ReplicaInPipeline newReplicaInfo;
-      try (AutoCloseableLock l = lockManager.writeLock(LockLevel.DIR,
-          b.getBlockPoolId(), v.getStorageID(),
-          datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+      try (AutoCloseableLock l = lockManager.writeLock(LockLevel.VOLUME,
+          b.getBlockPoolId(), v.getStorageID())) {
         newReplicaInfo = v.createRbw(b);
         if (newReplicaInfo.getReplicaInfo().getState() != ReplicaState.RBW) {
           throw new IOException("CreateRBW returned a replica of state "
@@ -1709,9 +1681,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     try {
       while (true) {
         try {
-          try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-              b.getBlockPoolId(), getStorageUuidForLock(b),
-              datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+          try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+              b.getBlockPoolId(), getStorageUuidForLock(b))) {
             ReplicaInfo replicaInfo =
                 getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
             // check the replica's state
@@ -1742,9 +1713,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   private ReplicaHandler recoverRbwImpl(ReplicaInPipeline rbw,
       ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
       throws IOException {
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), getStorageUuidForLock(b))) {
       // check generation stamp
       long replicaGenerationStamp = rbw.getGenerationStamp();
       if (replicaGenerationStamp < b.getGenerationStamp() ||
@@ -1805,9 +1775,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   public ReplicaInPipeline convertTemporaryToRbw(
       final ExtendedBlock b) throws IOException {
     long startTimeMs = Time.monotonicNow();
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), getStorageUuidForLock(b))) {
       final long blockId = b.getBlockId();
       final long expectedGs = b.getGenerationStamp();
       final long visible = b.getNumBytes();
@@ -1946,9 +1915,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         .getNumBytes());
     FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
     ReplicaInPipeline newReplicaInfo;
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), v.getStorageID(),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), v.getStorageID())) {
       try {
         newReplicaInfo = v.createTemporary(b);
         LOG.debug("creating temporary for block: {} on volume: {}",
@@ -2005,9 +1973,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     ReplicaInfo replicaInfo = null;
     ReplicaInfo finalizedReplicaInfo = null;
     long startTimeMs = Time.monotonicNow();
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), getStorageUuidForLock(b))) {
       if (Thread.interrupted()) {
         // Don't allow data modifications from interrupted threads
         throw new IOException("Cannot finalize block: " + b + " from Interrupted Thread");
@@ -2043,9 +2010,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
   private ReplicaInfo finalizeReplica(String bpid, ReplicaInfo replicaInfo)
       throws IOException {
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        bpid, replicaInfo.getStorageUuid(),
-        datasetSubLockStrategy.blockIdToSubLock(replicaInfo.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        bpid, replicaInfo.getStorageUuid())) {
       // Compare generation stamp of old and new replica before finalizing
       if (volumeMap.get(bpid, replicaInfo.getBlockId()).getGenerationStamp()
           > replicaInfo.getGenerationStamp()) {
@@ -2094,9 +2060,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   @Override // FsDatasetSpi
   public void unfinalizeBlock(ExtendedBlock b) throws IOException {
     long startTimeMs = Time.monotonicNow();
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), getStorageUuidForLock(b))) {
       ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
           b.getLocalBlock());
       if (replicaInfo != null &&
@@ -2494,8 +2459,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     final String bpid = block.getBlockPoolId();
     final Block localBlock = block.getLocalBlock();
     final long blockId = localBlock.getBlockId();
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR, bpid, volume.getStorageID(),
-        datasetSubLockStrategy.blockIdToSubLock(blockId))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
       final ReplicaInfo info = volumeMap.get(bpid, localBlock);
       if (info == null) {
         ReplicaInfo infoByBlockId = volumeMap.get(bpid, blockId);
@@ -2584,8 +2548,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           bpid + ": ReplicaInfo not found.");
       return;
     }
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR, bpid,
-        info.getStorageUuid(), datasetSubLockStrategy.blockIdToSubLock(blockId))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME, bpid,
+        info.getStorageUuid())) {
       boolean success = false;
       try {
         info = volumeMap.get(bpid, blockId);
@@ -2782,8 +2746,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       lastDirScannerNotifyTime = startTimeMs;
     }
     String storageUuid = vol.getStorageID();
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR, bpid,
-        vol.getStorageID(), datasetSubLockStrategy.blockIdToSubLock(blockId))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME, bpid, storageUuid)) {
       if (!storageMap.containsKey(storageUuid)) {
         // Storage was already removed
         return;
@@ -3268,9 +3231,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   @Override // FsDatasetSpi
   public long getReplicaVisibleLength(final ExtendedBlock block)
   throws IOException {
-    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.DIR,
-        block.getBlockPoolId(), getStorageUuidForLock(block),
-        datasetSubLockStrategy.blockIdToSubLock(block.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+        block.getBlockPoolId())) {
       final Replica replica = getReplicaInfo(block.getBlockPoolId(),
           block.getBlockId());
       if (replica.getGenerationStamp() < block.getGenerationStamp()) {
@@ -3297,12 +3259,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       Set<String> vols = storageMap.keySet();
       for (String v : vols) {
         lockManager.addLock(LockLevel.VOLUME, bpid, v);
-        List<String> allSubDirNameForDataSetLock = datasetSubLockStrategy.getAllSubLockName();
-        for (String dir : allSubDirNameForDataSetLock) {
-          lockManager.addLock(LockLevel.DIR, bpid, v, dir);
-          LOG.info("Added DIR lock for bpid:{}, volume storageid:{}, dir:{}",
-              bpid, v, dir);
-        }
       }
     }
     try {
@@ -3430,9 +3386,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   @Override // FsDatasetSpi
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
       throws IOException {
-    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.DIR,
-        block.getBlockPoolId(), getStorageUuidForLock(block),
-        datasetSubLockStrategy.blockIdToSubLock(block.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+        block.getBlockPoolId())) {
       final Replica replica = volumeMap.get(block.getBlockPoolId(),
           block.getBlockId());
       if (replica == null) {
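
The FsDatasetImpl hunks above all follow one mechanical rewrite: most mutation paths widen from the DIR write lock to the VOLUME write lock, while lookup-only paths such as getStoredBlock, getReplicaVisibleLength, and getBlockLocalPathInfo (plus a few others like invalidate and append recovery) settle for the BLOCK_POOl level. Condensed, the post-revert shape is:

    // Mutations: volume-scoped write lock.
    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
        b.getBlockPoolId(), getStorageUuidForLock(b))) {
      // replica state changes for block b
    }
    // Lookups: block-pool-scoped read lock.
    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
        b.getBlockPoolId())) {
      ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
    }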

+ 0 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -6568,15 +6568,6 @@
       problem. In produce default set false, because it's have little performance loss.
     </description>
   </property>
-
-  <property>
-    <name>dfs.datanode.dataset.sublock.count</name>
-    <value>1000</value>
-    <description>
-      The dataset readwrite lock counts for a volume.
-    </description>
-  </property>
-
   <property>
     <name>dfs.client.fsck.connect.timeout</name>
     <value>60000ms</value>

+ 0 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java

@@ -37,7 +37,6 @@ public class TestDataSetLockManager {
   public void testBaseFunc() {
     manager.addLock(LockLevel.BLOCK_POOl, "BPtest");
     manager.addLock(LockLevel.VOLUME, "BPtest", "Volumetest");
-    manager.addLock(LockLevel.DIR, "BPtest", "Volumetest", "SubDirtest");
 
     AutoCloseDataSetLock lock = manager.writeLock(LockLevel.BLOCK_POOl, "BPtest");
     AutoCloseDataSetLock lock1 = manager.readLock(LockLevel.BLOCK_POOl, "BPtest");
@@ -63,16 +62,6 @@ public class TestDataSetLockManager {
     manager.lockLeakCheck();
     assertNull(manager.getLastException());
 
-    AutoCloseDataSetLock lock6 = manager.writeLock(LockLevel.BLOCK_POOl, "BPtest");
-    AutoCloseDataSetLock lock7 = manager.readLock(LockLevel.VOLUME, "BPtest", "Volumetest");
-    AutoCloseDataSetLock lock8 = manager.readLock(LockLevel.DIR,
-        "BPtest", "Volumetest", "SubDirtest");
-    lock8.close();
-    lock7.close();
-    lock6.close();
-    manager.lockLeakCheck();
-    assertNull(manager.getLastException());
-
     manager.writeLock(LockLevel.VOLUME, "BPtest", "Volumetest");
     manager.lockLeakCheck();
 

+ 1 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java

@@ -1946,12 +1946,7 @@ public class TestFsDatasetImpl {
       assertFalse(uuids.contains(dn.getDatanodeUuid()));
 
       // This replica has deleted from datanode memory.
-      try {
-        Block storedBlock = ds.getStoredBlock(bpid, extendedBlock.getBlockId());
-        assertNull(storedBlock);
-      } catch (Exception e) {
-        GenericTestUtils.assertExceptionContains("ReplicaNotFoundException", e);
-      }
+      assertNull(ds.getStoredBlock(bpid, extendedBlock.getBlockId()));
     } finally {
       cluster.shutdown();
       DataNodeFaultInjector.set(oldInjector);
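
The simplified assertion leans on a behavioral effect of the revert visible in the getStoredBlock hunk above: the method now consults volumeMap under the block-pool read lock and returns null for an absent replica, whereas the HDFS-17496 version resolved the storage UUID via getReplicaInfo first and could surface a ReplicaNotFoundException. A caller sketch under that post-revert behavior:

    // Post-revert: an absent replica yields null rather than an exception.
    Block stored = ds.getStoredBlock(bpid, extendedBlock.getBlockId());
    if (stored == null) {
      // the replica has been removed from datanode memory
    }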

+ 3 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java

@@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
@@ -597,13 +596,9 @@ public class TestDNFencing {
       throws IOException {
     int count = 0;
     for (DataNode dn : cluster.getDataNodes()) {
-      try {
-        if (DataNodeTestUtils.getFSDataset(dn).getStoredBlock(
-            block.getBlockPoolId(), block.getBlockId()) != null) {
-          count++;
-        }
-      } catch (ReplicaNotFoundException e) {
-        continue;
+      if (DataNodeTestUtils.getFSDataset(dn).getStoredBlock(
+          block.getBlockPoolId(), block.getBlockId()) != null) {
+        count++;
       }
     }
     return count;