|
@@ -45,12 +45,12 @@ import java.util.Iterator;
|
|
|
import java.util.LinkedList;
|
|
|
import java.util.List;
|
|
|
import java.util.Map;
|
|
|
+import java.util.Map.Entry;
|
|
|
import java.util.NavigableMap;
|
|
|
import java.util.Random;
|
|
|
import java.util.Set;
|
|
|
import java.util.TreeMap;
|
|
|
import java.util.TreeSet;
|
|
|
-import java.util.Map.Entry;
|
|
|
import java.util.concurrent.TimeUnit;
|
|
|
|
|
|
import javax.management.NotCompliantMBeanException;
|
|
@@ -61,6 +61,7 @@ import org.apache.commons.logging.Log;
|
|
|
import org.apache.commons.logging.LogFactory;
|
|
|
import org.apache.hadoop.conf.Configuration;
|
|
|
import org.apache.hadoop.fs.ContentSummary;
|
|
|
+import org.apache.hadoop.fs.FileAlreadyExistsException;
|
|
|
import org.apache.hadoop.fs.Path;
|
|
|
import org.apache.hadoop.fs.permission.FsAction;
|
|
|
import org.apache.hadoop.fs.permission.FsPermission;
|
|
@@ -86,27 +87,26 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretMan
|
|
|
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
|
|
|
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
|
|
|
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
|
|
|
+import org.apache.hadoop.hdfs.server.common.Storage;
|
|
|
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
|
|
|
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
|
|
|
-import org.apache.hadoop.hdfs.server.common.Storage;
|
|
|
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
|
|
|
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
|
|
|
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
|
|
|
import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator;
|
|
|
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
|
|
|
+import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
|
|
|
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
|
|
|
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
|
|
|
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
|
|
|
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
|
|
|
import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
|
|
|
import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
|
|
|
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
|
|
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
|
|
|
-import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
|
|
|
-import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
|
|
|
import org.apache.hadoop.io.IOUtils;
|
|
|
import org.apache.hadoop.io.Text;
|
|
|
import org.apache.hadoop.ipc.Server;
|
|
|
-import org.apache.hadoop.fs.FileAlreadyExistsException;
|
|
|
import org.apache.hadoop.metrics2.MetricsBuilder;
|
|
|
import org.apache.hadoop.metrics2.MetricsSource;
|
|
|
import org.apache.hadoop.metrics2.MetricsSystem;
|
|
@@ -121,8 +121,8 @@ import org.apache.hadoop.net.ScriptBasedMapping;
|
|
|
import org.apache.hadoop.security.AccessControlException;
|
|
|
import org.apache.hadoop.security.UserGroupInformation;
|
|
|
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
|
|
|
-import org.apache.hadoop.security.token.Token;
|
|
|
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
|
|
|
+import org.apache.hadoop.security.token.Token;
|
|
|
import org.apache.hadoop.security.token.delegation.DelegationKey;
|
|
|
import org.apache.hadoop.util.Daemon;
|
|
|
import org.apache.hadoop.util.HostsFileReader;
|
|
@@ -327,6 +327,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
|
|
|
private boolean allowBrokenAppend = false;
|
|
|
// enable durable sync
|
|
|
private boolean durableSync = true;
|
|
|
+ // How many entries are returned by getCorruptFileBlocks()
|
|
|
+ int maxCorruptFilesReturned;
|
|
|
|
|
|
/**
|
|
|
* Last block index used for replication work.
|
|
@@ -527,6 +529,9 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
|
|
|
conf.getClass("net.topology.impl", NetworkTopology.class,
|
|
|
NetworkTopology.class), conf);
|
|
|
|
|
|
+ this.maxCorruptFilesReturned = conf.getInt(
|
|
|
+ DFSConfigKeys.DFS_MAX_CORRUPT_FILES_RETURNED_KEY,
|
|
|
+ DFSConfigKeys.DFS_MAX_CORRUPT_FILES_RETURNED_DEFAULT);
|
|
|
this.replicator = BlockPlacementPolicy.getInstance(conf, this, clusterMap);
|
|
|
this.defaultReplication = conf.getInt("dfs.replication", 3);
|
|
|
this.maxReplication = conf.getInt("dfs.replication.max", 512);
|
|
@@ -6271,4 +6276,58 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
|
|
|
this.avoidStaleDataNodesForWrite = avoidStaleDataNodesForWrite;
|
|
|
}
|
|
|
|
|
|
+ /**
|
|
|
+ * Used by {@link FSNamesystem#getCorruptFileBlocks()} and
|
|
|
+ * {@link FSNamesystem#listCorruptFileBlocks()} to represent information about
|
|
|
+ * a corrupt file and its corresponding block
|
|
|
+ */
|
|
|
+ static class CorruptFileBlockInfo {
|
|
|
+ String path;
|
|
|
+ Block block;
|
|
|
+
|
|
|
+ public CorruptFileBlockInfo(String p, Block b) {
|
|
|
+ path = p;
|
|
|
+ block = b;
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public String toString() {
|
|
|
+ return block.getBlockName() + "\t" + path;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ /**
|
|
|
+ * @return a collection of corrupt files with their block information, with a
|
|
|
+ * maximum of {@link FSNamesystem#maxCorruptFilesReturned} files
|
|
|
+ * listed in total
|
|
|
+ */
|
|
|
+ private Collection<CorruptFileBlockInfo> getCorruptFileBlocks() {
|
|
|
+ ArrayList<CorruptFileBlockInfo> corruptFiles =
|
|
|
+ new ArrayList<CorruptFileBlockInfo>();
|
|
|
+ for (Block blk : neededReplications.getCorruptQueue()){
|
|
|
+ INode inode = blocksMap.getINode(blk);
|
|
|
+ if (inode != null && countNodes(blk).liveReplicas() == 0) {
|
|
|
+ String filePath = inode.getFullPathName();
|
|
|
+ CorruptFileBlockInfo info = new CorruptFileBlockInfo(filePath, blk);
|
|
|
+ corruptFiles.add(info);
|
|
|
+ if (corruptFiles.size() >= this.maxCorruptFilesReturned) {
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return corruptFiles;
|
|
|
+ }
|
|
|
+
|
|
|
+ /**
|
|
|
+ * @return Collection of CorruptFileBlockInfo objects representing files with
|
|
|
+ * corrupted blocks.
|
|
|
+ * @throws AccessControlException
|
|
|
+ * @throws IOException
|
|
|
+ */
|
|
|
+ synchronized Collection<CorruptFileBlockInfo> listCorruptFileBlocks()
|
|
|
+ throws AccessControlException, IOException {
|
|
|
+ checkSuperuserPrivilege();
|
|
|
+ return getCorruptFileBlocks();
|
|
|
+ }
|
|
|
+
|
|
|
}
|