@@ -19,11 +19,66 @@ package org.apache.hadoop.hdfs.server.datanode;

 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY;
+import static org.apache.hadoop.util.ExitUtil.terminate;

-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.protobuf.BlockingService;
+import java.io.BufferedOutputStream;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.lang.management.ManagementFactory;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.URI;
+import java.net.UnknownHostException;
+import java.nio.channels.SocketChannel;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import javax.management.ObjectName;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,10 +94,23 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.net.DomainPeerServer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
-import org.apache.hadoop.hdfs.protocol.*;
-import org.apache.hadoop.hdfs.protocol.datatransfer.*;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
+import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer;
@@ -50,9 +118,20 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.Client
|
|
|
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto;
|
|
|
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
|
|
|
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
|
|
|
-import org.apache.hadoop.hdfs.protocolPB.*;
|
|
|
-import org.apache.hadoop.hdfs.security.token.block.*;
|
|
|
+import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
|
|
|
+import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolServerSideTranslatorPB;
|
|
|
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
|
|
|
+import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB;
|
|
|
+import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB;
|
|
|
+import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
|
|
|
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
|
|
|
+import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
|
|
|
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
|
|
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
|
|
|
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
|
|
|
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
|
|
|
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
|
|
|
+import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
|
|
|
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
|
|
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
|
|
|
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
|
|
@@ -65,7 +144,11 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
|
|
|
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
|
|
|
import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
|
|
|
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
|
|
|
-import org.apache.hadoop.hdfs.server.protocol.*;
|
|
|
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
|
|
|
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
|
|
|
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
|
|
|
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
|
|
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
|
|
|
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
|
|
|
import org.apache.hadoop.hdfs.web.resources.Param;
|
|
|
import org.apache.hadoop.http.HttpConfig;
|
|
@@ -88,22 +171,21 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
|
|
|
import org.apache.hadoop.security.authorize.AccessControlList;
|
|
|
import org.apache.hadoop.security.token.Token;
|
|
|
import org.apache.hadoop.security.token.TokenIdentifier;
|
|
|
-import org.apache.hadoop.util.*;
|
|
|
+import org.apache.hadoop.util.Daemon;
|
|
|
+import org.apache.hadoop.util.DiskChecker;
|
|
|
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
|
|
|
+import org.apache.hadoop.util.GenericOptionsParser;
|
|
|
+import org.apache.hadoop.util.JvmPauseMonitor;
|
|
|
+import org.apache.hadoop.util.ServicePlugin;
|
|
|
+import org.apache.hadoop.util.StringUtils;
|
|
|
+import org.apache.hadoop.util.Time;
|
|
|
+import org.apache.hadoop.util.VersionInfo;
|
|
|
import org.mortbay.util.ajax.JSON;
|
|
|
|
|
|
-import javax.management.ObjectName;
|
|
|
-
|
|
|
-import java.io.*;
|
|
|
-import java.lang.management.ManagementFactory;
|
|
|
-import java.net.*;
|
|
|
-import java.nio.channels.SocketChannel;
|
|
|
-import java.security.PrivilegedExceptionAction;
|
|
|
-import java.util.*;
|
|
|
-import java.util.concurrent.atomic.AtomicInteger;
|
|
|
-
|
|
|
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
|
|
|
-import static org.apache.hadoop.util.ExitUtil.terminate;
|
|
|
+import com.google.common.annotations.VisibleForTesting;
|
|
|
+import com.google.common.base.Joiner;
|
|
|
+import com.google.common.base.Preconditions;
|
|
|
+import com.google.protobuf.BlockingService;
|
|
|
|
|
|
/**********************************************************
|
|
|
* DataNode is a class (and program) that stores a set of
|
|
@@ -1475,8 +1557,8 @@ public class DataNode extends Configured
     return xmitsInProgress.get();
   }

-  private void transferBlock(ExtendedBlock block, DatanodeInfo xferTargets[])
-      throws IOException {
+  private void transferBlock(ExtendedBlock block, DatanodeInfo[] xferTargets,
+      StorageType[] xferTargetStorageTypes) throws IOException {
     BPOfferService bpos = getBPOSForBlock(block);
     DatanodeRegistration bpReg = getDNRegistrationForBP(block.getBlockPoolId());

@@ -1512,16 +1594,17 @@ public class DataNode extends Configured
       LOG.info(bpReg + " Starting thread to transfer " +
           block + " to " + xfersBuilder);

-      new Daemon(new DataTransfer(xferTargets, block,
+      new Daemon(new DataTransfer(xferTargets, xferTargetStorageTypes, block,
           BlockConstructionStage.PIPELINE_SETUP_CREATE, "")).start();
     }
   }

   void transferBlocks(String poolId, Block blocks[],
-      DatanodeInfo xferTargets[][]) {
+      DatanodeInfo xferTargets[][], StorageType[][] xferTargetStorageTypes) {
     for (int i = 0; i < blocks.length; i++) {
       try {
-        transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i]);
+        transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i],
+            xferTargetStorageTypes[i]);
       } catch (IOException ie) {
         LOG.warn("Failed to transfer block " + blocks[i], ie);
       }
@@ -1624,6 +1707,7 @@ public class DataNode extends Configured
    */
   private class DataTransfer implements Runnable {
     final DatanodeInfo[] targets;
+    final StorageType[] targetStorageTypes;
     final ExtendedBlock b;
     final BlockConstructionStage stage;
     final private DatanodeRegistration bpReg;
@@ -1634,7 +1718,8 @@ public class DataNode extends Configured
      * Connect to the first item in the target list. Pass along the
      * entire target list, the block, and the data.
      */
-    DataTransfer(DatanodeInfo targets[], ExtendedBlock b, BlockConstructionStage stage,
+    DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes,
+        ExtendedBlock b, BlockConstructionStage stage,
         final String clientname) {
       if (DataTransferProtocol.LOG.isDebugEnabled()) {
         DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
@@ -1644,6 +1729,7 @@ public class DataNode extends Configured
             + ", targets=" + Arrays.asList(targets));
       }
       this.targets = targets;
+      this.targetStorageTypes = targetStorageTypes;
       this.b = b;
       this.stage = stage;
       BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
@@ -1702,7 +1788,8 @@ public class DataNode extends Configured
             false, false, true, DataNode.this, null, cachingStrategy);
         DatanodeInfo srcNode = new DatanodeInfo(bpReg);

-        new Sender(out).writeBlock(b, accessToken, clientname, targets, srcNode,
+        new Sender(out).writeBlock(b, targetStorageTypes[0], accessToken,
+            clientname, targets, targetStorageTypes, srcNode,
             stage, 0, 0, 0, 0, blockSender.getChecksum(), cachingStrategy);

         // send data & checksum
@@ -2403,7 +2490,8 @@ public class DataNode extends Configured
    * @param client client name
    */
   void transferReplicaForPipelineRecovery(final ExtendedBlock b,
-      final DatanodeInfo[] targets, final String client) throws IOException {
+      final DatanodeInfo[] targets, final StorageType[] targetStorageTypes,
+      final String client) throws IOException {
     final long storedGS;
     final long visible;
     final BlockConstructionStage stage;
@@ -2436,7 +2524,7 @@ public class DataNode extends Configured
     b.setNumBytes(visible);

     if (targets.length > 0) {
-      new DataTransfer(targets, targetStorageTypes, b, stage, client).run();
     }
   }

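Note on the change as a whole: the hunks above thread a StorageType alongside
every DatanodeInfo transfer target, from transferBlocks() down to the
Sender#writeBlock call, as parallel arrays — index i of the storage-type array
names the medium the replica for target i should land on, and
targetStorageTypes[0] is the type for the first node in the pipeline. The
standalone sketch below illustrates only that parallel-array contract; its
class and method names are invented for illustration and are not HDFS APIs.

/**
 * Illustrative sketch only: models the pairing of transfer targets with
 * storage types introduced by this patch. Not HDFS code.
 */
public class StorageTypePairingSketch {
  enum StorageType { DISK, SSD, ARCHIVE }

  // Mirrors the shape of the patched transferBlock(): one storage type per
  // target, paired by index, so the arrays must be the same length.
  static void transferBlock(String block, String[] targets,
      StorageType[] targetStorageTypes) {
    if (targets.length != targetStorageTypes.length) {
      throw new IllegalArgumentException(
          "each transfer target needs exactly one storage type");
    }
    for (int i = 0; i < targets.length; i++) {
      // In the real patch, targetStorageTypes[0] goes to the first pipeline
      // node and the whole array is forwarded for the downstream nodes.
      System.out.println("transfer " + block + " to " + targets[i]
          + " on " + targetStorageTypes[i]);
    }
  }

  public static void main(String[] args) {
    transferBlock("blk_1073741825_1001",
        new String[] { "dn1:50010", "dn2:50010" },
        new StorageType[] { StorageType.DISK, StorageType.ARCHIVE });
  }
}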