@@ -18,66 +18,10 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
-import java.io.BufferedOutputStream;
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.SocketException;
-import java.net.SocketTimeoutException;
-import java.net.URI;
-import java.net.UnknownHostException;
-import java.nio.channels.ClosedByInterruptException;
-import java.nio.channels.SocketChannel;
-import java.security.PrivilegedExceptionAction;
-import java.util.AbstractList;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.BlockingService;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -95,38 +39,15 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.net.DomainPeerServer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
-import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.protocol.datatransfer.*;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
-import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
-import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB;
-import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.PBHelper;
-import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
+import org.apache.hadoop.hdfs.protocolPB.*;
+import org.apache.hadoop.hdfs.security.token.block.*;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
-import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
-import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -141,11 +62,7 @@ import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets;
 import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.http.HttpServer;
@@ -166,23 +83,21 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.DiskChecker;
+import org.apache.hadoop.util.*;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.GenericOptionsParser;
-import org.apache.hadoop.util.ServicePlugin;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.protobuf.BlockingService;
+import java.io.*;
+import java.net.*;
+import java.nio.channels.ClosedByInterruptException;
+import java.nio.channels.SocketChannel;
+import java.security.PrivilegedExceptionAction;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.util.ExitUtil.terminate;
 
 /**********************************************************
  * DataNode is a class (and program) that stores a set of
@@ -264,6 +179,7 @@ public class DataNode extends Configured
   private volatile boolean heartbeatsDisabledForTests = false;
   private DataStorage storage = null;
   private HttpServer infoServer = null;
+  private int infoSecurePort;
   DataNodeMetrics metrics;
   private InetSocketAddress streamingAddr;
 
@@ -392,6 +308,7 @@ public class DataNode extends Configured
         : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0,
             conf, new AccessControlList(conf.get(DFS_ADMIN, " ")),
             secureResources.getListener());
+
     LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
     if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
       boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
@@ -405,6 +322,7 @@ public class DataNode extends Configured
       if(LOG.isDebugEnabled()) {
         LOG.debug("Datanode listening for SSL on " + secInfoSocAddr);
       }
+      infoSecurePort = secInfoSocAddr.getPort();
     }
     this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
     this.infoServer.addInternalServlet(null, "/getFileChecksum/*",
@@ -771,7 +689,8 @@ public class DataNode extends Configured
     }
     DatanodeID dnId = new DatanodeID(
         streamingAddr.getAddress().getHostAddress(), hostName,
-        getStorageId(), getXferPort(), getInfoPort(), getIpcPort());
+        getStorageId(), getXferPort(), getInfoPort(),
+        infoSecurePort, getIpcPort());
     return new DatanodeRegistration(dnId, storageInfo,
         new ExportedBlockKeys(), VersionInfo.getVersion());
   }
@@ -869,7 +788,7 @@ public class DataNode extends Configured
    * If this is the first block pool to register, this also initializes
    * the datanode-scoped storage.
    *
-   * @param nsInfo the handshake response from the NN.
+   * @param bpos Block pool offer service
    * @throws IOException if the NN is inconsistent with the local storage.
    */
   void initBlockPool(BPOfferService bpos) throws IOException {
@@ -2323,6 +2242,13 @@ public class DataNode extends Configured
     return infoServer.getPort();
   }
 
+  /**
+   * @return the datanode's https port
+   */
+  public int getInfoSecurePort() {
+    return infoSecurePort;
+  }
+
   /**
    * Returned information is a JSON representation of a map with
    * name node host name as the key and block pool Id as the value.
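
The net effect of the patch: the SSL listener's port is captured in the new
infoSecurePort field, exposed through the getInfoSecurePort() accessor, and
threaded into the DatanodeID built at registration time. Because the field is
only assigned inside the DFS_HTTPS_ENABLE_KEY branch, it keeps the int default
of 0 when HTTPS is disabled. Below is a minimal sketch (not part of the patch)
of a caller reading both web ports; dn, ipAddr, and hostName are assumed
placeholders, and the constructor argument order simply mirrors the hunk at
old line 771 above.

    // Sketch only; assumes a running DataNode dn and placeholder
    // ipAddr/hostName strings.
    int infoPort   = dn.getInfoPort();        // plain-HTTP web UI port
    int securePort = dn.getInfoSecurePort();  // 0 when dfs.https.enable is false
    DatanodeID dnId = new DatanodeID(
        ipAddr, hostName, dn.getStorageId(),
        dn.getXferPort(), infoPort, securePort, dn.getIpcPort());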