Browse Source

commit 862b8fcb187a1d204644fc3a45cf9b2edfe31863
Author: Jitendra Nath Pandey <jitendra@yahoo-inc.com>
Date: Fri Dec 25 13:56:07 2009 -0800

HDFS-764 and HADOOP-6367 from https://issues.apache.org/jira/secure/attachment/12428959/HADOOP-6367_HDFS-764-0_20.1.patch
Combined patch for the two JIRAs.

+++ b/YAHOO-CHANGES.txt
+ HADOOP-6367, HDFS-764. Moving the Access Token implementation from Common
+ to HDFS. These two JIRAs must be committed together, otherwise the build
+ will fail. (Jitendra Nath Pandey)
+


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-security-patches@1077093 13f79535-47bb-0310-9956-ffa450edef68

Owen O'Malley 14 years ago
parent commit bc47366936
26 changed files with 130 additions and 125 deletions
  1. + 4 - 3    src/hdfs/hdfs-default.xml
  2. + 9 - 9    src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
  3. + 4 - 4    src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
  4. + 30 - 26  src/hdfs/org/apache/hadoop/hdfs/security/AccessTokenHandler.java
  5. + 6 - 6    src/hdfs/org/apache/hadoop/hdfs/security/BlockAccessKey.java
  6. + 7 - 7    src/hdfs/org/apache/hadoop/hdfs/security/BlockAccessToken.java
  7. + 9 - 9    src/hdfs/org/apache/hadoop/hdfs/security/ExportedAccessKeys.java
  8. + 1 - 1    src/hdfs/org/apache/hadoop/hdfs/security/InvalidAccessTokenException.java
  9. + 4 - 4    src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  10. + 4 - 4   src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  11. + 7 - 7   src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  12. + 2 - 2   src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  13. + 3 - 2   src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java
  14. + 1 - 2   src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  15. + 1 - 1   src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
  16. + 1 - 1   src/hdfs/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java
  17. + 1 - 1   src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
  18. + 2 - 2   src/test/org/apache/hadoop/hdfs/DFSTestUtil.java
  19. + 10 - 10 src/test/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
  20. + 2 - 2   src/test/org/apache/hadoop/hdfs/security/SecurityTestUtil.java
  21. + 6 - 6   src/test/org/apache/hadoop/hdfs/security/TestAccessToken.java
  22. + 2 - 2   src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
  23. + 2 - 2   src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
  24. + 7 - 7   src/test/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java
  25. + 3 - 3   src/webapps/datanode/browseBlock.jsp
  26. + 2 - 2   src/webapps/datanode/tail.jsp

+ 4 - 3
src/hdfs/hdfs-default.xml

@@ -190,7 +190,7 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.access.token.enable</name>
+  <name>dfs.block.access.token.enable</name>
   <value>false</value>
   <description>
     If "true", access tokens are used as capabilities for accessing datanodes.
@@ -199,7 +199,7 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.access.key.update.interval</name>
+  <name>dfs.block.access.key.update.interval</name>
   <value>600</value>
   <description>
     Interval in minutes at which namenode updates its access keys.
@@ -207,11 +207,12 @@ creations/deletions), or "all".</description>
 </property>
 
 <property>
-  <name>dfs.access.token.lifetime</name>
+  <name>dfs.block.access.token.lifetime</name>
   <value>600</value>
   <description>The lifetime of access tokens in minutes.</description>
 </property>
 
+
 <property>
   <name>dfs.data.dir</name>
   <value>${hadoop.tmp.dir}/dfs/data</value>

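For illustration (not part of this patch): a minimal sketch of how the three renamed keys would be read through the standard Hadoop Configuration API. The defaults mirror the hdfs-default.xml values above; note the config values are in minutes.

    import org.apache.hadoop.conf.Configuration;

    public class BlockAccessTokenConfig {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Renamed in this patch from dfs.access.token.enable
        boolean enabled = conf.getBoolean("dfs.block.access.token.enable", false);
        // Both intervals are expressed in minutes, per the descriptions above
        long keyUpdateInterval = conf.getLong("dfs.block.access.key.update.interval", 600);
        long tokenLifetime = conf.getLong("dfs.block.access.token.lifetime", 600);
        System.out.println(enabled + ", " + keyUpdateInterval + ", " + tokenLifetime);
      }
    }
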
+ 9 - 9
src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

@@ -29,14 +29,14 @@ import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus;
 import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
+import org.apache.hadoop.hdfs.security.InvalidAccessTokenException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
-import org.apache.hadoop.security.InvalidAccessTokenException;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.AccessToken;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.util.*;
 
@@ -1350,7 +1350,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
       checksumSize = this.checksum.getChecksumSize();
     }
 
-    public static BlockReader newBlockReader(Socket sock, String file, long blockId, AccessToken accessToken, 
+    public static BlockReader newBlockReader(Socket sock, String file, long blockId, BlockAccessToken accessToken, 
         long genStamp, long startOffset, long len, int bufferSize) throws IOException {
       return newBlockReader(sock, file, blockId, accessToken, genStamp, startOffset, len, bufferSize,
           true);
@@ -1358,7 +1358,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
 
     /** Java Doc required */
     public static BlockReader newBlockReader( Socket sock, String file, long blockId, 
-                                       AccessToken accessToken,
+                                       BlockAccessToken accessToken,
                                        long genStamp,
                                        long startOffset, long len,
                                        int bufferSize, boolean verifyChecksum)
@@ -1369,7 +1369,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
 
     public static BlockReader newBlockReader( Socket sock, String file,
                                        long blockId, 
-                                       AccessToken accessToken,
+                                       BlockAccessToken accessToken,
                                        long genStamp,
                                        long startOffset, long len,
                                        int bufferSize, boolean verifyChecksum,
@@ -1679,7 +1679,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
           NetUtils.connect(s, targetAddr, socketTimeout);
           s.setSoTimeout(socketTimeout);
           Block blk = targetBlock.getBlock();
-          AccessToken accessToken = targetBlock.getAccessToken();
+          BlockAccessToken accessToken = targetBlock.getAccessToken();
           
           blockReader = BlockReader.newBlockReader(s, src, blk.getBlockId(), 
               accessToken, 
@@ -1905,7 +1905,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
           dn = socketFactory.createSocket();
           NetUtils.connect(dn, targetAddr, socketTimeout);
           dn.setSoTimeout(socketTimeout);
-          AccessToken accessToken = block.getAccessToken();
+          BlockAccessToken accessToken = block.getAccessToken();
               
           int len = (int) (end - start + 1);
               
@@ -2170,7 +2170,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     private DataOutputStream blockStream;
     private DataInputStream blockReplyStream;
     private Block block;
-    private AccessToken accessToken;
+    private BlockAccessToken accessToken;
     final private long blockSize;
     private DataChecksum checksum;
     private LinkedList<Packet> dataQueue = new LinkedList<Packet>();
@@ -2196,7 +2196,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     private volatile boolean appendChunk = false;   // appending to existing partial block
     private long initialFileSize = 0; // at time of file open
 
-    AccessToken getAccessToken() {
+    BlockAccessToken getAccessToken() {
       return accessToken;
     }
 

+ 4 - 4
src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedBlock.java

@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
 import org.apache.hadoop.io.*;
-import org.apache.hadoop.security.AccessToken;
 
 import java.io.*;
 
@@ -44,7 +44,7 @@ public class LocatedBlock implements Writable {
   // else false. If block has few corrupt replicas, they are filtered and 
   // their locations are not part of this object
   private boolean corrupt;
-  private AccessToken accessToken = new AccessToken();
+  private BlockAccessToken accessToken = new BlockAccessToken();
 
   /**
    */
@@ -78,11 +78,11 @@ public class LocatedBlock implements Writable {
     }
   }
 
-  public AccessToken getAccessToken() {
+  public BlockAccessToken getAccessToken() {
     return accessToken;
   }
 
-  public void setAccessToken(AccessToken token) {
+  public void setAccessToken(BlockAccessToken token) {
     this.accessToken = token;
   }
 

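For illustration (not part of the patch): after this change, client code that pulls the token off a located block compiles against the HDFS type instead of the Common one. The `lblock` argument is assumed to come from a getBlockLocations() call, as in TestAccessTokenWithDFS further down.

    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.security.BlockAccessToken;

    public class LocatedBlockTokenExample {
      // Before this patch the return type was org.apache.hadoop.security.AccessToken
      static BlockAccessToken tokenOf(LocatedBlock lblock) {
        return lblock.getAccessToken();
      }
    }
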
+ 30 - 26
src/core/org/apache/hadoop/security/AccessTokenHandler.java → src/hdfs/org/apache/hadoop/hdfs/security/AccessTokenHandler.java

@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.security;
+package org.apache.hadoop.hdfs.security;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -39,6 +39,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  * AccessTokenHandler can be instantiated in 2 modes, master mode and slave
@@ -49,9 +50,12 @@ import org.apache.hadoop.io.WritableUtils;
  */
 public class AccessTokenHandler {
   private static final Log LOG = LogFactory.getLog(AccessTokenHandler.class);
-  public static final String STRING_ENABLE_ACCESS_TOKEN = "dfs.access.token.enable";
-  public static final String STRING_ACCESS_KEY_UPDATE_INTERVAL = "dfs.access.key.update.interval";
-  public static final String STRING_ACCESS_TOKEN_LIFETIME = "dfs.access.token.lifetime";
+  public static final String STRING_ENABLE_ACCESS_TOKEN =
+                        "dfs.block.access.token.enable";
+  public static final String STRING_ACCESS_KEY_UPDATE_INTERVAL =
+                        "dfs.block.access.key.update.interval";
+  public static final String STRING_ACCESS_TOKEN_LIFETIME =
+                        "dfs.block.access.token.lifetime";
 
   private final boolean isMaster;
   /*
@@ -63,9 +67,9 @@ public class AccessTokenHandler {
   private long tokenLifetime;
   private long serialNo = new SecureRandom().nextLong();
   private KeyGenerator keyGen;
-  private AccessKey currentKey;
-  private AccessKey nextKey;
-  private Map<Long, AccessKey> allKeys;
+  private BlockAccessKey currentKey;
+  private BlockAccessKey nextKey;
+  private Map<Long, BlockAccessKey> allKeys;
 
   public static enum AccessMode {
     READ, WRITE, COPY, REPLACE
@@ -84,7 +88,7 @@ public class AccessTokenHandler {
     this.isMaster = isMaster;
     this.keyUpdateInterval = keyUpdateInterval;
     this.tokenLifetime = tokenLifetime;
-    this.allKeys = new HashMap<Long, AccessKey>();
+    this.allKeys = new HashMap<Long, BlockAccessKey>();
     if (isMaster) {
       try {
         generateKeys();
@@ -112,11 +116,11 @@ public class AccessTokenHandler {
      * more.
      */
     serialNo++;
-    currentKey = new AccessKey(serialNo, new Text(keyGen.generateKey()
+    currentKey = new BlockAccessKey(serialNo, new Text(keyGen.generateKey()
         .getEncoded()), System.currentTimeMillis() + 2 * keyUpdateInterval
         + tokenLifetime);
     serialNo++;
-    nextKey = new AccessKey(serialNo, new Text(keyGen.generateKey()
+    nextKey = new BlockAccessKey(serialNo, new Text(keyGen.generateKey()
         .getEncoded()), System.currentTimeMillis() + 3 * keyUpdateInterval
         + tokenLifetime);
     allKeys.put(currentKey.getKeyID(), currentKey);
@@ -124,7 +128,7 @@ public class AccessTokenHandler {
   }
 
   /** Initialize Mac function */
-  private synchronized void initMac(AccessKey key) throws IOException {
+  private synchronized void initMac(BlockAccessKey key) throws IOException {
     try {
       Mac mac = Mac.getInstance("HmacSHA1");
       mac.init(new SecretKeySpec(key.getKey().getBytes(), "HmacSHA1"));
@@ -143,14 +147,14 @@ public class AccessTokenHandler {
     if (LOG.isDebugEnabled())
       LOG.debug("Exporting access keys");
     return new ExportedAccessKeys(true, keyUpdateInterval, tokenLifetime,
-        currentKey, allKeys.values().toArray(new AccessKey[0]));
+        currentKey, allKeys.values().toArray(new BlockAccessKey[0]));
   }
 
   private synchronized void removeExpiredKeys() {
     long now = System.currentTimeMillis();
-    for (Iterator<Map.Entry<Long, AccessKey>> it = allKeys.entrySet()
+    for (Iterator<Map.Entry<Long, BlockAccessKey>> it = allKeys.entrySet()
         .iterator(); it.hasNext();) {
-      Map.Entry<Long, AccessKey> e = it.next();
+      Map.Entry<Long, BlockAccessKey> e = it.next();
       if (e.getValue().getExpiryDate() < now) {
         it.remove();
       }
@@ -168,7 +172,7 @@ public class AccessTokenHandler {
     removeExpiredKeys();
     this.currentKey = exportedKeys.getCurrentKey();
     initMac(currentKey);
-    AccessKey[] receivedKeys = exportedKeys.getAllKeys();
+    BlockAccessKey[] receivedKeys = exportedKeys.getAllKeys();
     for (int i = 0; i < receivedKeys.length; i++) {
       if (receivedKeys[i] == null)
         continue;
@@ -185,27 +189,27 @@ public class AccessTokenHandler {
     LOG.info("Updating access keys");
     removeExpiredKeys();
     // set final expiry date of retiring currentKey
-    allKeys.put(currentKey.getKeyID(), new AccessKey(currentKey.getKeyID(),
+    allKeys.put(currentKey.getKeyID(), new BlockAccessKey(currentKey.getKeyID(),
         currentKey.getKey(), System.currentTimeMillis() + keyUpdateInterval
             + tokenLifetime));
     // update the estimated expiry date of new currentKey
-    currentKey = new AccessKey(nextKey.getKeyID(), nextKey.getKey(), System
+    currentKey = new BlockAccessKey(nextKey.getKeyID(), nextKey.getKey(), System
         .currentTimeMillis()
         + 2 * keyUpdateInterval + tokenLifetime);
     initMac(currentKey);
     allKeys.put(currentKey.getKeyID(), currentKey);
     // generate a new nextKey
     serialNo++;
-    nextKey = new AccessKey(serialNo, new Text(keyGen.generateKey()
+    nextKey = new BlockAccessKey(serialNo, new Text(keyGen.generateKey()
         .getEncoded()), System.currentTimeMillis() + 3 * keyUpdateInterval
         + tokenLifetime);
     allKeys.put(nextKey.getKeyID(), nextKey);
   }
 
   /** Check if token is well formed */
-  private synchronized boolean verifyToken(long keyID, AccessToken token)
+  private synchronized boolean verifyToken(long keyID, BlockAccessToken token)
       throws IOException {
-    AccessKey key = allKeys.get(keyID);
+    BlockAccessKey key = allKeys.get(keyID);
     if (key == null) {
       LOG.warn("Access key for keyID=" + keyID + " doesn't exist.");
       return false;
@@ -219,7 +223,7 @@ public class AccessTokenHandler {
   }
 
   /** Generate an access token for current user */
-  public AccessToken generateToken(long blockID, EnumSet<AccessMode> modes)
+  public BlockAccessToken generateToken(long blockID, EnumSet<AccessMode> modes)
       throws IOException {
     UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
     String userID = (ugi == null ? null : ugi.getUserName());
@@ -227,7 +231,7 @@ public class AccessTokenHandler {
   }
 
   /** Generate an access token for a specified user */
-  public synchronized AccessToken generateToken(String userID, long blockID,
+  public synchronized BlockAccessToken generateToken(String userID, long blockID,
       EnumSet<AccessMode> modes) throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Generating access token for user=" + userID + ", blockID="
@@ -247,12 +251,12 @@ public class AccessTokenHandler {
       WritableUtils.writeEnum(out, aMode);
     }
     Text tokenID = new Text(buf.toByteArray());
-    return new AccessToken(tokenID, new Text(currentKey.getMac().doFinal(
+    return new BlockAccessToken(tokenID, new Text(currentKey.getMac().doFinal(
         tokenID.getBytes())));
   }
 
   /** Check if access should be allowed. userID is not checked if null */
-  public boolean checkAccess(AccessToken token, String userID, long blockID,
+  public boolean checkAccess(BlockAccessToken token, String userID, long blockID,
       AccessMode mode) throws IOException {
     long oExpiry = 0;
     long oKeyID = 0;
@@ -292,7 +296,7 @@ public class AccessTokenHandler {
 
   /** check if a token is expired. for unit test only.
    *  return true when token is expired, false otherwise */
-  static boolean isTokenExpired(AccessToken token) throws IOException {
+  static boolean isTokenExpired(BlockAccessToken token) throws IOException {
     ByteArrayInputStream buf = new ByteArrayInputStream(token.getTokenID()
         .getBytes());
     DataInputStream in = new DataInputStream(buf);
@@ -304,4 +308,4 @@ public class AccessTokenHandler {
   synchronized void setTokenLifetime(long tokenLifetime) {
     this.tokenLifetime = tokenLifetime;
   }
-}
+}

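For illustration (not part of the patch): a minimal generate-and-verify sketch against the relocated handler. The constructor argument order (isMaster, keyUpdateInterval, tokenLifetime) is an assumption read off the field assignments above and the TestAccessToken setup below; the two intervals are in milliseconds here, since the handler adds them directly to System.currentTimeMillis().

    import java.util.EnumSet;
    import org.apache.hadoop.hdfs.security.AccessTokenHandler;
    import org.apache.hadoop.hdfs.security.BlockAccessToken;

    public class TokenRoundTrip {
      public static void main(String[] args) throws Exception {
        // Master mode: generates its own keys (see generateKeys() above)
        AccessTokenHandler master =
            new AccessTokenHandler(true, 10 * 60 * 1000L, 10 * 60 * 1000L);
        BlockAccessToken token =
            master.generateToken(42L, EnumSet.of(AccessTokenHandler.AccessMode.READ));
        // userID == null skips the user check, per the checkAccess javadoc above
        System.out.println(
            master.checkAccess(token, null, 42L, AccessTokenHandler.AccessMode.READ));
      }
    }
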
+ 6 - 6
src/core/org/apache/hadoop/security/AccessKey.java → src/hdfs/org/apache/hadoop/hdfs/security/BlockAccessKey.java

@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.security;
+package org.apache.hadoop.hdfs.security;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -31,17 +31,17 @@ import org.apache.hadoop.io.WritableUtils;
 /**
  * Key used for generating and verifying access tokens
  */
-public class AccessKey implements Writable {
+public class BlockAccessKey implements Writable {
   private long keyID;
   private Text key;
   private long expiryDate;
   private Mac mac;
 
-  public AccessKey() {
+  public BlockAccessKey() {
     this(0L, new Text(), 0L);
   }
 
-  public AccessKey(long keyID, Text key, long expiryDate) {
+  public BlockAccessKey(long keyID, Text key, long expiryDate) {
     this.keyID = keyID;
     this.key = key;
     this.expiryDate = expiryDate;
@@ -76,8 +76,8 @@ public class AccessKey implements Writable {
     if (obj == this) {
       return true;
     }
-    if (obj instanceof AccessKey) {
-      AccessKey that = (AccessKey) obj;
+    if (obj instanceof BlockAccessKey) {
+      BlockAccessKey that = (BlockAccessKey) obj;
       return this.keyID == that.keyID && isEqual(this.key, that.key)
           && this.expiryDate == that.expiryDate;
     }

+ 7 - 7
src/core/org/apache/hadoop/security/AccessToken.java → src/hdfs/org/apache/hadoop/hdfs/security/BlockAccessToken.java

@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.security;
+package org.apache.hadoop.hdfs.security;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -25,16 +25,16 @@ import java.io.IOException;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 
-public class AccessToken implements Writable {
-  public static final AccessToken DUMMY_TOKEN = new AccessToken();
+public class BlockAccessToken implements Writable {
+  public static final BlockAccessToken DUMMY_TOKEN = new BlockAccessToken();
   private Text tokenID;
   private Text tokenAuthenticator;
 
-  public AccessToken() {
+  public BlockAccessToken() {
     this(new Text(), new Text());
   }
 
-  public AccessToken(Text tokenID, Text tokenAuthenticator) {
+  public BlockAccessToken(Text tokenID, Text tokenAuthenticator) {
     this.tokenID = tokenID;
     this.tokenAuthenticator = tokenAuthenticator;
   }
@@ -56,8 +56,8 @@ public class AccessToken implements Writable {
     if (obj == this) {
       return true;
     }
-    if (obj instanceof AccessToken) {
-      AccessToken that = (AccessToken) obj;
+    if (obj instanceof BlockAccessToken) {
+      BlockAccessToken that = (BlockAccessToken) obj;
       return isEqual(this.tokenID, that.tokenID)
           && isEqual(this.tokenAuthenticator, that.tokenAuthenticator);
     }

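For illustration (not part of the patch): the renamed token is still a plain Writable, so a buffer round trip exercises the write()/readFields() pair and the equals() shown above. DataOutputBuffer and DataInputBuffer are the stock org.apache.hadoop.io helpers.

    import org.apache.hadoop.hdfs.security.BlockAccessToken;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;

    public class TokenWritableRoundTrip {
      public static void main(String[] args) throws Exception {
        DataOutputBuffer out = new DataOutputBuffer();
        BlockAccessToken.DUMMY_TOKEN.write(out); // same call the tests put on the wire
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        BlockAccessToken copy = new BlockAccessToken();
        copy.readFields(in);
        System.out.println(copy.equals(BlockAccessToken.DUMMY_TOKEN)); // true
      }
    }
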
+ 9 - 9
src/core/org/apache/hadoop/security/ExportedAccessKeys.java → src/hdfs/org/apache/hadoop/hdfs/security/ExportedAccessKeys.java

@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.security;
+package org.apache.hadoop.hdfs.security;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -35,15 +35,15 @@ public class ExportedAccessKeys implements Writable {
   private boolean isAccessTokenEnabled;
   private long keyUpdateInterval;
   private long tokenLifetime;
-  private AccessKey currentKey;
-  private AccessKey[] allKeys;
+  private BlockAccessKey currentKey;
+  private BlockAccessKey[] allKeys;
 
   public ExportedAccessKeys() {
-    this(false, 0, 0, new AccessKey(), new AccessKey[0]);
+    this(false, 0, 0, new BlockAccessKey(), new BlockAccessKey[0]);
   }
 
   ExportedAccessKeys(boolean isAccessTokenEnabled, long keyUpdateInterval,
-      long tokenLifetime, AccessKey currentKey, AccessKey[] allKeys) {
+      long tokenLifetime, BlockAccessKey currentKey, BlockAccessKey[] allKeys) {
     this.isAccessTokenEnabled = isAccessTokenEnabled;
     this.keyUpdateInterval = keyUpdateInterval;
     this.tokenLifetime = tokenLifetime;
@@ -63,11 +63,11 @@ public class ExportedAccessKeys implements Writable {
     return tokenLifetime;
   }
 
-  public AccessKey getCurrentKey() {
+  public BlockAccessKey getCurrentKey() {
     return currentKey;
   }
 
-  public AccessKey[] getAllKeys() {
+  public BlockAccessKey[] getAllKeys() {
     return allKeys;
   }
 
@@ -128,9 +128,9 @@ public class ExportedAccessKeys implements Writable {
     keyUpdateInterval = in.readLong();
     tokenLifetime = in.readLong();
     currentKey.readFields(in);
-    this.allKeys = new AccessKey[in.readInt()];
+    this.allKeys = new BlockAccessKey[in.readInt()];
     for (int i = 0; i < allKeys.length; i++) {
-      allKeys[i] = new AccessKey();
+      allKeys[i] = new BlockAccessKey();
       allKeys[i].readFields(in);
     }
   }

+ 1 - 1
src/core/org/apache/hadoop/security/InvalidAccessTokenException.java → src/hdfs/org/apache/hadoop/hdfs/security/InvalidAccessTokenException.java

@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.security;
+package org.apache.hadoop.hdfs.security;
 
 import java.io.IOException;
 

+ 4 - 4
src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -56,6 +56,9 @@ import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
+import org.apache.hadoop.hdfs.security.AccessTokenHandler;
+import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
@@ -72,9 +75,6 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.security.AccessToken;
-import org.apache.hadoop.security.AccessTokenHandler;
-import org.apache.hadoop.security.ExportedAccessKeys;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
@@ -369,7 +369,7 @@ public class Balancer implements Tool {
       out.writeLong(block.getBlock().getGenerationStamp());
       Text.writeString(out, source.getStorageID());
       proxySource.write(out);
-      AccessToken accessToken = AccessToken.DUMMY_TOKEN;
+      BlockAccessToken accessToken = BlockAccessToken.DUMMY_TOKEN;
       if (isAccessTokenEnabled) {
         accessToken = accessTokenHandler.generateToken(null, block.getBlock()
             .getBlockId(), EnumSet.of(AccessTokenHandler.AccessMode.REPLACE,

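For illustration (not part of the patch): the generate-side pattern the Balancer hunk above and the DataNode hunk below share, falling back to DUMMY_TOKEN when tokens are disabled. The helper itself is hypothetical; only generateToken() and the AccessMode enum come from the diff.

    import java.io.IOException;
    import java.util.EnumSet;
    import org.apache.hadoop.hdfs.security.AccessTokenHandler;
    import org.apache.hadoop.hdfs.security.BlockAccessToken;

    public class TokenForTransfer {
      // Hypothetical helper mirroring the replaceBlock path above
      static BlockAccessToken tokenFor(boolean isAccessTokenEnabled,
          AccessTokenHandler handler, long blockId) throws IOException {
        BlockAccessToken accessToken = BlockAccessToken.DUMMY_TOKEN;
        if (isAccessTokenEnabled) {
          accessToken = handler.generateToken(null, blockId,
              EnumSet.of(AccessTokenHandler.AccessMode.REPLACE,
                         AccessTokenHandler.AccessMode.COPY));
        }
        return accessToken;
      }
    }
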
+ 4 - 4
src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -84,9 +84,6 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.AccessToken;
-import org.apache.hadoop.security.AccessTokenHandler;
-import org.apache.hadoop.security.ExportedAccessKeys;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.authorize.ConfiguredPolicy;
 import org.apache.hadoop.security.authorize.PolicyProvider;
@@ -97,6 +94,9 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
+import org.apache.hadoop.hdfs.security.AccessTokenHandler;
+import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
 
 /**********************************************************
  * DataNode is a class (and program) that stores a set of
@@ -1211,7 +1211,7 @@ public class DataNode extends Configured
         for (int i = 1; i < targets.length; i++) {
           targets[i].write(out);
         }
-        AccessToken accessToken = AccessToken.DUMMY_TOKEN;
+        BlockAccessToken accessToken = BlockAccessToken.DUMMY_TOKEN;
         if (isAccessTokenEnabled) {
           accessToken = accessTokenHandler.generateToken(null, b.getBlockId(),
               EnumSet.of(AccessTokenHandler.AccessMode.WRITE));

+ 7 - 7
src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -32,14 +32,14 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
+import org.apache.hadoop.hdfs.security.AccessTokenHandler;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.AccessToken;
-import org.apache.hadoop.security.AccessTokenHandler;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT;
@@ -151,7 +151,7 @@ class DataXceiver implements Runnable, FSConstants {
     long startOffset = in.readLong();
     long length = in.readLong();
     String clientName = Text.readString(in);
-    AccessToken accessToken = new AccessToken();
+    BlockAccessToken accessToken = new BlockAccessToken();
     accessToken.readFields(in);
     OutputStream baseStream = NetUtils.getOutputStream(s, 
         datanode.socketWriteTimeout);
@@ -258,7 +258,7 @@ class DataXceiver implements Runnable, FSConstants {
       tmp.readFields(in);
       targets[i] = tmp;
     }
-    AccessToken accessToken = new AccessToken();
+    BlockAccessToken accessToken = new BlockAccessToken();
     accessToken.readFields(in);
     DataOutputStream replyOut = null;   // stream to prev target
     replyOut = new DataOutputStream(
@@ -423,7 +423,7 @@ class DataXceiver implements Runnable, FSConstants {
    */
   void getBlockChecksum(DataInputStream in) throws IOException {
     final Block block = new Block(in.readLong(), 0 , in.readLong());
-    AccessToken accessToken = new AccessToken();
+    BlockAccessToken accessToken = new BlockAccessToken();
     accessToken.readFields(in);
     DataOutputStream out = new DataOutputStream(NetUtils.getOutputStream(s,
         datanode.socketWriteTimeout));
@@ -484,7 +484,7 @@ class DataXceiver implements Runnable, FSConstants {
     // Read in the header
     long blockId = in.readLong(); // read block id
     Block block = new Block(blockId, 0, in.readLong());
-    AccessToken accessToken = new AccessToken();
+    BlockAccessToken accessToken = new BlockAccessToken();
     accessToken.readFields(in);
     if (datanode.isAccessTokenEnabled
         && !datanode.accessTokenHandler.checkAccess(accessToken, null, blockId,
@@ -562,7 +562,7 @@ class DataXceiver implements Runnable, FSConstants {
     String sourceID = Text.readString(in); // read del hint
     DatanodeInfo proxySource = new DatanodeInfo(); // read proxy source
     proxySource.readFields(in);
-    AccessToken accessToken = new AccessToken();
+    BlockAccessToken accessToken = new BlockAccessToken();
     accessToken.readFields(in);
     if (datanode.isAccessTokenEnabled
         && !datanode.accessTokenHandler.checkAccess(accessToken, null, blockId,

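For illustration (not part of the patch): the check-side pattern every DataXceiver op above follows — deserialize the token after the op's fixed header fields, then gate on checkAccess(). The real code replies to the client with an access-token error status; throwing here is a simplification.

    import java.io.DataInputStream;
    import java.io.IOException;
    import org.apache.hadoop.hdfs.security.AccessTokenHandler;
    import org.apache.hadoop.hdfs.security.BlockAccessToken;

    public class XceiverTokenCheck {
      // Hypothetical guard; DataXceiver writes an error status back instead
      static void requireReadAccess(DataInputStream in, boolean tokensEnabled,
          AccessTokenHandler handler, long blockId) throws IOException {
        BlockAccessToken accessToken = new BlockAccessToken();
        accessToken.readFields(in); // token trails the fixed header fields
        if (tokensEnabled
            && !handler.checkAccess(accessToken, null, blockId,
                                    AccessTokenHandler.AccessMode.READ)) {
          throw new IOException("Block token check failed for block " + blockId);
        }
      }
    }
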
+ 2 - 2
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -30,8 +30,8 @@ import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.AccessTokenHandler;
-import org.apache.hadoop.security.ExportedAccessKeys;
+import org.apache.hadoop.hdfs.security.AccessTokenHandler;
+import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
 import org.apache.hadoop.security.PermissionChecker;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;

+ 3 - 2
src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java

@@ -43,7 +43,8 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.*;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
 
 public class JspHelper {
   final static public String WEB_UGI_PROPERTY_NAME = "dfs.web.ugi";
@@ -116,7 +117,7 @@ public class JspHelper {
     return chosenNode;
   }
   public void streamBlockInAscii(InetSocketAddress addr, long blockId, 
-                                 AccessToken accessToken, long genStamp, long blockSize, 
+                                 BlockAccessToken accessToken, long genStamp, long blockSize, 
                                  long offsetIntoBlock, long chunkSizeToView, JspWriter out) 
     throws IOException {
     if (chunkSizeToView == 0) return;

+ 1 - 2
src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -48,8 +48,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.security.AccessKey;
-import org.apache.hadoop.security.ExportedAccessKeys;
+import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;

+ 1 - 1
src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java

@@ -23,13 +23,13 @@ import java.io.DataOutput;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.security.ExportedAccessKeys;
 
 /** 
 * DatanodeRegistration class contains all information the Namenode needs

+ 1 - 1
src/hdfs/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java

@@ -21,10 +21,10 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.security.ExportedAccessKeys;
 
 public class KeyUpdateCommand extends DatanodeCommand {
   private ExportedAccessKeys keys;

+ 1 - 1
src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java

@@ -21,9 +21,9 @@ package org.apache.hadoop.hdfs.server.protocol;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.ipc.VersionedProtocol;
-import org.apache.hadoop.security.ExportedAccessKeys;
 
 /*****************************************************************************
  * Protocol that a secondary NameNode uses to communicate with the NameNode.

+ 2 - 2
src/test/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -31,8 +31,8 @@ import junit.framework.TestCase;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.security.AccessToken;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -255,7 +255,7 @@ public class DFSTestUtil extends TestCase {
     return ((DFSClient.DFSDataInputStream) in).getAllBlocks();
   }
 
-  public static AccessToken getAccessToken(FSDataOutputStream out) {
+  public static BlockAccessToken getAccessToken(FSDataOutputStream out) {
     return ((DFSClient.DFSOutputStream) out.getWrappedStream()).getAccessToken();
   }
 

+ 10 - 10
src/test/org/apache/hadoop/hdfs/TestDataTransferProtocol.java

@@ -30,7 +30,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.AccessToken;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
@@ -38,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -179,7 +179,7 @@ public class TestDataTransferProtocol extends TestCase {
     Text.writeString(sendOut, "cl");// clientID
     sendOut.writeBoolean(false); // no src node info
     sendOut.writeInt(0);           // number of downstream targets
-    AccessToken.DUMMY_TOKEN.write(sendOut);
+    BlockAccessToken.DUMMY_TOKEN.write(sendOut);
     sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
     
     // bad bytes per checksum
@@ -215,7 +215,7 @@ public class TestDataTransferProtocol extends TestCase {
     Text.writeString(sendOut, "cl");// clientID
     sendOut.writeBoolean(false); // no src node info
     sendOut.writeInt(0);
-    AccessToken.DUMMY_TOKEN.write(sendOut);
+    BlockAccessToken.DUMMY_TOKEN.write(sendOut);
     sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
     sendOut.writeInt((int)512);
     sendOut.writeInt(4);           // size of packet
@@ -244,7 +244,7 @@ public class TestDataTransferProtocol extends TestCase {
     Text.writeString(sendOut, "cl");// clientID
     sendOut.writeBoolean(false); // no src node info
     sendOut.writeInt(0);
-    AccessToken.DUMMY_TOKEN.write(sendOut);
+    BlockAccessToken.DUMMY_TOKEN.write(sendOut);
     sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
     sendOut.writeInt((int)512);    // checksum size
     sendOut.writeInt(8);           // size of packet
@@ -275,7 +275,7 @@ public class TestDataTransferProtocol extends TestCase {
     sendOut.writeLong(fileLen);
     recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);
     Text.writeString(sendOut, "cl");
-    AccessToken.DUMMY_TOKEN.write(sendOut);
+    BlockAccessToken.DUMMY_TOKEN.write(sendOut);
     sendRecvData("Wrong block ID " + newBlockId + " for read", false); 
 
     // negative block start offset
@@ -287,7 +287,7 @@ public class TestDataTransferProtocol extends TestCase {
     sendOut.writeLong(-1L);
     sendOut.writeLong(fileLen);
     Text.writeString(sendOut, "cl");
-    AccessToken.DUMMY_TOKEN.write(sendOut);
+    BlockAccessToken.DUMMY_TOKEN.write(sendOut);
     sendRecvData("Negative start-offset for read for block " + 
                  firstBlock.getBlockId(), false);
 
@@ -300,7 +300,7 @@ public class TestDataTransferProtocol extends TestCase {
     sendOut.writeLong(fileLen);
     sendOut.writeLong(fileLen);
     Text.writeString(sendOut, "cl");
-    AccessToken.DUMMY_TOKEN.write(sendOut);
+    BlockAccessToken.DUMMY_TOKEN.write(sendOut);
     sendRecvData("Wrong start-offset for reading block " +
                  firstBlock.getBlockId(), false);
     
@@ -315,7 +315,7 @@ public class TestDataTransferProtocol extends TestCase {
     sendOut.writeLong(0);
     sendOut.writeLong(-1-random.nextInt(oneMil));
     Text.writeString(sendOut, "cl");
-    AccessToken.DUMMY_TOKEN.write(sendOut);
+    BlockAccessToken.DUMMY_TOKEN.write(sendOut);
     sendRecvData("Negative length for reading block " +
                  firstBlock.getBlockId(), false);
     
@@ -330,7 +330,7 @@ public class TestDataTransferProtocol extends TestCase {
     sendOut.writeLong(0);
     sendOut.writeLong(fileLen + 1);
     Text.writeString(sendOut, "cl");
-    AccessToken.DUMMY_TOKEN.write(sendOut);
+    BlockAccessToken.DUMMY_TOKEN.write(sendOut);
     sendRecvData("Wrong length for reading block " +
                  firstBlock.getBlockId(), false);
     
@@ -343,7 +343,7 @@ public class TestDataTransferProtocol extends TestCase {
     sendOut.writeLong(0);
     sendOut.writeLong(fileLen);
     Text.writeString(sendOut, "cl");
-    AccessToken.DUMMY_TOKEN.write(sendOut);
+    BlockAccessToken.DUMMY_TOKEN.write(sendOut);
     readFile(fileSys, file, fileLen);
   }
 }

+ 2 - 2
src/test/org/apache/hadoop/security/SecurityTestUtil.java → src/test/org/apache/hadoop/hdfs/security/SecurityTestUtil.java

@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.security;
+package org.apache.hadoop.hdfs.security;
 
 import java.io.IOException;
 
@@ -27,7 +27,7 @@ public class SecurityTestUtil {
    * check if an access token is expired. return true when token is expired,
    * false otherwise
    */
-  public static boolean isAccessTokenExpired(AccessToken token)
+  public static boolean isAccessTokenExpired(BlockAccessToken token)
       throws IOException {
     return AccessTokenHandler.isTokenExpired(token);
   }

+ 6 - 6
src/test/org/apache/hadoop/security/TestAccessToken.java → src/test/org/apache/hadoop/hdfs/security/TestAccessToken.java

@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.security;
+package org.apache.hadoop.hdfs.security;
 
 import java.util.EnumSet;
 
@@ -39,8 +39,8 @@ public class TestAccessToken extends TestCase {
         accessKeyUpdateInterval, accessTokenLifetime);
     ExportedAccessKeys keys = handler.exportKeys();
     TestWritable.testWritable(keys);
-    TestWritable.testWritable(AccessToken.DUMMY_TOKEN);
-    AccessToken token = handler.generateToken(blockID3, EnumSet
+    TestWritable.testWritable(BlockAccessToken.DUMMY_TOKEN);
+    BlockAccessToken token = handler.generateToken(blockID3, EnumSet
         .allOf(AccessTokenHandler.AccessMode.class));
     TestWritable.testWritable(token);
   }
@@ -51,16 +51,16 @@ public class TestAccessToken extends TestCase {
     for (AccessTokenHandler.AccessMode mode : AccessTokenHandler.AccessMode
         .values()) {
       // generated by master
-      AccessToken token1 = master.generateToken(blockID1, EnumSet.of(mode));
+      BlockAccessToken token1 = master.generateToken(blockID1, EnumSet.of(mode));
       assertTrue(master.checkAccess(token1, null, blockID1, mode));
       assertTrue(slave.checkAccess(token1, null, blockID1, mode));
       // generated by slave
-      AccessToken token2 = slave.generateToken(blockID2, EnumSet.of(mode));
+      BlockAccessToken token2 = slave.generateToken(blockID2, EnumSet.of(mode));
       assertTrue(master.checkAccess(token2, null, blockID2, mode));
       assertTrue(slave.checkAccess(token2, null, blockID2, mode));
     }
     // multi-mode tokens
-    AccessToken mtoken = master.generateToken(blockID3, EnumSet
+    BlockAccessToken mtoken = master.generateToken(blockID3, EnumSet
         .allOf(AccessTokenHandler.AccessMode.class));
     for (AccessTokenHandler.AccessMode mode : AccessTokenHandler.AccessMode
         .values()) {

+ 2 - 2
src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java

@@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.datanode.BlockTransferThrottler;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.AccessToken;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
 /**
 * This class tests if block replacement requests to data nodes work correctly.
  */
@@ -232,7 +232,7 @@ public class TestBlockReplacement extends TestCase {
     out.writeLong(block.getGenerationStamp());
     Text.writeString(out, source.getStorageID());
     sourceProxy.write(out);
-    AccessToken.DUMMY_TOKEN.write(out);
+    BlockAccessToken.DUMMY_TOKEN.write(out);
     out.flush();
     // receiveResponse
     DataInputStream reply = new DataInputStream(sock.getInputStream());

+ 2 - 2
src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java

@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.AccessToken;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
 
 import junit.framework.TestCase;
 
@@ -120,7 +120,7 @@ public class TestDiskError extends TestCase {
       Text.writeString( out, "" );
       out.writeBoolean(false); // Not sending src node information
       out.writeInt(0);
-      AccessToken.DUMMY_TOKEN.write(out);
+      BlockAccessToken.DUMMY_TOKEN.write(out);
       
       // write check header
       out.writeByte( 1 );

+ 7 - 7
src/test/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java

@@ -32,6 +32,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
+import org.apache.hadoop.hdfs.security.AccessTokenHandler;
+import org.apache.hadoop.hdfs.security.InvalidAccessTokenException;
+import org.apache.hadoop.hdfs.security.SecurityTestUtil;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.net.NetUtils;
@@ -39,10 +43,6 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.AccessToken;
-import org.apache.hadoop.security.AccessTokenHandler;
-import org.apache.hadoop.security.InvalidAccessTokenException;
-import org.apache.hadoop.security.SecurityTestUtil;
 import org.apache.log4j.Level;
 
 import junit.framework.TestCase;
@@ -206,7 +206,7 @@ public class TestAccessTokenWithDFS extends TestCase {
       /*
        * wait till token used in stm expires
        */
-      AccessToken token = DFSTestUtil.getAccessToken(stm);
+      BlockAccessToken token = DFSTestUtil.getAccessToken(stm);
       while (!SecurityTestUtil.isAccessTokenExpired(token)) {
         try {
           Thread.sleep(10);
@@ -258,7 +258,7 @@ public class TestAccessTokenWithDFS extends TestCase {
       /*
        * wait till token used in stm expires
        */
-      AccessToken token = DFSTestUtil.getAccessToken(stm);
+      BlockAccessToken token = DFSTestUtil.getAccessToken(stm);
       while (!SecurityTestUtil.isAccessTokenExpired(token)) {
         try {
           Thread.sleep(10);
@@ -320,7 +320,7 @@ public class TestAccessTokenWithDFS extends TestCase {
       List<LocatedBlock> locatedBlocks = cluster.getNameNode().getBlockLocations(
           FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
       LocatedBlock lblock = locatedBlocks.get(0); // first block
-      AccessToken myToken = lblock.getAccessToken();
+      BlockAccessToken myToken = lblock.getAccessToken();
       // verify token is not expired
       assertFalse(SecurityTestUtil.isAccessTokenExpired(myToken));
       // read with valid token, should succeed

+ 3 - 3
src/webapps/datanode/browseBlock.jsp

@@ -12,8 +12,8 @@
   import="org.apache.hadoop.io.*"
   import="org.apache.hadoop.conf.*"
   import="org.apache.hadoop.net.DNS"
-  import="org.apache.hadoop.security.AccessToken"
-  import="org.apache.hadoop.security.AccessTokenHandler"
+  import="org.apache.hadoop.hdfs.security.BlockAccessToken"
+  import="org.apache.hadoop.hdfs.security.AccessTokenHandler"
   import="org.apache.hadoop.util.*"
   import="java.text.DateFormat"
 %>
@@ -194,7 +194,7 @@
 
     final DFSClient dfs = new DFSClient(jspHelper.nameNodeAddr, jspHelper.conf);
     
-    AccessToken accessToken = AccessToken.DUMMY_TOKEN;
+    BlockAccessToken accessToken = BlockAccessToken.DUMMY_TOKEN;
     if (JspHelper.conf
         .getBoolean(AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, false)) {
       List<LocatedBlock> blks = dfs.namenode.getBlockLocations(filename, 0,

+ 2 - 2
src/webapps/datanode/tail.jsp

@@ -12,7 +12,7 @@
   import="org.apache.hadoop.io.*"
   import="org.apache.hadoop.conf.*"
   import="org.apache.hadoop.net.DNS"
-  import="org.apache.hadoop.security.AccessToken"
+  import="org.apache.hadoop.hdfs.security.BlockAccessToken"
   import="org.apache.hadoop.util.*"
   import="org.apache.hadoop.net.NetUtils"
   import="java.text.DateFormat"
@@ -83,7 +83,7 @@
     LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
     long blockSize = lastBlk.getBlock().getNumBytes();
     long blockId = lastBlk.getBlock().getBlockId();
-    AccessToken accessToken = lastBlk.getAccessToken();
+    BlockAccessToken accessToken = lastBlk.getAccessToken();
     long genStamp = lastBlk.getBlock().getGenerationStamp();
     DatanodeInfo chosenNode;
     try {