
MAPREDUCE-2764. Fix renewal of dfs delegation tokens. Contributed by Owen.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1183187 13f79535-47bb-0310-9956-ffa450edef68
Jitendra Nath Pandey committed 13 years ago
parent
commit
002dd6968b
30 changed files with 728 additions and 379 deletions
  1. +2 -0
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. +21 -0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
  3. +17 -0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  4. +115 -1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
  5. +69 -0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenRenewer.java
  6. +2 -0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  7. +2 -0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
  8. +74 -4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  9. +13 -4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  10. +110 -46
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
  11. +9 -0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
  12. +37 -49
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
  13. +3 -0
      hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
  14. +1 -1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
  15. +1 -0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
  16. +3 -5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
  17. +48 -13
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
  18. +1 -0
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
  19. +2 -0
      hadoop-mapreduce-project/CHANGES.txt
  20. +36 -2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
  21. +2 -0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
  22. +53 -198
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
  23. +10 -1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java
  24. +1 -1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java
  25. +2 -0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
  26. +9 -0
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenIdentifier.java
  27. +10 -0
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
  28. +2 -0
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
  29. +1 -0
      hadoop-mapreduce-project/src/test/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
  30. +72 -54
      hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/token/TestDelegationTokenRenewal.java

+ 2 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -81,6 +81,8 @@ Trunk (unreleased changes)
     HADOOP-7721. Add log before login in KerberosAuthenticationHandler. 
     (jitendra)
 
+    MAPREDUCE-2764. Fix renewal of dfs delegation tokens. (Owen via jitendra)
+
 Release 0.23.0 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 21 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java

@@ -18,6 +18,7 @@ package org.apache.hadoop.security;
 
 
 import java.io.IOException;
 import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URL;
 import java.net.UnknownHostException;
@@ -34,7 +35,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenInfo;
 
 import sun.security.jgss.krb5.Krb5Util;
@@ -352,4 +355,22 @@ public class SecurityUtil {
     return null;
   }
 
+  /**
+   * Set the given token's service to the format expected by the RPC client 
+   * @param token a delegation token
+   * @param addr the socket for the rpc connection
+   */
+  public static void setTokenService(Token<?> token, InetSocketAddress addr) {
+    token.setService(buildTokenService(addr));
+  }
+  
+  /**
+   * Construct the service key for a token
+   * @param addr InetSocketAddress of remote connection with a token
+   * @return "ip:port"
+   */
+  public static Text buildTokenService(InetSocketAddress addr) {
+    String host = addr.getAddress().getHostAddress();
+    return new Text(host + ":" + addr.getPort());
+  }
 }
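
For illustration only (nothing below is part of the patch): a minimal sketch of how client code might use the two SecurityUtil helpers added above. The NameNode address and class name are hypothetical.

import java.net.InetSocketAddress;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenServiceExample {
  public static void main(String[] args) {
    // Hypothetical NameNode RPC endpoint, for illustration only.
    InetSocketAddress nnAddr = new InetSocketAddress("10.0.0.1", 8020);

    // buildTokenService turns the socket address into the "ip:port" Text
    // key that the RPC client uses to look up tokens.
    Text service = SecurityUtil.buildTokenService(nnAddr);
    System.out.println(service);  // 10.0.0.1:8020

    // setTokenService stamps that key onto a token, e.g. one just fetched
    // from the NameNode, so later RPCs can find it by address.
    Token<TokenIdentifier> token = new Token<TokenIdentifier>();
    SecurityUtil.setTokenService(token, nnAddr);
  }
}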

+ 17 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -634,6 +634,23 @@ public class UserGroupInformation {
         + " using keytab file " + keytabFile);
   }
   
+  /**
+   * Re-login a user from keytab if TGT is expired or is close to expiry.
+   * 
+   * @throws IOException
+   */
+  public synchronized void checkTGTAndReloginFromKeytab() throws IOException {
+    if (!isSecurityEnabled()
+        || user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
+        || !isKeytab)
+      return;
+    KerberosTicket tgt = getTGT();
+    if (tgt != null && System.currentTimeMillis() < getRefreshTime(tgt)) {
+      return;
+    }
+    reloginFromKeytab();
+  }
+
   /**
    * Re-Login a user in from a keytab file. Loads a user identity from a keytab
    * file and logs them in. They become the currently logged-in user. This

+ 115 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java

@@ -22,11 +22,15 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.ServiceLoader;
 
 import org.apache.commons.codec.binary.Base64;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+  
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
@@ -40,10 +44,12 @@ import org.apache.hadoop.io.WritableUtils;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public class Token<T extends TokenIdentifier> implements Writable {
+  public static final Log LOG = LogFactory.getLog(Token.class);
   private byte[] identifier;
   private byte[] password;
   private Text kind;
   private Text service;
+  private TokenRenewer renewer;
   
   /**
    * Construct a token given a token identifier and a secret manager for the
@@ -82,6 +88,17 @@ public class Token<T extends TokenIdentifier> implements Writable {
     service = new Text();
   }
 
+  /**
+   * Clone a token.
+   * @param other the token to clone
+   */
+  public Token(Token<T> other) {
+    this.identifier = other.identifier;
+    this.password = other.password;
+    this.kind = other.kind;
+    this.service = other.service;
+  }
+
   /**
    * Get the token identifier
    * @return the token identifier
@@ -106,6 +123,17 @@ public class Token<T extends TokenIdentifier> implements Writable {
     return kind;
   }
 
+  /**
+   * Set the token kind. This is only intended to be used by services that
+   * wrap another service's token, such as HFTP wrapping HDFS.
+   * @param newKind
+   */
+  @InterfaceAudience.Private
+  public synchronized void setKind(Text newKind) {
+    kind = newKind;
+    renewer = null;
+  }
+
   /**
    * Get the service on which the token is supposed to be used
    * @return the service name
@@ -244,4 +272,90 @@ public class Token<T extends TokenIdentifier> implements Writable {
     buffer.append(service.toString());
     return buffer.toString();
   }
+  
+  private static ServiceLoader<TokenRenewer> renewers =
+      ServiceLoader.load(TokenRenewer.class);
+
+  private synchronized TokenRenewer getRenewer() throws IOException {
+    if (renewer != null) {
+      return renewer;
+    }
+    renewer = TRIVIAL_RENEWER;
+    for (TokenRenewer canidate: renewers) {
+      if (canidate.handleKind(this.kind)) {
+        renewer = canidate;
+        return renewer;
+      }
+    }
+    LOG.warn("No TokenRenewer defined for token kind " + this.kind);
+    return renewer;
+  }
+
+  /**
+   * Is this token managed so that it can be renewed or cancelled?
+   * @return true, if it can be renewed and cancelled.
+   */
+  public boolean isManaged() throws IOException {
+    return getRenewer().isManaged(this);
+  }
+
+  /**
+   * Renew this delegation token
+   * @return the new expiration time
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public long renew(Configuration conf
+                    ) throws IOException, InterruptedException {
+    return getRenewer().renew(this, conf);
+  }
+  
+  /**
+   * Cancel this delegation token
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public void cancel(Configuration conf
+                     ) throws IOException, InterruptedException {
+    getRenewer().cancel(this, conf);
+  }
+  
+  /**
+   * A trivial renewer for token kinds that aren't managed. Sub-classes need
+   * to implement getKind for their token kind.
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+  @InterfaceStability.Evolving
+  public static class TrivialRenewer extends TokenRenewer {
+    
+    // define the kind for this renewer
+    protected Text getKind() {
+      return null;
+    }
+
+    @Override
+    public boolean handleKind(Text kind) {
+      return kind.equals(getKind());
+    }
+
+    @Override
+    public boolean isManaged(Token<?> token) {
+      return false;
+    }
+
+    @Override
+    public long renew(Token<?> token, Configuration conf) {
+      throw new UnsupportedOperationException("Token renewal is not supported "+
+                                              " for " + token.kind + " tokens");
+    }
+
+    @Override
+    public void cancel(Token<?> token, Configuration conf) throws IOException,
+        InterruptedException {
+      throw new UnsupportedOperationException("Token cancel is not supported " +
+          " for " + token.kind + " tokens");
+    }
+
+  }
+  private static final TokenRenewer TRIVIAL_RENEWER = new TrivialRenewer();
 }
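
For illustration only (not part of the patch): with the renewer lookup above, callers ask the token itself to renew or cancel instead of calling a filesystem-specific method; Token finds a matching TokenRenewer for its kind through java.util.ServiceLoader. A hedged sketch of that caller-side usage, with a hypothetical helper class:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.Token;

public class RenewExample {
  // Renew a token of any kind, if some registered TokenRenewer manages it.
  public static long renewIfManaged(Token<?> token, Configuration conf)
      throws Exception {
    if (token.isManaged()) {     // is a TokenRenewer registered for this kind?
      return token.renew(conf);  // delegate to that renewer
    }
    return 0;  // skip: calling renew() here would hit TrivialRenewer and throw
  }
}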

+ 69 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenRenewer.java

@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security.token;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+
+/**
+ * This is the interface for plugins that handle tokens.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+public abstract class TokenRenewer {
+
+  /**
+   * Does this renewer handle this kind of token?
+   * @param kind the kind of the token
+   * @return true if this renewer can renew it
+   */
+  public abstract boolean handleKind(Text kind);
+
+  /**
+   * Is the given token managed? Only managed tokens may be renewed or
+   * cancelled.
+   * @param token the token being checked
+   * @return true if the token may be renewed or cancelled
+   * @throws IOException
+   */
+  public abstract boolean isManaged(Token<?> token) throws IOException;
+  
+  /**
+   * Renew the given token.
+   * @return the new expiration time
+   * @throws IOException
+   * @throws InterruptedException 
+   */
+  public abstract long renew(Token<?> token,
+                             Configuration conf
+                             ) throws IOException, InterruptedException;
+  
+  /**
+   * Cancel the given token
+   * @throws IOException
+   * @throws InterruptedException 
+   */
+  public abstract void cancel(Token<?> token,
+                              Configuration conf
+                              ) throws IOException, InterruptedException;
+}
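
For illustration only (not part of the patch): a sketch of a TokenRenewer plugin written against this contract, modeled on the FakeRenewer used in the tests later in this change. The class name and token kind are made up; a real plugin is discovered via ServiceLoader, so it would also be listed in a META-INF/services/org.apache.hadoop.security.token.TokenRenewer file.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer;

public class ExampleRenewer extends TokenRenewer {
  // Hypothetical token kind handled by this renewer.
  private static final Text KIND = new Text("EXAMPLE-KIND");

  @Override
  public boolean handleKind(Text kind) {
    return KIND.equals(kind);
  }

  @Override
  public boolean isManaged(Token<?> token) throws IOException {
    return true;  // tokens of this kind can be renewed and cancelled
  }

  @Override
  public long renew(Token<?> token, Configuration conf)
      throws IOException, InterruptedException {
    // A real renewer would contact the service that issued the token
    // and return the new expiration time.
    return System.currentTimeMillis();
  }

  @Override
  public void cancel(Token<?> token, Configuration conf)
      throws IOException, InterruptedException {
    // A real renewer would revoke the token at the issuing service.
  }
}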

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -123,6 +123,8 @@ Trunk (unreleased changes)
     HDFS-2424. Added a root element "HdfsFileStatuses" for the response
     of webhdfs listStatus.  (szetszwo)
 
+    MAPREDUCE-2764. Fix renewal of dfs delegation tokens. (Owen via jitendra)
+
 Release 0.23.0 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java

@@ -409,6 +409,7 @@ public class Hdfs extends AbstractFileSystem {
    * @return the new expiration time
    * @throws InvalidToken
    * @throws IOException
+   * @deprecated Use Token.renew instead.
    */
   @SuppressWarnings("unchecked")
   public long renewDelegationToken(
@@ -423,6 +424,7 @@ public class Hdfs extends AbstractFileSystem {
    * @param token delegation token
    * @throws InvalidToken
    * @throws IOException
+   * @deprecated Use Token.cancel instead.
    */
   @SuppressWarnings("unchecked")
   public void cancelDelegationToken(

+ 74 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -93,9 +93,11 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.util.Progressable;
 
 /********************************************************
@@ -115,6 +117,7 @@ public class DFSClient implements java.io.Closeable {
   public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
   static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
   final ClientProtocol namenode;
+  private final InetSocketAddress nnAddress;
   final UserGroupInformation ugi;
   volatile boolean clientRunning = true;
   private volatile FsServerDefaults serverDefaults;
@@ -241,6 +244,7 @@ public class DFSClient implements java.io.Closeable {
     this.dfsClientConf = new Conf(conf);
     this.conf = conf;
     this.stats = stats;
+    this.nnAddress = nameNodeAddr;
     this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
     this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
 
@@ -442,18 +446,26 @@ public class DFSClient implements java.io.Closeable {
       throws IOException {
     Token<DelegationTokenIdentifier> result =
       namenode.getDelegationToken(renewer);
+    SecurityUtil.setTokenService(result, nnAddress);
     LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(result));
     return result;
   }
 
   /**
-   * @see ClientProtocol#renewDelegationToken(Token)
+   * Renew a delegation token
+   * @param token the token to renew
+   * @return the new expiration time
+   * @throws InvalidToken
+   * @throws IOException
+   * @deprecated Use Token.renew instead.
    */
   public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
       throws InvalidToken, IOException {
     LOG.info("Renewing " + DelegationTokenIdentifier.stringifyToken(token));
     try {
-      return namenode.renewDelegationToken(token);
+      return token.renew(conf);
+    } catch (InterruptedException ie) {
+      throw new RuntimeException("caught interrupted", ie);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(InvalidToken.class,
                                      AccessControlException.class);
@@ -461,19 +473,77 @@ public class DFSClient implements java.io.Closeable {
   }
 
   /**
-   * @see ClientProtocol#cancelDelegationToken(Token)
+   * Cancel a delegation token
+   * @param token the token to cancel
+   * @throws InvalidToken
+   * @throws IOException
+   * @deprecated Use Token.cancel instead.
    */
   public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
       throws InvalidToken, IOException {
     LOG.info("Cancelling " + DelegationTokenIdentifier.stringifyToken(token));
     try {
-      namenode.cancelDelegationToken(token);
+      token.cancel(conf);
+     } catch (InterruptedException ie) {
+      throw new RuntimeException("caught interrupted", ie);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(InvalidToken.class,
                                      AccessControlException.class);
     }
   }
   
+  @InterfaceAudience.Private
+  public static class Renewer extends TokenRenewer {
+    
+    @Override
+    public boolean handleKind(Text kind) {
+      return DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(kind);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public long renew(Token<?> token, Configuration conf) throws IOException {
+      Token<DelegationTokenIdentifier> delToken = 
+          (Token<DelegationTokenIdentifier>) token;
+      LOG.info("Renewing " + 
+               DelegationTokenIdentifier.stringifyToken(delToken));
+      ClientProtocol nn = 
+        DFSUtil.createNamenode
+           (NameNode.getAddress(token.getService().toString()),
+            conf, UserGroupInformation.getCurrentUser());
+      try {
+        return nn.renewDelegationToken(delToken);
+      } catch (RemoteException re) {
+        throw re.unwrapRemoteException(InvalidToken.class, 
+                                       AccessControlException.class);
+      }
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public void cancel(Token<?> token, Configuration conf) throws IOException {
+      Token<DelegationTokenIdentifier> delToken = 
+          (Token<DelegationTokenIdentifier>) token;
+      LOG.info("Cancelling " + 
+               DelegationTokenIdentifier.stringifyToken(delToken));
+      ClientProtocol nn = DFSUtil.createNamenode(
+          NameNode.getAddress(token.getService().toString()), conf,
+          UserGroupInformation.getCurrentUser());
+      try {
+        nn.cancelDelegationToken(delToken);
+      } catch (RemoteException re) {
+        throw re.unwrapRemoteException(InvalidToken.class,
+            AccessControlException.class);
+      }
+    }
+
+    @Override
+    public boolean isManaged(Token<?> token) throws IOException {
+      return true;
+    }
+    
+  }
+
   /**
    * Report corrupt blocks that were discovered by the client.
    * @see ClientProtocol#reportBadBlocks(LocatedBlock[])

+ 13 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -811,7 +811,6 @@ public class DistributedFileSystem extends FileSystem {
   ) throws IOException {
     Token<DelegationTokenIdentifier> result =
       dfs.getDelegationToken(renewer == null ? null : new Text(renewer));
-    result.setService(new Text(getCanonicalServiceName()));
     return result;
   }
 
@@ -831,7 +830,7 @@ public class DistributedFileSystem extends FileSystem {
   @Deprecated
   public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
       throws IOException {
-    return dfs.getDelegationToken(renewer);
+    return getDelegationToken(renewer.toString());
   }
   
   @Override // FileSystem
@@ -848,10 +847,15 @@ public class DistributedFileSystem extends FileSystem {
    * @param token delegation token obtained earlier
    * @return the new expiration time
    * @throws IOException
+   * @deprecated Use Token.renew instead.
    */
   public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
       throws InvalidToken, IOException {
-    return dfs.renewDelegationToken(token);
+    try {
+      return token.renew(getConf());
+    } catch (InterruptedException ie) {
+      throw new RuntimeException("Caught interrupted", ie);
+    }
   }
 
   /**
@@ -859,10 +863,15 @@ public class DistributedFileSystem extends FileSystem {
    * 
    * @param token delegation token
    * @throws IOException
+   * @deprecated Use Token.cancel instead.
    */
   public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
       throws IOException {
-    dfs.cancelDelegationToken(token);
+    try {
+      token.cancel(getConf());
+    } catch (InterruptedException ie) {
+      throw new RuntimeException("Caught interrupted", ie);
+    }
   }
 
   /**

+ 110 - 46
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java

@@ -60,6 +60,7 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ServletUtil;
 import org.xml.sax.Attributes;
@@ -83,14 +84,18 @@ public class HftpFileSystem extends FileSystem {
     HttpURLConnection.setFollowRedirects(true);
   }
 
+  public static final Text TOKEN_KIND = new Text("HFTP delegation");
+
   private String nnHttpUrl;
-  private URI hdfsURI;
+  private Text hdfsServiceName;
+  private URI hftpURI;
   protected InetSocketAddress nnAddr;
   protected UserGroupInformation ugi; 
 
   public static final String HFTP_TIMEZONE = "UTC";
   public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";
-  private Token<DelegationTokenIdentifier> delegationToken;
+  private Token<?> delegationToken;
+  private Token<?> renewToken;
   public static final String HFTP_SERVICE_NAME_KEY = "hdfs.service.host_";
   
   public static final SimpleDateFormat getDateFormat() {
@@ -118,7 +123,7 @@ public class HftpFileSystem extends FileSystem {
 
 
   @Override
   public String getCanonicalServiceName() {
-    return SecurityUtil.buildDTServiceName(hdfsURI, getDefaultPort());
+    return SecurityUtil.buildDTServiceName(hftpURI, getDefaultPort());
   }
   
   private String buildUri(String schema, String host, int port) {
@@ -144,17 +149,21 @@ public class HftpFileSystem extends FileSystem {
       urlPort = conf.getInt(DFSConfigKeys.DFS_HTTPS_PORT_KEY, 
           DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);
 
-    nnHttpUrl = 
-      buildUri("https://", NetUtils.normalizeHostName(name.getHost()), urlPort);
+    String normalizedNN = NetUtils.normalizeHostName(name.getHost());
+    nnHttpUrl = buildUri("https://", normalizedNN ,urlPort);
     LOG.debug("using url to get DT:" + nnHttpUrl);
+    try {
+      hftpURI = new URI(buildUri("hftp://", normalizedNN, urlPort));
+    } catch (URISyntaxException ue) {
+      throw new IOException("bad uri for hdfs", ue);
+    }
 
-    
-    
     // if one uses RPC port different from the Default one,  
     // one should specify what is the setvice name for this delegation token
     // otherwise it is hostname:RPC_PORT
-    String key = HftpFileSystem.HFTP_SERVICE_NAME_KEY+
-    SecurityUtil.buildDTServiceName(name, DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);
+    String key = HftpFileSystem.HFTP_SERVICE_NAME_KEY
+        + SecurityUtil.buildDTServiceName(name,
+            DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);
     if(LOG.isDebugEnabled()) {
       LOG.debug("Trying to find DT for " + name + " using key=" + key + 
           "; conf=" + conf.get(key, ""));
@@ -165,9 +174,10 @@ public class HftpFileSystem extends FileSystem {
       nnPort = NetUtils.createSocketAddr(nnServiceName, 
           NameNode.DEFAULT_PORT).getPort();
     }
-
     try {
-      hdfsURI = new URI(buildUri("hdfs://", nnAddr.getHostName(), nnPort));
+      URI hdfsURI = new URI("hdfs://" + normalizedNN + ":" + nnPort);
+      hdfsServiceName = new Text(SecurityUtil.buildDTServiceName(hdfsURI, 
+                                                                 nnPort));
     } catch (URISyntaxException ue) {
       throw new IOException("bad uri for hdfs", ue);
     }
@@ -175,30 +185,55 @@ public class HftpFileSystem extends FileSystem {
     if (UserGroupInformation.isSecurityEnabled()) {
       //try finding a token for this namenode (esp applicable for tasks
       //using hftp). If there exists one, just set the delegationField
-      String canonicalName = getCanonicalServiceName();
+      String hftpServiceName = getCanonicalServiceName();
       for (Token<? extends TokenIdentifier> t : ugi.getTokens()) {
-        if (DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(t.getKind()) &&
-            t.getService().toString().equals(canonicalName)) {
-          if(LOG.isDebugEnabled()) {
-            LOG.debug("Found existing DT for " + name);
+        Text kind = t.getKind();
+        if (DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(kind)) {
+          if (t.getService().toString().equals(hdfsServiceName)) {
+            setDelegationToken(t);
+            break;
+          }
+        } else if (TOKEN_KIND.equals(kind)) {
+          if (hftpServiceName
+              .equals(normalizeService(t.getService().toString()))) {
+            setDelegationToken(t);
+            break;
           }
-          delegationToken = (Token<DelegationTokenIdentifier>) t;
-          break;
         }
       }
       
       //since we don't already have a token, go get one over https
       if (delegationToken == null) {
-        delegationToken = 
-          (Token<DelegationTokenIdentifier>) getDelegationToken(null);
+        setDelegationToken(getDelegationToken(null));
         renewer.addTokenToRenew(this);
       }
     }
   }
-  
+
+  private String normalizeService(String service) {
+    int colonIndex = service.indexOf(':');
+    if (colonIndex == -1) {
+      throw new IllegalArgumentException("Invalid service for hftp token: " + 
+                                         service);
+    }
+    String hostname = 
+        NetUtils.normalizeHostName(service.substring(0, colonIndex));
+    String port = service.substring(colonIndex + 1);
+    return hostname + ":" + port;
+  }
+
+  private <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
+    renewToken = token;
+    // emulate the 203 usage of the tokens
+    // by setting the kind and service as if they were hdfs tokens
+    delegationToken = new Token<T>(token);
+    delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
+    delegationToken.setService(hdfsServiceName);
+  }
 
 
   @Override
-  public synchronized Token<?> getDelegationToken(final String renewer) throws IOException {
+  public synchronized Token<?> getDelegationToken(final String renewer
+                                                  ) throws IOException {
     try {
       //Renew TGT if needed
       ugi.reloginFromKeytab();
@@ -221,7 +256,6 @@ public class HftpFileSystem extends FileSystem {
               LOG.debug("Got dt for " + getUri() + ";t.service="
                   +t.getService());
             }
-            t.setService(new Text(getCanonicalServiceName()));
             return t;
           }
           return null;
@@ -625,7 +659,8 @@ public class HftpFileSystem extends FileSystem {
     @Override
     public int compareTo(Delayed o) {
       if (o.getClass() != RenewAction.class) {
-        throw new IllegalArgumentException("Illegal comparision to non-RenewAction");
+        throw new IllegalArgumentException
+                  ("Illegal comparision to non-RenewAction");
       }
       RenewAction other = (RenewAction) o;
       return timestamp < other.timestamp ? -1 :
@@ -662,31 +697,20 @@ public class HftpFileSystem extends FileSystem {
      * @return
      * @throws IOException
      */
-    @SuppressWarnings("unchecked")
     public boolean renew() throws IOException, InterruptedException {
       final HftpFileSystem fs = weakFs.get();
       if (fs != null) {
         synchronized (fs) {
-          fs.ugi.reloginFromKeytab();
-          fs.ugi.doAs(new PrivilegedExceptionAction<Void>() {
-
-            @Override
-            public Void run() throws Exception {
-              try {
-                DelegationTokenFetcher.renewDelegationToken(fs.nnHttpUrl, 
-                    fs.delegationToken);
-              } catch (IOException ie) {
-                try {
-                  fs.delegationToken = 
-                    (Token<DelegationTokenIdentifier>) fs.getDelegationToken(null);
-                } catch (IOException ie2) {
-                  throw new IOException("Can't renew or get new delegation token ", 
-                      ie);
-                }
-              }
-              return null;
-            } 
-          });
+          try {
+            fs.renewToken.renew(fs.getConf());
+          } catch (IOException ie) {
+            try {
+              fs.setDelegationToken(fs.getDelegationToken(null));
+            } catch (IOException ie2) {
+              throw new IOException("Can't renew or get new delegation "
+                  + "token ", ie);
+            }
+          }
         }
       }
       return fs != null;
@@ -722,7 +746,7 @@ public class HftpFileSystem extends FileSystem {
     }
 
     public void addTokenToRenew(HftpFileSystem fs) {
-      queue.add(new RenewAction(RENEW_CYCLE + System.currentTimeMillis(),fs));
+      queue.add(new RenewAction(RENEW_CYCLE + System.currentTimeMillis(), fs));
     }
 
     public void run() {
@@ -747,4 +771,44 @@ public class HftpFileSystem extends FileSystem {
       }
     }
   }
+  
+  @InterfaceAudience.Private
+  public static class TokenManager extends TokenRenewer {
+
+    @Override
+    public boolean handleKind(Text kind) {
+      return kind.equals(TOKEN_KIND);
+    }
+
+    @Override
+    public boolean isManaged(Token<?> token) throws IOException {
+      return true;
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public long renew(Token<?> token, 
+                      Configuration conf) throws IOException {
+      // update the kerberos credentials, if they are coming from a keytab
+      UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
+      // use https to renew the token
+      return 
+        DelegationTokenFetcher.renewDelegationToken
+        ("https://" + token.getService().toString(), 
+         (Token<DelegationTokenIdentifier>) token);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public void cancel(Token<?> token, 
+                       Configuration conf) throws IOException {
+      // update the kerberos credentials, if they are coming from a keytab
+      UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
+      // use https to cancel the token
+      DelegationTokenFetcher.cancelDelegationToken
+        ("https://" + token.getService().toString(), 
+         (Token<DelegationTokenIdentifier>) token);
+    }
+    
+  }
 }

+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.Acces
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 
 @InterfaceAudience.Private
@@ -171,4 +172,12 @@ public class BlockTokenIdentifier extends TokenIdentifier {
     
     
     return cache;
   }
+  
+  @InterfaceAudience.Private
+  public static class Renewer extends Token.TrivialRenewer {
+    @Override
+    protected Text getKind() {
+      return KIND_NAME;
+    }
+  }
 }

+ 37 - 49
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java

@@ -39,14 +39,17 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -149,34 +152,31 @@ public class DelegationTokenFetcher {
                 DataInputStream in = new DataInputStream(
                     new ByteArrayInputStream(token.getIdentifier()));
                 id.readFields(in);
-                if(LOG.isDebugEnabled()) {
-                  LOG.debug("Token (" + id + ") for " + token.getService());
-                }
+                System.out.println("Token (" + id + ") for " + 
+                                   token.getService());
               }
-              return null;
-            }
-            
-            if (webUrl != null) {
-              if (renew) {
-                long result;
-                for (Token<?> token : readTokens(tokenFile, conf)) {
-                  result = renewDelegationToken(webUrl,
-                      (Token<DelegationTokenIdentifier>) token);
-                  if(LOG.isDebugEnabled()) {
-                	  LOG.debug("Renewed token via " + webUrl + " for "
-                          + token.getService() + " until: " + new Date(result));
+            } else if (cancel) {
+              for(Token<?> token: readTokens(tokenFile, conf)) {
+                if (token.isManaged()) {
+                  token.cancel(conf);
+                  if (LOG.isDebugEnabled()) {
+                    LOG.debug("Cancelled token for " + token.getService());
                   }
                 }
-              } else if (cancel) {
-                for (Token<?> token : readTokens(tokenFile, conf)) {
-                  cancelDelegationToken(webUrl,
-                      (Token<DelegationTokenIdentifier>) token);
-                  if(LOG.isDebugEnabled()) {
-                    LOG.debug("Cancelled token via " + webUrl + " for "
-                	    + token.getService());
+              }
+            } else if (renew) {
+              for (Token<?> token : readTokens(tokenFile, conf)) {
+                if (token.isManaged()) {
+                  long result = token.renew(conf);
+                  if (LOG.isDebugEnabled()) {
+                    LOG.debug("Renewed token for " + token.getService()
+                        + " until: " + new Date(result));
                   }
                 }
-              } else {
+              }
+            } else {
+              // otherwise we are fetching
+              if (webUrl != null) {
                 Credentials creds = getDTfromRemote(webUrl, renewer);
                 creds.writeTokenStorageFile(tokenFile, conf);
                 for (Token<?> token : creds.getAllTokens()) {
@@ -185,29 +185,8 @@ public class DelegationTokenFetcher {
                         + token.getService() + " into " + tokenFile);
                   }
                 }
-              }
-            } else {
-              FileSystem fs = FileSystem.get(conf);
-              if (cancel) {
-                for (Token<?> token : readTokens(tokenFile, conf)) {
-                  ((DistributedFileSystem) fs)
-                      .cancelDelegationToken((Token<DelegationTokenIdentifier>) token);
-                  if(LOG.isDebugEnabled()) {
-                    LOG.debug("Cancelled token for "
-                        + token.getService());
-                  }
-                }
-              } else if (renew) {
-                long result;
-                for (Token<?> token : readTokens(tokenFile, conf)) {
-                  result = ((DistributedFileSystem) fs)
-                      .renewDelegationToken((Token<DelegationTokenIdentifier>) token);
-                  if(LOG.isDebugEnabled()) {
-                    LOG.debug("Renewed token for " + token.getService()
-                        + " until: " + new Date(result));
-                  }
-                }
               } else {
+                FileSystem fs = FileSystem.get(conf);
                 Token<?> token = fs.getDelegationToken(renewer);
                 Credentials cred = new Credentials();
                 cred.addToken(token.getService(), token);
@@ -230,8 +209,9 @@ public class DelegationTokenFetcher {
     try {
       StringBuffer url = new StringBuffer();
       if (renewer != null) {
-        url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC).append("?").
-        append(GetDelegationTokenServlet.RENEWER).append("=").append(renewer);
+        url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC)
+           .append("?").append(GetDelegationTokenServlet.RENEWER).append("=")
+           .append(renewer);
       } else {
         url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC);
       }
@@ -248,6 +228,12 @@ public class DelegationTokenFetcher {
       Credentials ts = new Credentials();
       dis = new DataInputStream(in);
       ts.readFields(dis);
+      for(Token<?> token: ts.getAllTokens()) {
+        token.setKind(HftpFileSystem.TOKEN_KIND);
+        token.setService(new Text(SecurityUtil.buildDTServiceName
+                                   (remoteURL.toURI(), 
+                                    DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT)));
+      }
       return ts;
     } catch (Exception e) {
       throw new IOException("Unable to obtain remote token", e);
@@ -295,7 +281,8 @@ public class DelegationTokenFetcher {
 
 
       IOUtils.cleanup(LOG, in);
       if(e!=null) {
-        LOG.info("rethrowing exception from HTTP request: " + e.getLocalizedMessage());
+        LOG.info("rethrowing exception from HTTP request: " + 
+                 e.getLocalizedMessage());
         throw e;
       }
       throw ie;
@@ -383,7 +370,8 @@ public class DelegationTokenFetcher {
 
 
       IOUtils.cleanup(LOG, in);
       if(e!=null) {
-        LOG.info("rethrowing exception from HTTP request: " + e.getLocalizedMessage());
+        LOG.info("rethrowing exception from HTTP request: " + 
+                 e.getLocalizedMessage());
         throw e;
       }
       throw ie;

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer

@@ -0,0 +1,3 @@
+org.apache.hadoop.hdfs.DFSClient$Renewer
+org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier$Renewer
+org.apache.hadoop.hdfs.HftpFileSystem$TokenManager

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java

@@ -105,7 +105,7 @@ public class TestResolveHdfsSymlink {
    * @throws IOException
    * @throws InterruptedException
    */
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testFcDelegationToken() throws UnsupportedFileSystemException,
       IOException, InterruptedException {

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java

@@ -183,6 +183,7 @@ public class TestDelegationToken {
     dtSecretManager.renewToken(token, "JobTracker");
   }
 
+  @SuppressWarnings("deprecation")
   @Test
   public void testDelegationTokenWithDoAs() throws Exception {
     final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();

+ 3 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java

@@ -203,11 +203,9 @@ public class OfflineEditsViewerHelper {
       "JobTracker/foo.com@FOO.COM");
       "JobTracker/foo.com@FOO.COM");
     try {
       longUgi.doAs(new PrivilegedExceptionAction<Object>() {
-        public Object run() throws IOException {
-          final DistributedFileSystem dfs =
-            (DistributedFileSystem) cluster.getFileSystem();
-          dfs.renewDelegationToken(token);
-          dfs.cancelDelegationToken(token);
+        public Object run() throws IOException, InterruptedException {
+          token.renew(config);
+          token.cancel(config);
           return null;
         }
       });

+ 48 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java

@@ -20,7 +20,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
@@ -37,7 +36,9 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenRenewer;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -46,6 +47,7 @@ public class TestDelegationTokenFetcher {
   private Configuration conf;
   private URI uri;
   private static final String SERVICE_VALUE = "localhost:2005";
+  private static final Text KIND = new Text("TESTING-TOKEN-KIND");
   private static String tokenFile = "file.dta";
 
   @Before 
@@ -56,25 +58,59 @@ public class TestDelegationTokenFetcher {
     FileSystemTestHelper.addFileSystemForTesting(uri, conf, dfs);
   }
   
+  public static class FakeRenewer extends TokenRenewer {
+    static Token<?> lastRenewed = null;
+    static Token<?> lastCanceled = null;
+
+    @Override
+    public boolean handleKind(Text kind) {
+      return KIND.equals(kind);
+    }
+
+    @Override
+    public boolean isManaged(Token<?> token) throws IOException {
+      return true;
+    }
+
+    @Override
+    public long renew(Token<?> token, Configuration conf) {
+      lastRenewed = token;
+      return 0;
+    }
+
+    @Override
+    public void cancel(Token<?> token, Configuration conf) {
+      lastCanceled = token;
+    }
+    
+    public static void reset() {
+      lastRenewed = null;
+      lastCanceled = null;
+    }
+  }
+
   /**
    * Verify that when the DelegationTokenFetcher runs, it talks to the Namenode,
    * pulls out the correct user's token and successfully serializes it to disk.
    */
+  @SuppressWarnings("deprecation")
   @Test
   public void expectedTokenIsRetrievedFromDFS() throws Exception {
     final byte[] ident = new DelegationTokenIdentifier(new Text("owner"),
         new Text("renewer"), new Text("realuser")).getBytes();
     final byte[] pw = new byte[] { 42 };
-    final Text kind = new Text("MY-KIND");
     final Text service = new Text(uri.toString());
+    final String user = 
+        UserGroupInformation.getCurrentUser().getShortUserName();
 
     // Create a token for the fetcher to fetch, wire NN to return it when asked
     // for this particular user.
-    Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>(
-        ident, pw, kind, service);
-    when(dfs.getDelegationToken((String) null)).thenReturn(t);
+    Token<DelegationTokenIdentifier> t = 
+      new Token<DelegationTokenIdentifier>(ident, pw, KIND, service);
+    when(dfs.getDelegationToken(eq((String) null))).thenReturn(t);
     when(dfs.renewDelegationToken(eq(t))).thenReturn(1000L);
     when(dfs.getUri()).thenReturn(uri);
+    FakeRenewer.reset();
 
     FileSystem fileSys = FileSystem.getLocal(conf);
     try {
@@ -88,14 +124,13 @@ public class TestDelegationTokenFetcher {
       assertEquals(t, itr.next());
       assertTrue(!itr.hasNext());
 
-      DelegationTokenFetcher.main(new String[] { "-fs", uri.toString(),
-          "--print", tokenFile });
-      DelegationTokenFetcher.main(new String[] { "-fs", uri.toString(),
-          "--renew", tokenFile });
-      DelegationTokenFetcher.main(new String[] { "-fs", uri.toString(),
-          "--cancel", tokenFile });
-      verify(dfs).renewDelegationToken(eq(t));
-      verify(dfs).cancelDelegationToken(eq(t));
+      DelegationTokenFetcher.main(new String[] { "--print", tokenFile });
+      DelegationTokenFetcher.main(new String[] { "--renew", tokenFile });
+      assertEquals(t, FakeRenewer.lastRenewed);
+      FakeRenewer.reset();
+
+      DelegationTokenFetcher.main(new String[] { "--cancel", tokenFile });
+      assertEquals(t, FakeRenewer.lastCanceled);
     } finally {
       fileSys.delete(new Path(tokenFile), true);
     }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer

@@ -0,0 +1 @@
+org.apache.hadoop.tools.TestDelegationTokenFetcher$FakeRenewer

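The one-line resource above is a standard java.util.ServiceLoader registration: Token discovers every renewer listed under META-INF/services/org.apache.hadoop.security.token.TokenRenewer on the classpath and asks each one whether it handles a given kind. A sketch of a renewer for a hypothetical kind, with its registration noted in a comment (all names here are made up for illustration):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenRenewer;

    // Registered by a classpath resource named
    // META-INF/services/org.apache.hadoop.security.token.TokenRenewer
    // containing the single line: com.example.ExampleRenewer
    public class ExampleRenewer extends TokenRenewer {
      private static final Text KIND = new Text("EXAMPLE-TOKEN-KIND");

      @Override
      public boolean handleKind(Text kind) {
        return KIND.equals(kind);
      }

      @Override
      public boolean isManaged(Token<?> token) throws IOException {
        return true;  // renew() and cancel() below are safe to schedule
      }

      @Override
      public long renew(Token<?> token, Configuration conf)
          throws IOException, InterruptedException {
        // Contact whatever service issued the token; as a placeholder,
        // report an expiration one hour from now.
        return System.currentTimeMillis() + 60 * 60 * 1000;
      }

      @Override
      public void cancel(Token<?> token, Configuration conf)
          throws IOException, InterruptedException {
        // Invalidate the token at the issuing service.
      }
    }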
+ 2 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -48,6 +48,8 @@ Trunk (unreleased changes)
     MAPREDUCE-3183. hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml 
     missing license header. (Hitesh Shah via tucu).
 
+    MAPREDUCE-2764. Fix renewal of dfs delegation tokens. (Owen via jitendra)
+
 Release 0.23.0 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 36 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -459,6 +460,37 @@ public class JobClient extends CLI {
     cluster = new Cluster(conf);
   }
 
+  @InterfaceAudience.Private
+  public static class Renewer extends TokenRenewer {
+
+    @Override
+    public boolean handleKind(Text kind) {
+      return DelegationTokenIdentifier.MAPREDUCE_DELEGATION_KIND.equals(kind);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public long renew(Token<?> token, Configuration conf
+                      ) throws IOException, InterruptedException {
+      return new Cluster(conf).
+        renewDelegationToken((Token<DelegationTokenIdentifier>) token);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public void cancel(Token<?> token, Configuration conf
+                       ) throws IOException, InterruptedException {
+      new Cluster(conf).
+        cancelDelegationToken((Token<DelegationTokenIdentifier>) token);
+    }
+
+    @Override
+    public boolean isManaged(Token<?> token) throws IOException {
+      return true;
+    }
+    
+  }
+
   /**
    * Build a job client, connect to the indicated job tracker.
    * 
@@ -1048,22 +1080,24 @@ public class JobClient extends CLI {
    * @return true if the renewal went well
    * @throws InvalidToken
    * @throws IOException
+   * @deprecated Use {@link Token#renew} instead
    */
   public long renewDelegationToken(Token<DelegationTokenIdentifier> token
                                    ) throws InvalidToken, IOException, 
                                             InterruptedException {
-    return cluster.renewDelegationToken(token);
+    return token.renew(getConf());
   }
 
   /**
    * Cancel a delegation token from the JobTracker
    * @param token the token to cancel
    * @throws IOException
+   * @deprecated Use {@link Token#cancel} instead
    */
   public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
                                     ) throws InvalidToken, IOException, 
                                              InterruptedException {
-    cluster.cancelDelegationToken(token);
+    token.cancel(getConf());
   }
 
   /**

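With the MAPREDUCE_DELEGATION_TOKEN kind routed through JobClient.Renewer, callers no longer need a live Cluster or JobClient just to keep a token alive. A rough sketch of the before/after, assuming the token was already obtained (for example from JobClient.getDelegationToken or the job's Credentials); the lookup helper is a placeholder, not an API from this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
    import org.apache.hadoop.security.token.Token;

    public class MapReduceTokenSketch {
      // Placeholder for however the caller obtained the delegation token.
      static Token<DelegationTokenIdentifier> lookupToken() {
        throw new UnsupportedOperationException("illustration only");
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Token<DelegationTokenIdentifier> token = lookupToken();

        // Deprecated route, kept for comparison:
        //   new JobClient(new JobConf(conf)).renewDelegationToken(token);

        // New route: Token.renew() finds JobClient.Renewer via the token's kind.
        long expiryTime = token.renew(conf);
        token.cancel(conf);
        System.out.println("token was valid until " + expiryTime);
      }
    }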
+ 2 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java

@@ -371,6 +371,7 @@ public class Cluster {
    * @return the new expiration time
    * @throws InvalidToken
    * @throws IOException
+   * @deprecated Use {@link Token#renew} instead
    */
   public long renewDelegationToken(Token<DelegationTokenIdentifier> token
                                    ) throws InvalidToken, IOException,
@@ -387,6 +388,7 @@ public class Cluster {
    * Cancel a delegation token from the JobTracker
    * @param token the token to cancel
    * @throws IOException
+   * @deprecated Use {@link Token#cancel} instead
    */
   public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
                                     ) throws IOException,

+ 53 - 198
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java

@@ -19,8 +19,6 @@
 package org.apache.hadoop.mapreduce.security.token;
 
 import java.io.IOException;
-import java.net.InetAddress;
-import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.Collections;
@@ -37,18 +35,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.util.StringUtils;
 
@@ -64,14 +54,14 @@ public class DelegationTokenRenewal {
    *
    */
   private static class DelegationTokenToRenew {
-    public final Token<DelegationTokenIdentifier> token;
+    public final Token<?> token;
     public final JobID jobId;
     public final Configuration conf;
     public long expirationDate;
     public TimerTask timerTask;
     
     public DelegationTokenToRenew(
-        JobID jId, Token<DelegationTokenIdentifier> t, 
+        JobID jId, Token<?> t, 
         Configuration newConf, long newExpirationDate) {
       token = t;
       jobId = jId;
@@ -124,10 +114,9 @@ public class DelegationTokenRenewal {
   
   private static class DelegationTokenCancelThread extends Thread {
     private static class TokenWithConf {
-      Token<DelegationTokenIdentifier> token;
+      Token<?> token;
       Configuration conf;
-      TokenWithConf(Token<DelegationTokenIdentifier> token,  
-          Configuration conf) {
+      TokenWithConf(Token<?> token, Configuration conf) {
         this.token = token;
         this.conf = conf;
       }
@@ -139,7 +128,7 @@ public class DelegationTokenRenewal {
       super("Delegation Token Canceler");
       super("Delegation Token Canceler");
       setDaemon(true);
       setDaemon(true);
     }
     }
-    public void cancelToken(Token<DelegationTokenIdentifier> token,  
+    public void cancelToken(Token<?> token,  
         Configuration conf) {
         Configuration conf) {
       TokenWithConf tokenWithConf = new TokenWithConf(token, conf);
       TokenWithConf tokenWithConf = new TokenWithConf(token, conf);
       while (!queue.offer(tokenWithConf)) {
       while (!queue.offer(tokenWithConf)) {
@@ -158,25 +147,21 @@ public class DelegationTokenRenewal {
         TokenWithConf tokenWithConf = null;
         try {
           tokenWithConf = queue.take();
-          DistributedFileSystem dfs = null;
-          try {
-            // do it over rpc. For that we need DFS object
-            dfs = getDFSForToken(tokenWithConf.token, tokenWithConf.conf);
-          } catch (Exception e) {
-            LOG.info("couldn't get DFS to cancel. Will retry over HTTPS");
-            dfs = null;
-          }
-      
-          if(dfs != null) {
-            dfs.cancelDelegationToken(tokenWithConf.token);
-          } else {
-            cancelDelegationTokenOverHttps(tokenWithConf.token, 
-                                           tokenWithConf.conf);
-          }
+          final TokenWithConf current = tokenWithConf;
+          
           if (LOG.isDebugEnabled()) {
-            LOG.debug("Canceling token " + tokenWithConf.token.getService() +  
-                " for dfs=" + dfs);
+            LOG.debug("Canceling token " + tokenWithConf.token.getService());
           }
+          // need to use doAs so that http can find the kerberos tgt
+          UserGroupInformation.getLoginUser().doAs(
+              new PrivilegedExceptionAction<Void>() {
+
+                @Override
+                public Void run() throws Exception {
+                  current.token.cancel(current.conf);
+                  return null;
+                }
+              });
         } catch (IOException e) {
           LOG.warn("Failed to cancel token " + tokenWithConf.token + " " +  
               StringUtils.stringifyException(e));
@@ -195,119 +180,29 @@ public class DelegationTokenRenewal {
     delegationTokens.add(t);
   }
   
-  // kind of tokens we currently renew
-  private static final Text kindHdfs = 
-    DelegationTokenIdentifier.HDFS_DELEGATION_KIND;
-  
-  @SuppressWarnings("unchecked")
   public static synchronized void registerDelegationTokensForRenewal(
-      JobID jobId, Credentials ts, Configuration conf) {
+      JobID jobId, Credentials ts, Configuration conf) throws IOException {
     if(ts==null)
       return; //nothing to add
     
-    Collection <Token<? extends TokenIdentifier>> tokens = ts.getAllTokens();
+    Collection <Token<?>> tokens = ts.getAllTokens();
     long now = System.currentTimeMillis();
-    
-    for(Token<? extends TokenIdentifier> t : tokens) {
-      // currently we only check for HDFS delegation tokens
-      // later we can add more different types.
-      if(! t.getKind().equals(kindHdfs)) {
-        continue; 
-      }
-      Token<DelegationTokenIdentifier> dt = 
-        (Token<DelegationTokenIdentifier>)t;
-      
-      // first renew happens immediately
-      DelegationTokenToRenew dtr = 
-        new DelegationTokenToRenew(jobId, dt, conf, now); 
-
-      addTokenToList(dtr);
-      
-      setTimerForTokenRenewal(dtr, true);
-      LOG.info("registering token for renewal for service =" + dt.getService()+
-          " and jobID = " + jobId);
-    }
-  }
-  
-  private static String getHttpAddressForToken(
-      Token<DelegationTokenIdentifier> token, final Configuration conf) 
-  throws IOException {
-
-    String[] ipaddr = token.getService().toString().split(":");
 
-    InetAddress iaddr = InetAddress.getByName(ipaddr[0]);
-    String dnsName = iaddr.getCanonicalHostName();
-    
-    // in case it is a different cluster it may have a different port
-    String httpsPort = conf.get("dfs.hftp.https.port");
-    if(httpsPort == null) {
-      // get from this cluster
-      httpsPort = conf.get(DFSConfigKeys.DFS_HTTPS_PORT_KEY, 
-          "" + DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);
-    }
+    for (Token<?> t : tokens) {
+      // first renew happens immediately
+      if (t.isManaged()) {
+        DelegationTokenToRenew dtr = new DelegationTokenToRenew(jobId, t, conf,
+            now);
 
-    // always use https (it is for security only)
-    return "https://" + dnsName+":"+httpsPort;
-  }
+        addTokenToList(dtr);
 
-  protected static long renewDelegationTokenOverHttps(
-      final Token<DelegationTokenIdentifier> token, final Configuration conf) 
-  throws InterruptedException, IOException{
-    final String httpAddress = getHttpAddressForToken(token, conf);
-    // will be chaged to debug
-    LOG.info("address to renew=" + httpAddress + "; tok=" + token.getService());
-    Long expDate = (Long) UserGroupInformation.getLoginUser().doAs(
-        new PrivilegedExceptionAction<Long>() {
-          public Long run() throws IOException {
-            return DelegationTokenFetcher.renewDelegationToken(httpAddress, token);  
-          }
-        });
-    LOG.info("Renew over HTTP done. addr="+httpAddress+";res="+expDate);
-    return expDate;
-  }
-  
-  private static long renewDelegationToken(DelegationTokenToRenew dttr) 
-  throws Exception {
-    long newExpirationDate=System.currentTimeMillis()+3600*1000;
-    Token<DelegationTokenIdentifier> token = dttr.token;
-    Configuration conf = dttr.conf;
-    if(token.getKind().equals(kindHdfs)) {
-      DistributedFileSystem dfs=null;
-    
-      try {
-        // do it over rpc. For that we need DFS object
-        dfs = getDFSForToken(token, conf);
-      } catch (IOException e) {
-        LOG.info("couldn't get DFS to renew. Will retry over HTTPS");
-        dfs = null;
-      }
-      
-      try {
-        if(dfs != null)
-          newExpirationDate = dfs.renewDelegationToken(token);
-        else {
-          // try HTTP
-          newExpirationDate = renewDelegationTokenOverHttps(token, conf);
-        }
-      } catch (InvalidToken ite) {
-        LOG.warn("invalid token - not scheduling for renew");
-        removeFailedDelegationToken(dttr);
-        throw new IOException("failed to renew token", ite);
-      } catch (AccessControlException ioe) {
-        LOG.warn("failed to renew token:"+token, ioe);
-        removeFailedDelegationToken(dttr);
-        throw new IOException("failed to renew token", ioe);
-      } catch (Exception e) {
-        LOG.warn("failed to renew token:"+token, e);
-        // returns default expiration date
+        setTimerForTokenRenewal(dtr, true);
+        LOG.info("registering token for renewal for service =" + t.getService()
+            + " and jobID = " + jobId);
       }
-    } else {
-      throw new Exception("unknown token type to renew:"+token.getKind());
     }
-    return newExpirationDate;
   }
-
-  
+    
   /**
    * Task - to renew a token
    *
@@ -319,43 +214,31 @@ public class DelegationTokenRenewal {
     
     @Override
     public void run() {
-      Token<DelegationTokenIdentifier> token = dttr.token;
+      Token<?> token = dttr.token;
       long newExpirationDate=0;
       try {
-        newExpirationDate = renewDelegationToken(dttr);
+        // need to use doAs so that http can find the kerberos tgt
+        dttr.expirationDate = UserGroupInformation.getLoginUser().doAs(
+            new PrivilegedExceptionAction<Long>() {
+
+              @Override
+              public Long run() throws Exception {
+                return dttr.token.renew(dttr.conf);
+              }
+            });
+
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("renewing for:" + token.getService() + ";newED="
+              + dttr.expirationDate);
+        }
+        setTimerForTokenRenewal(dttr, false);// set the next one
       } catch (Exception e) {
-        return; // message logged in renewDT method
+        LOG.error("Exception renewing token" + token + ". Not rescheduled", e);
+        removeFailedDelegationToken(dttr);
       }
-      if (LOG.isDebugEnabled())
-        LOG.debug("renewing for:"+token.getService()+";newED=" + 
-            newExpirationDate);
-      
-      // new expiration date
-      dttr.expirationDate = newExpirationDate;
-      setTimerForTokenRenewal(dttr, false);// set the next one
     }
   }
   
-  private static DistributedFileSystem getDFSForToken(
-      Token<DelegationTokenIdentifier> token, final Configuration conf) 
-  throws Exception {
-    DistributedFileSystem dfs = null;
-    try {
-      final URI uri = new URI (SCHEME + "://" + token.getService().toString());
-      dfs = 
-      UserGroupInformation.getLoginUser().doAs(
-          new PrivilegedExceptionAction<DistributedFileSystem>() {
-        public DistributedFileSystem run() throws IOException {
-          return (DistributedFileSystem) FileSystem.get(uri, conf);  
-        }
-      });
-    } catch (Exception e) {
-      LOG.warn("Failed to create a dfs to renew/cancel for:" + token.getService(), e);
-      throw e;
-    } 
-    return dfs;
-  }
-  
   /**
    * find the soonest expiring token and set it for renew
    */
@@ -372,15 +255,11 @@ public class DelegationTokenRenewal {
       renewIn = now + expiresIn - expiresIn/10; // little before expiration
     }
     
-    try {
-      // need to create new timer every time
-      TimerTask tTask = new RenewalTimerTask(token);
-      token.setTimerTask(tTask); // keep reference to the timer
+    // need to create new timer every time
+    TimerTask tTask = new RenewalTimerTask(token);
+    token.setTimerTask(tTask); // keep reference to the timer
 
-      renewalTimer.schedule(token.timerTask, new Date(renewIn));
-    } catch (Exception e) {
-      LOG.warn("failed to schedule a task, token will not renew more", e);
-    }
+    renewalTimer.schedule(token.timerTask, new Date(renewIn));
   }
 
   /**
@@ -391,33 +270,9 @@ public class DelegationTokenRenewal {
     delegationTokens.clear();
   }
   
-  
-  protected static void cancelDelegationTokenOverHttps(
-      final Token<DelegationTokenIdentifier> token, final Configuration conf) 
-  throws InterruptedException, IOException{
-    final String httpAddress = getHttpAddressForToken(token, conf);
-    // will be chaged to debug
-    LOG.info("address to cancel=" + httpAddress + "; tok=" + token.getService());
-    
-    UserGroupInformation.getLoginUser().doAs(
-        new PrivilegedExceptionAction<Void>() {
-          public Void run() throws IOException {
-            DelegationTokenFetcher.cancelDelegationToken(httpAddress, token);
-            return null;
-          }
-        });
-    LOG.info("Cancel over HTTP done. addr="+httpAddress);
-  }
-  
-  
   // cancel a token
   private static void cancelToken(DelegationTokenToRenew t) {
-    Token<DelegationTokenIdentifier> token = t.token;
-    Configuration conf = t.conf;
-    
-    if(token.getKind().equals(kindHdfs)) {
-      dtCancelThread.cancelToken(token, conf);
-    }
+    dtCancelThread.cancelToken(t.token, t.conf);
   }
   
   /**

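After this rewrite the renewal service no longer special-cases HDFS tokens: anything whose renewer reports isManaged() is scheduled, and the actual renew/cancel calls are delegated to Token under a doAs of the login user. A minimal sketch of the call site, with an empty Credentials object so the example stays self-contained (in the JobTracker the credentials come from the submitted job):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.JobID;
    import org.apache.hadoop.mapreduce.security.token.DelegationTokenRenewal;
    import org.apache.hadoop.security.Credentials;

    public class RenewalRegistrationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Credentials creds = new Credentials();
        JobID jobId = new JobID("sketch", 1);

        // Schedules every managed token in creds, regardless of kind; the
        // method now declares IOException because Token.isManaged() can fail.
        DelegationTokenRenewal.registerDelegationTokensForRenewal(jobId, creds, conf);

        // When the job finishes, its tokens are removed from the renewal schedule.
        DelegationTokenRenewal.removeDelegationTokenRenewalForJob(jobId);
      }
    }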
+ 10 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java

@@ -25,6 +25,7 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -35,7 +36,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 @InterfaceStability.Unstable
 public class JobTokenIdentifier extends TokenIdentifier {
   private Text jobid;
-  final static Text KIND_NAME = new Text("mapreduce.job");
+  public final static Text KIND_NAME = new Text("mapreduce.job");
   
   /**
    * Default constructor
@@ -86,4 +87,12 @@ public class JobTokenIdentifier extends TokenIdentifier {
   public void write(DataOutput out) throws IOException {
     jobid.write(out);
   }
+
+  @InterfaceAudience.Private
+  public static class Renewer extends Token.TrivialRenewer {
+    @Override
+    protected Text getKind() {
+      return KIND_NAME;
+    }
+  }
 }

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java

@@ -30,7 +30,7 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdenti
 @InterfaceStability.Unstable
 public class DelegationTokenIdentifier 
     extends AbstractDelegationTokenIdentifier {
-  static final Text MAPREDUCE_DELEGATION_KIND = 
+  public static final Text MAPREDUCE_DELEGATION_KIND = 
     new Text("MAPREDUCE_DELEGATION_TOKEN");
     new Text("MAPREDUCE_DELEGATION_TOKEN");
 
 
   /**
   /**

+ 2 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer

@@ -0,0 +1,2 @@
+org.apache.hadoop.mapred.JobClient$Renewer
+org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier$Renewer

+ 9 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenIdentifier.java

@@ -22,8 +22,10 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 
@@ -74,4 +76,11 @@ public class ApplicationTokenIdentifier extends TokenIdentifier {
     return UserGroupInformation.createRemoteUser(appId.toString());
   }
 
+  @InterfaceAudience.Private
+  public static class Renewer extends Token.TrivialRenewer {
+    @Override
+    protected Text getKind() {
+      return KIND_NAME;
+    }
+  }
 }

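ApplicationTokenIdentifier now carries a nested Renewer based on Token.TrivialRenewer, which only has to say which kind it covers. The same pattern for a hypothetical token kind looks like the sketch below (names are illustrative; TrivialRenewer's behaviour beyond getKind() is defined in the Token.java hunk that is not shown in this excerpt). The nested class still has to be listed in a META-INF/services/org.apache.hadoop.security.token.TokenRenewer resource, as the yarn-common entry further down does:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.Token;

    public class ExampleTokenIdentifierSketch {
      // Hypothetical kind; real identifiers expose their own KIND constant.
      public static final Text KIND_NAME = new Text("EXAMPLE_APP_TOKEN");

      // Would be registered as ...ExampleTokenIdentifierSketch$Renewer
      public static class Renewer extends Token.TrivialRenewer {
        @Override
        protected Text getKind() {
          return KIND_NAME;
        }
      }
    }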
+ 10 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java

@@ -24,8 +24,10 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -115,4 +117,12 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
     return UserGroupInformation.createRemoteUser(this.containerId.toString());
   }
 
+
+  @InterfaceAudience.Private
+  public static class Renewer extends Token.TrivialRenewer {
+    @Override
+    protected Text getKind() {
+      return KIND;
+    }
+  }
 }

+ 2 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer

@@ -0,0 +1,2 @@
+org.apache.hadoop.yarn.security.ApplicationTokenIdentifier$Renewer
+org.apache.hadoop.yarn.security.ContainerTokenIdentifier$Renewer

+ 1 - 0
hadoop-mapreduce-project/src/test/META-INF/services/org.apache.hadoop.security.token.TokenRenewer

@@ -0,0 +1 @@
+org.apache.hadoop.mapreduce.security.token.TestDelegationTokenRenewal$Renewer

+ 72 - 54
hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/token/TestDelegationTokenRenewal.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.mapreduce.security.token;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.URI;
@@ -41,6 +42,7 @@ import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -54,6 +56,53 @@ public class TestDelegationTokenRenewal {
   private static final Log LOG = 
       LogFactory.getLog(TestDelegationTokenRenewal.class);
 
+  private static final Text KIND = 
+    new Text("TestDelegationTokenRenewal.Token");
+
+  public static class Renewer extends TokenRenewer {
+    private static int counter = 0;
+    private static Token<?> lastRenewed = null;
+    private static Token<?> tokenToRenewIn2Sec = null;
+
+    @Override
+    public boolean handleKind(Text kind) {
+      return KIND.equals(kind);
+    }
+
+    @Override
+    public boolean isManaged(Token<?> token) throws IOException {
+      return true;
+    }
+
+    @Override
+    public long renew(Token<?> t, Configuration conf) throws IOException {
+      MyToken token = (MyToken)t;
+      if(token.isCanceled()) {
+        throw new InvalidToken("token has been canceled");
+      }
+      lastRenewed = token;
+      counter ++;
+      LOG.info("Called MYDFS.renewdelegationtoken " + token + 
+               ";this dfs=" + this.hashCode() + ";c=" + counter);
+      if(tokenToRenewIn2Sec == token) { 
+        // this token first renewal in 2 seconds
+        LOG.info("RENEW in 2 seconds");
+        tokenToRenewIn2Sec=null;
+        return 2*1000 + System.currentTimeMillis();
+      } else {
+        return 86400*1000 + System.currentTimeMillis();
+      }
+    }
+
+    @Override
+    public void cancel(Token<?> t, Configuration conf) {
+      MyToken token = (MyToken)t;
+      LOG.info("Cancel token " + token);
+      token.cancelToken();
+   }
+
+  }
+
   private static Configuration conf;
  
   @BeforeClass
@@ -66,7 +115,7 @@ public class TestDelegationTokenRenewal {
     System.out.println("scheme is : " + uri.getScheme());
     System.out.println("scheme is : " + uri.getScheme());
     conf.setClass("fs." + uri.getScheme() + ".impl", MyFS.class, DistributedFileSystem.class);
     conf.setClass("fs." + uri.getScheme() + ".impl", MyFS.class, DistributedFileSystem.class);
     FileSystem.setDefaultUri(conf, uri);
     FileSystem.setDefaultUri(conf, uri);
-    System.out.println("filesystem uri = " + FileSystem.getDefaultUri(conf).toString());
+    LOG.info("filesystem uri = " + FileSystem.getDefaultUri(conf).toString());
   }
   }
   
   
   private static class MyDelegationTokenSecretManager extends DelegationTokenSecretManager {
   private static class MyDelegationTokenSecretManager extends DelegationTokenSecretManager {
@@ -97,11 +146,14 @@ public class TestDelegationTokenRenewal {
     public MyToken(DelegationTokenIdentifier dtId1,
         MyDelegationTokenSecretManager sm) {
       super(dtId1, sm);
+      setKind(KIND);
       status = "GOOD";
     }
     
     public boolean isCanceled() {return status.equals(CANCELED);}
+
     public void cancelToken() {this.status=CANCELED;}
+
     public String toString() {
       StringBuilder sb = new StringBuilder(1024);
       
@@ -127,50 +179,19 @@ public class TestDelegationTokenRenewal {
    * exception
    */
   static class MyFS extends DistributedFileSystem {
-    int counter=0;
-    MyToken token;
-    MyToken tokenToRenewIn2Sec;
     
     public MyFS() {}
     public void close() {}
     @Override
     public void initialize(URI uri, Configuration conf) throws IOException {}
     
-    @Override
-    public long renewDelegationToken(Token<DelegationTokenIdentifier> t)
-    throws InvalidToken, IOException {
-      MyToken token = (MyToken)t;
-      if(token.isCanceled()) {
-        throw new InvalidToken("token has been canceled");
-      }
-      counter ++;
-      this.token = (MyToken)token;
-      System.out.println("Called MYDFS.renewdelegationtoken " + token);
-      if(tokenToRenewIn2Sec == token) { 
-        // this token first renewal in 2 seconds
-        System.out.println("RENEW in 2 seconds");
-        tokenToRenewIn2Sec=null;
-        return 2*1000 + System.currentTimeMillis();
-      } else {
-        return 86400*1000 + System.currentTimeMillis();
-      }
-    }
     @Override 
-    public MyToken getDelegationToken(Text renewer)
-    throws IOException {
-      System.out.println("Called MYDFS.getdelegationtoken");
-      return createTokens(renewer);
-    }
-    @Override
-    public void cancelDelegationToken(Token<DelegationTokenIdentifier> t)
-    throws IOException {
-      MyToken token = (MyToken)t;
-      token.cancelToken();
+    public MyToken getDelegationToken(Text renewer) throws IOException {
+      MyToken result = createTokens(renewer);
+      LOG.info("Called MYDFS.getdelegationtoken " + result);
+      return result;
     }
 
-    public void setTokenToRenewIn2Sec(MyToken t) {tokenToRenewIn2Sec=t;}
-    public int getCounter() {return counter; }
-    public MyToken getToken() {return token;}
   }
   
   /**
@@ -218,9 +239,9 @@ public class TestDelegationTokenRenewal {
    * @throws URISyntaxException
    */
   @Test
-  public void testDTRenewal () throws IOException, URISyntaxException {
+  public void testDTRenewal () throws Exception {
     MyFS dfs = (MyFS)FileSystem.get(conf);
-    System.out.println("dfs="+(Object)dfs);
+    LOG.info("dfs="+(Object)dfs.hashCode() + ";conf="+conf.hashCode());
     // Test 1. - add three tokens - make sure exactly one get's renewed
     
     // get the delegation tokens
@@ -230,8 +251,8 @@ public class TestDelegationTokenRenewal {
     token3 = dfs.getDelegationToken(new Text("user3"));
     token3 = dfs.getDelegationToken(new Text("user3"));
 
 
     //to cause this one to be set for renew in 2 secs
     //to cause this one to be set for renew in 2 secs
-    dfs.setTokenToRenewIn2Sec(token1); 
-    System.out.println("token="+token1+" should be renewed for 2 secs");
+    Renewer.tokenToRenewIn2Sec = token1;
+    LOG.info("token="+token1+" should be renewed for 2 secs");
     
     
     // two distinct Namenodes
     // two distinct Namenodes
     String nn1 = DelegationTokenRenewal.SCHEME + "://host1:0";
     String nn1 = DelegationTokenRenewal.SCHEME + "://host1:0";
@@ -258,15 +279,13 @@ public class TestDelegationTokenRenewal {
       } catch (InterruptedException e) {}
       
       // since we cannot guarantee timely execution - let's give few chances
-      if(dfs.getCounter()==numberOfExpectedRenewals)
+      if(Renewer.counter==numberOfExpectedRenewals)
         break;
     }
     
-    System.out.println("Counter = " + dfs.getCounter() + ";t="+
-        dfs.getToken());
     assertEquals("renew wasn't called as many times as expected(4):",
-        numberOfExpectedRenewals, dfs.getCounter());
-    assertEquals("most recently renewed token mismatch", dfs.getToken(), 
+        numberOfExpectedRenewals, Renewer.counter);
+    assertEquals("most recently renewed token mismatch", Renewer.lastRenewed, 
         token1);
     
     // Test 2. 
@@ -277,8 +296,8 @@ public class TestDelegationTokenRenewal {
     MyToken token4 = dfs.getDelegationToken(new Text("user4"));
     MyToken token4 = dfs.getDelegationToken(new Text("user4"));
     
     
     //to cause this one to be set for renew in 2 secs
     //to cause this one to be set for renew in 2 secs
-    dfs.setTokenToRenewIn2Sec(token4); 
-    System.out.println("token="+token4+" should be renewed for 2 secs");
+    Renewer.tokenToRenewIn2Sec = token4; 
+    LOG.info("token="+token4+" should be renewed for 2 secs");
     
     
     String nn4 = DelegationTokenRenewal.SCHEME + "://host4:0";
     String nn4 = DelegationTokenRenewal.SCHEME + "://host4:0";
     ts.addToken(new Text(nn4), token4);
     ts.addToken(new Text(nn4), token4);
@@ -287,24 +306,23 @@ public class TestDelegationTokenRenewal {
     JobID jid2 = new JobID("job2",1);
     JobID jid2 = new JobID("job2",1);
     DelegationTokenRenewal.registerDelegationTokensForRenewal(jid2, ts, conf);
     DelegationTokenRenewal.registerDelegationTokensForRenewal(jid2, ts, conf);
     DelegationTokenRenewal.removeDelegationTokenRenewalForJob(jid2);
     DelegationTokenRenewal.removeDelegationTokenRenewalForJob(jid2);
-    numberOfExpectedRenewals = dfs.getCounter(); // number of renewals so far
+    numberOfExpectedRenewals = Renewer.counter; // number of renewals so far
     try {
     try {
       Thread.sleep(6*1000); // sleep 6 seconds, so it has time to renew
       Thread.sleep(6*1000); // sleep 6 seconds, so it has time to renew
     } catch (InterruptedException e) {}
     } catch (InterruptedException e) {}
-    System.out.println("Counter = " + dfs.getCounter() + ";t="+dfs.getToken());
+    System.out.println("Counter = " + Renewer.counter + ";t="+ 
+                       Renewer.lastRenewed);
     
     
     // counter and the token should stil be the old ones
     // counter and the token should stil be the old ones
     assertEquals("renew wasn't called as many times as expected",
     assertEquals("renew wasn't called as many times as expected",
-        numberOfExpectedRenewals, dfs.getCounter());
+        numberOfExpectedRenewals, Renewer.counter);
     
     
     // also renewing of the cancelled token should fail
     // also renewing of the cancelled token should fail
-    boolean exception=false;
     try {
     try {
-      dfs.renewDelegationToken(token4);
+      token4.renew(conf);
+      fail("Renew of canceled token didn't fail");
     } catch (InvalidToken ite) {
     } catch (InvalidToken ite) {
       //expected
       //expected
-      exception = true;
     }
     }
-    assertTrue("Renew of canceled token didn't fail", exception);
   }
   }
 }
 }