Merge trunk into branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/MR-2841@1613007 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon · commit 5149a8a6f1
56 changed files with 1589 additions and 205 deletions
  1. + 14 - 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. + 3 - 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
  3. + 29 - 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
  4. + 1 - 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  5. + 34 - 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
  6. + 34 - 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  7. + 80 - 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  8. + 4 - 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
  9. + 4 - 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java
  10. + 20 - 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java
  11. + 8 - 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
  12. + 30 - 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
  13. + 30 - 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
  14. + 2 - 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java
  15. + 6 - 1
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
  16. + 15 - 4
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
  17. + 4 - 1
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
  18. + 10 - 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  19. + 1 - 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
  20. + 1 - 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
  21. + 1 - 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  22. + 1 - 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
  23. + 42 - 12
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
  24. + 2 - 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  25. + 4 - 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  26. + 5 - 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
  27. + 47 - 15
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  28. + 15 - 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
  29. + 2 - 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
  30. + 69 - 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
  31. + 330 - 21
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
  32. + 2 - 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
  33. + 52 - 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
  34. + 0 - 6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
  35. BIN
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
  36. + 2 - 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
  37. + 12 - 0
      hadoop-yarn-project/CHANGES.txt
  38. + 6 - 0
      hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
  39. + 1 - 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java
  40. + 3 - 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
  41. + 104 - 13
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
  42. + 29 - 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
  43. + 38 - 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
  44. + 88 - 17
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java
  45. + 3 - 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java
  46. + 50 - 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
  47. + 90 - 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
  48. + 144 - 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMContainerTokenSecretManager.java
  49. + 4 - 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMTokenSecretManagerInNM.java
  50. + 12 - 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
  51. + 9 - 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
  52. + 5 - 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
  53. + 1 - 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
  54. + 35 - 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
  55. + 6 - 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
  56. + 45 - 60
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm

+ 14 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -452,6 +452,11 @@ Release 2.6.0 - UNRELEASED
 
     HADOOP-10855. Allow Text to be read with a known Length. (todd)
 
+    HADOOP-10887. Add XAttrs to ViewFs and make XAttrs + ViewFileSystem
+    internal dir behavior consistent. (Stephen Chu via wang)
+
+    HADOOP-10882. Move DirectBufferPool into common util. (todd)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -480,6 +485,9 @@ Release 2.6.0 - UNRELEASED
     command when the format of the stat command uses non-curly quotes (yzhang
     via cmccabe)
 
+    HADOOP-10830. Missing lock in JavaKeyStoreProvider.createCredentialEntry.
+    (Benoy Antony via umamahesh)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -805,6 +813,12 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10864. Tool documentenation is broken. (Akira Ajisaka
     via Arpit Agarwal)
 
+    HADOOP-10872. TestPathData fails intermittently with "Mkdirs failed
+    to create d1". (Yongjun Zhang via Arpit Agarwal)
+
+    HADOOP-10890. TestDFVariations.testMount fails intermittently. (Yongjun
+    Zhang via Arpit Agarwal)
+
 Release 2.4.1 - 2014-06-23
 
   INCOMPATIBLE CHANGES

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -437,7 +437,9 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
         throw new FileNotFoundException("Parent directory doesn't exist: "
             + parent);
       } else if (!mkdirs(parent)) {
-        throw new IOException("Mkdirs failed to create " + parent);
+        throw new IOException("Mkdirs failed to create " + parent
+            + " (exists=" + exists(parent) + ", cwd=" + getWorkingDirectory()
+            + ")");
       }
     }
     final FSDataOutputStream out;

+ 29 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -2484,4 +2484,33 @@ public final class FileContext {
       }
     }.resolve(this, absF);
   }
+
+  /**
+   * Get all of the xattr names for a file or directory.
+   * Only those xattr names which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * A regular user can only get xattr names for the "user" namespace.
+   * The super user can only get xattr names for "user" and "trusted"
+   * namespaces.
+   * The xattrs of the "security" and "system" namespaces are only
+   * used/exposed internally by/to the FS impl.
+   * <p/>
+   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   *
+   * @param path Path to get extended attributes
+   * @return List<String> of the XAttr names of the file or directory
+   * @throws IOException
+   */
+  public List<String> listXAttrs(Path path) throws IOException {
+    final Path absF = fixRelativePart(path);
+    return new FSLinkResolver<List<String>>() {
+      @Override
+      public List<String> next(final AbstractFileSystem fs, final Path p)
+          throws IOException {
+        return fs.listXAttrs(p);
+      }
+    }.resolve(this, absF);
+  }
 }

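As a usage note: a minimal sketch of calling the new FileContext.listXAttrs API from client code. The path and the idea of printing the names are illustrative, not part of this commit, and the returned names are still filtered by the caller's permissions as the javadoc above describes.

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class ListXAttrsExample {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(new Configuration());
        // Returns only the xattr names the caller may view, e.g. "user.tag".
        List<String> names = fc.listXAttrs(new Path("/tmp/file1"));
        for (String name : names) {
          System.out.println(name);
        }
      }
    }
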
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -2509,7 +2509,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
-   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @return List<String> of the XAttr names of the file or directory
   * @throws IOException
   */
  public List<String> listXAttrs(Path path) throws IOException {

+ 34 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java

@@ -22,6 +22,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -37,6 +38,7 @@ import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -313,6 +315,38 @@ class ChRootedFs extends AbstractFileSystem {
     return myFs.getAclStatus(fullPath(path));
   }
 
+  @Override
+  public void setXAttr(Path path, String name, byte[] value,
+                       EnumSet<XAttrSetFlag> flag) throws IOException {
+    myFs.setXAttr(fullPath(path), name, value, flag);
+  }
+
+  @Override
+  public byte[] getXAttr(Path path, String name) throws IOException {
+    return myFs.getXAttr(fullPath(path), name);
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+    return myFs.getXAttrs(fullPath(path));
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+      throws IOException {
+    return myFs.getXAttrs(fullPath(path), names);
+  }
+
+  @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    return myFs.listXAttrs(fullPath(path));
+  }
+
+  @Override
+  public void removeXAttr(Path path, String name) throws IOException {
+    myFs.removeXAttr(fullPath(path), name);
+  }
+
   @Override
   public void setVerifyChecksum(final boolean verifyChecksum)
       throws IOException, UnresolvedLinkException {

+ 34 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -913,5 +913,39 @@ public class ViewFileSystem extends FileSystem {
           .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
           .stickyBit(false).build();
     }
+
+    @Override
+    public void setXAttr(Path path, String name, byte[] value,
+                         EnumSet<XAttrSetFlag> flag) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("setXAttr", path);
+    }
+
+    @Override
+    public byte[] getXAttr(Path path, String name) throws IOException {
+      throw new NotInMountpointException(path, "getXAttr");
+    }
+
+    @Override
+    public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+      throw new NotInMountpointException(path, "getXAttrs");
+    }
+
+    @Override
+    public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+        throws IOException {
+      throw new NotInMountpointException(path, "getXAttrs");
+    }
+
+    @Override
+    public List<String> listXAttrs(Path path) throws IOException {
+      throw new NotInMountpointException(path, "listXAttrs");
+    }
+
+    @Override
+    public void removeXAttr(Path path, String name) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("removeXAttr", path);
+    }
   }
 }

+ 80 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -26,6 +26,7 @@ import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -48,6 +49,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.local.LocalConfigKeys;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclUtil;
@@ -651,6 +653,50 @@ public class ViewFs extends AbstractFileSystem {
         fsState.resolve(getUriPath(path), true);
     return res.targetFileSystem.getAclStatus(res.remainingPath);
   }
+
+  @Override
+  public void setXAttr(Path path, String name, byte[] value,
+                       EnumSet<XAttrSetFlag> flag) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.setXAttr(res.remainingPath, name, value, flag);
+  }
+
+  @Override
+  public byte[] getXAttr(Path path, String name) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    return res.targetFileSystem.getXAttr(res.remainingPath, name);
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    return res.targetFileSystem.getXAttrs(res.remainingPath);
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+      throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    return res.targetFileSystem.getXAttrs(res.remainingPath, names);
+  }
+
+  @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    return res.targetFileSystem.listXAttrs(res.remainingPath);
+  }
+
+  @Override
+  public void removeXAttr(Path path, String name) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.removeXAttr(res.remainingPath, name);
+  }
  
  
  /*
@@ -921,5 +967,39 @@ public class ViewFs extends AbstractFileSystem {
           .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
           .stickyBit(false).build();
     }
+
+    @Override
+    public void setXAttr(Path path, String name, byte[] value,
+                         EnumSet<XAttrSetFlag> flag) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("setXAttr", path);
+    }
+
+    @Override
+    public byte[] getXAttr(Path path, String name) throws IOException {
+      throw new NotInMountpointException(path, "getXAttr");
+    }
+
+    @Override
+    public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+      throw new NotInMountpointException(path, "getXAttrs");
+    }
+
+    @Override
+    public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+        throws IOException {
+      throw new NotInMountpointException(path, "getXAttrs");
+    }
+
+    @Override
+    public List<String> listXAttrs(Path path) throws IOException {
+      throw new NotInMountpointException(path, "listXAttrs");
+    }
+
+    @Override
+    public void removeXAttr(Path path, String name) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("removeXAttr", path);
+    }
   }
 }

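The ViewFileSystem and ViewFs hunks above give xattrs the same split behavior as ACLs: calls inside a mount point resolve through the mount table to the target file system, while the mount table's internal directories refuse them. A hedged sketch of the client-visible result, assuming an illustrative mount table entry; the link key value, namenode URI, and paths below are examples, not taken from this commit.

    import java.net.URI;
    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.XAttrSetFlag;

    public class ViewFsXAttrExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Illustrative mount table entry: /data resolves to an HDFS directory.
        conf.set("fs.viewfs.mounttable.default.link./data", "hdfs://nn1/data");
        FileSystem viewFs = FileSystem.get(URI.create("viewfs:///"), conf);

        // Inside a mount point: forwarded to the target file system.
        viewFs.setXAttr(new Path("/data/file1"), "user.tag",
            "v1".getBytes("UTF-8"), EnumSet.of(XAttrSetFlag.CREATE));

        // On an internal dir: reads throw NotInMountpointException, and
        // mutations throw AccessControlException (the mount table is read-only).
        viewFs.listXAttrs(new Path("/"));
      }
    }
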
+ 4 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java

@@ -194,15 +194,18 @@ public class JavaKeyStoreProvider extends CredentialProvider {
   @Override
   public CredentialEntry createCredentialEntry(String alias, char[] credential)
       throws IOException {
+    writeLock.lock();
     try {
       if (keyStore.containsAlias(alias) || cache.containsKey(alias)) {
         throw new IOException("Credential " + alias + " already exists in " + this);
       }
+      return innerSetCredential(alias, credential);
     } catch (KeyStoreException e) {
       throw new IOException("Problem looking up credential " + alias + " in " + this,
           e);
+    } finally {
+      writeLock.unlock();
     }
-    return innerSetCredential(alias, credential);
   }
 
   @Override

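The fix wraps the whole check-then-create sequence in the provider's write lock, so the containsAlias test and the store mutation are atomic (HADOOP-10830 above). A standalone sketch of the same idiom; the class and field names here are illustrative, not the provider's actual ones.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class GuardedStore {
      private final Map<String, char[]> entries = new HashMap<String, char[]>();
      private final ReentrantReadWriteLock.WriteLock writeLock =
          new ReentrantReadWriteLock(true).writeLock();

      public void create(String alias, char[] credential) {
        writeLock.lock();
        try {
          // Without the lock, two threads can both pass this check and
          // silently overwrite each other's entry.
          if (entries.containsKey(alias)) {
            throw new IllegalStateException(alias + " already exists");
          }
          entries.put(alias, credential.clone());
        } finally {
          writeLock.unlock();
        }
      }
    }
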
+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;
 
 import java.lang.ref.WeakReference;
 import java.nio.ByteBuffer;
@@ -27,6 +27,7 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * A simple class for pooling direct ByteBuffers. This is necessary
@@ -40,7 +41,8 @@ import com.google.common.annotations.VisibleForTesting;
 * allocated at the same size. There is no attempt to reuse larger
 * buffers to satisfy smaller allocations.
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
 public class DirectBufferPool {
 
   // Essentially implement a multimap with weak values.

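Now that the pool lives in org.apache.hadoop.util, the projects named in the LimitedPrivate annotation can share it. A minimal usage sketch; the 4 KB buffer size and the class wrapper are illustrative.

    import java.nio.ByteBuffer;

    import org.apache.hadoop.util.DirectBufferPool;

    public class PoolExample {
      public static void main(String[] args) {
        DirectBufferPool pool = new DirectBufferPool();
        // Returns a cleared direct buffer of exactly this capacity,
        // reusing a previously returned buffer of the same size if available.
        ByteBuffer buf = pool.getBuffer(4096);
        try {
          buf.putInt(42);
        } finally {
          // Return the buffer so later requests for 4096 bytes can reuse it.
          pool.returnBuffer(buf);
        }
      }
    }
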
+ 20 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java

@@ -29,14 +29,33 @@ import java.util.Random;
 
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
+
 import static org.junit.Assert.*;
 
 public class TestDFVariations {
+  private static final String TEST_ROOT_DIR =
+      System.getProperty("test.build.data","build/test/data") + "/TestDFVariations";
+  private static File test_root = null;
 
+  @Before
+  public void setup() throws IOException {
+    test_root = new File(TEST_ROOT_DIR);
+    test_root.mkdirs();
+  }
+  
+  @After
+  public void after() throws IOException {
+    FileUtil.setWritable(test_root, true);
+    FileUtil.fullyDelete(test_root);
+    assertTrue(!test_root.exists());
+  }
+  
   public static class XXDF extends DF {
     public XXDF() throws IOException {
-      super(new File(System.getProperty("test.build.data","/tmp")), 0L);
+      super(test_root, 0L);
     }
 
     @Override

+ 8 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java

@@ -35,19 +35,22 @@ import org.junit.Before;
 import org.junit.Test;
 
 public class TestPathData {
+  private static final String TEST_ROOT_DIR = 
+      System.getProperty("test.build.data","build/test/data") + "/testPD";
   protected Configuration conf;
   protected FileSystem fs;
   protected Path testDir;
-
+  
   @Before
   public void initialize() throws Exception {
     conf = new Configuration();
     fs = FileSystem.getLocal(conf);
-    testDir = new Path(
-        System.getProperty("test.build.data", "build/test/data") + "/testPD"
-    );
+    testDir = new Path(TEST_ROOT_DIR);
+    
     // don't want scheme on the path, just an absolute path
     testDir = new Path(fs.makeQualified(testDir).toUri().getPath());
+    fs.mkdirs(testDir);
+
     FileSystem.setDefaultUri(conf, fs.getUri());    
     fs.setWorkingDirectory(testDir);
     fs.mkdirs(new Path("d1"));
@@ -60,6 +63,7 @@ public class TestPathData {
 
   @After
   public void cleanup() throws Exception {
+    fs.delete(testDir, true);
     fs.close();
   }

+ 30 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java

@@ -773,4 +773,34 @@ public class ViewFileSystemBaseTest {
     assertFalse(aclStatus.isStickyBit());
   }
 
+  @Test(expected=AccessControlException.class)
+  public void testInternalSetXAttr() throws IOException {
+    fsView.setXAttr(new Path("/internalDir"), "xattrName", null);
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalGetXAttr() throws IOException {
+    fsView.getXAttr(new Path("/internalDir"), "xattrName");
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalGetXAttrs() throws IOException {
+    fsView.getXAttrs(new Path("/internalDir"));
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalGetXAttrsWithNames() throws IOException {
+    fsView.getXAttrs(new Path("/internalDir"), new ArrayList<String>());
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalListXAttr() throws IOException {
+    fsView.listXAttrs(new Path("/internalDir"));
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalRemoveXAttr() throws IOException {
+    fsView.removeXAttr(new Path("/internalDir"), "xattrName");
+  }
+
 }

+ 30 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java

@@ -747,4 +747,34 @@ public class ViewFsBaseTest {
         AclUtil.getMinimalAcl(PERMISSION_555));
     assertFalse(aclStatus.isStickyBit());
   }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalSetXAttr() throws IOException {
+    fcView.setXAttr(new Path("/internalDir"), "xattrName", null);
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalGetXAttr() throws IOException {
+    fcView.getXAttr(new Path("/internalDir"), "xattrName");
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalGetXAttrs() throws IOException {
+    fcView.getXAttrs(new Path("/internalDir"));
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalGetXAttrsWithNames() throws IOException {
+    fcView.getXAttrs(new Path("/internalDir"), new ArrayList<String>());
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalListXAttr() throws IOException {
+    fcView.listXAttrs(new Path("/internalDir"));
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalRemoveXAttr() throws IOException {
+    fcView.removeXAttr(new Path("/internalDir"), "xattrName");
+  }
 }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java → hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotSame;
@@ -29,7 +29,7 @@ import org.junit.Test;
 import com.google.common.collect.Lists;
 
 public class TestDirectBufferPool {
-  final DirectBufferPool pool = new DirectBufferPool();
+  final org.apache.hadoop.util.DirectBufferPool pool = new org.apache.hadoop.util.DirectBufferPool();
 
   @Test
   public void testBasics() {

+ 6 - 1
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java

@@ -53,7 +53,12 @@ public class NfsExports {
       long expirationPeriodNano = conf.getLong(
           Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
           Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
-      exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
+      try {
+        exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
+      } catch (IllegalArgumentException e) {
+        LOG.error("Invalid NFS Exports provided: ", e);
+        return exports;
+      }
     }
     return exports;
   }

+ 15 - 4
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java

@@ -104,6 +104,10 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
 
   @Override
   public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
+    if (hostsMatcher == null) {
+      return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
+          null);
+    }
     AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
     if (accessPrivilege == AccessPrivilege.NONE) {
       return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
@@ -208,16 +212,23 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
     } else if (mntproc == MNTPROC.UMNTALL) {
       umntall(out, xid, client);
     } else if (mntproc == MNTPROC.EXPORT) {
-      // Currently only support one NFS export 
+      // Currently only support one NFS export
       List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
-      hostsMatchers.add(hostsMatcher);
-      out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      if (hostsMatcher != null) {
+        hostsMatchers.add(hostsMatcher);
+        out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      } else {
+        // This means there are no valid exports provided.
+        RpcAcceptedReply.getInstance(xid,
+          RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
+          out);
+      }
     } else {
       // Invalid procedure
       RpcAcceptedReply.getInstance(xid,
           RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
           out);
-    }  
+    }
     ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
     RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
     RpcUtil.sendRpcResponse(ctx, rsp);

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java

@@ -2123,8 +2123,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     if (!doPortMonitoring(remoteAddress)) {
       return false;
     }
-    
+
     // Check export table
+    if (exports == null) {
+        return false;
+    }
     InetAddress client = ((InetSocketAddress) remoteAddress).getAddress();
     AccessPrivilege access = exports.getAccessPrivilege(client);
     if (access == AccessPrivilege.NONE) {

+ 10 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -357,6 +357,13 @@ Release 2.6.0 - UNRELEASED
     HDFS-6731. Run "hdfs zkfc-formatZK" on a server in a non-namenode will cause
     a null pointer exception. (Masatake Iwasaki via brandonli)
 
+    HDFS-6114. Block Scan log rolling will never happen if blocks written
+    continuously leading to huge size of dncp_block_verification.log.curr
+    (vinayakumarb via cmccabe)
+
+    HDFS-6455. NFS: Exception should be added in NFS log for invalid separator in
+    nfs.exports.allowed.hosts. (Abhiraj Butala via brandonli)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -892,6 +899,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6703. NFS: Files can be deleted from a read-only mount
     (Srikanth Upputuri via brandonli)
 
+    HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code 
+    when xattr doesn't exist. (Charles Lamb via umamahesh)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java

@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.util.DataChecksum;
 
 import com.google.common.annotations.VisibleForTesting;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java

@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -1337,6 +1337,6 @@ public interface ClientProtocol {
   * @param xAttr <code>XAttr</code> to remove
   * @throws IOException
   */
-  @Idempotent
+  @AtMostOnce
   public void removeXAttr(String src, XAttr xAttr) throws IOException;
 }

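Context for this one-line change: with the stricter FSNamesystem behavior later in this commit, a blindly re-executed removeXAttr retry would fail with "No matching attributes found", so the call can no longer be marked idempotent; @AtMostOnce tells the RPC layer the server must answer retries from its RetryCache instead of re-running the operation. A hedged sketch of how the two retry annotations are used on a protocol; the interface and method names are illustrative.

    import java.io.IOException;

    import org.apache.hadoop.io.retry.AtMostOnce;
    import org.apache.hadoop.io.retry.Idempotent;

    public interface ExampleProtocol {
      // Safe to re-execute on a retried RPC; the result is the same.
      @Idempotent
      long getLength(String src) throws IOException;

      // Must execute at most once; the server replays the recorded
      // outcome for retries via its RetryCache.
      @AtMostOnce
      void removeTag(String src, String tag) throws IOException;
    }
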
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java

@@ -27,7 +27,7 @@ import java.nio.channels.ReadableByteChannel;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 
 import com.google.common.base.Preconditions;

+ 42 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -84,6 +84,10 @@ class BlockPoolSliceScanner {
   
   private final SortedSet<BlockScanInfo> blockInfoSet
       = new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
+  private final SortedSet<BlockScanInfo> newBlockInfoSet =
+      new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
   private final GSet<Block, BlockScanInfo> blockMap
       = new LightWeightGSet<Block, BlockScanInfo>(
           LightWeightGSet.computeCapacity(0.5, "BlockMap"));
@@ -195,7 +199,7 @@ class BlockPoolSliceScanner {
       BlockScanInfo info = new BlockScanInfo( block );
       info.lastScanTime = scanTime--; 
       //still keep 'info.lastScanType' to NONE.
-      addBlockInfo(info);
+      addBlockInfo(info, false);
     }
 
     RollingLogs rollingLogs = null;
@@ -221,25 +225,42 @@ class BlockPoolSliceScanner {
     // Should we change throttler bandwidth every time bytesLeft changes?
     // not really required.
   }
-  
-  private synchronized void addBlockInfo(BlockScanInfo info) {
-    boolean added = blockInfoSet.add(info);
+
+  /**
+   * Add the BlockScanInfo to sorted set of blockScanInfo
+   * @param info BlockScanInfo to be added
+   * @param isNewBlock true if the block is the new Block, false if
+   *          BlockScanInfo is being updated with new scanTime
+   */
+  private synchronized void addBlockInfo(BlockScanInfo info,
+      boolean isNewBlock) {
+    boolean added = false;
+    if (isNewBlock) {
+      // check whether the block already present
+      boolean exists = blockInfoSet.contains(info);
+      added = !exists && newBlockInfoSet.add(info);
+    } else {
+      added = blockInfoSet.add(info);
+    }
     blockMap.put(info);
     
     if (added) {
       updateBytesToScan(info.getNumBytes(), info.lastScanTime);
     }
   }
-  
+
   private synchronized void delBlockInfo(BlockScanInfo info) {
     boolean exists = blockInfoSet.remove(info);
+    if (!exists){
+      exists = newBlockInfoSet.remove(info);
+    }
     blockMap.remove(info);
 
     if (exists) {
       updateBytesToScan(-info.getNumBytes(), info.lastScanTime);
     }
   }
-  
+
   /** Update blockMap by the given LogEntry */
   private synchronized void updateBlockInfo(LogEntry e) {
     BlockScanInfo info = blockMap.get(new Block(e.blockId, 0, e.genStamp));
@@ -249,7 +270,7 @@ class BlockPoolSliceScanner {
       delBlockInfo(info);
       info.lastScanTime = e.verificationTime;
       info.lastScanType = ScanType.VERIFICATION_SCAN;
-      addBlockInfo(info);
+      addBlockInfo(info, false);
     }
   }
 
@@ -275,14 +296,14 @@ class BlockPoolSliceScanner {
     info = new BlockScanInfo(block.getLocalBlock());    
     info.lastScanTime = getNewBlockScanTime();
     
-    addBlockInfo(info);
+    addBlockInfo(info, true);
     adjustThrottler();
   }
   
   /** Deletes the block from internal structures */
   synchronized void deleteBlock(Block block) {
     BlockScanInfo info = blockMap.get(block);
-    if ( info != null ) {
+    if (info != null) {
       delBlockInfo(info);
     }
   }
@@ -319,7 +340,7 @@ class BlockPoolSliceScanner {
     info.lastScanType = type;
     info.lastScanTime = now;
     info.lastScanOk = scanOk;
-    addBlockInfo(info);
+    addBlockInfo(info, false);
         
     // Don't update meta data if the verification failed.
     if (!scanOk) {
@@ -578,7 +599,7 @@ class BlockPoolSliceScanner {
           delBlockInfo(info);        
           info.lastScanTime = lastScanTime;
           lastScanTime += verifyInterval;
-          addBlockInfo(info);
+          addBlockInfo(info, false);
         }
       }
     }
@@ -674,12 +695,21 @@ class BlockPoolSliceScanner {
       throw e;
     } finally {
       rollVerificationLogs();
+      rollNewBlocksInfo();
       if (LOG.isDebugEnabled()) {
         LOG.debug("Done scanning block pool: " + blockPoolId);
       }
     }
   }
-  
+
+  // add new blocks to scan in next iteration
+  private synchronized void rollNewBlocksInfo() {
+    for (BlockScanInfo newBlock : newBlockInfoSet) {
+      blockInfoSet.add(newBlock);
+    }
+    newBlockInfoSet.clear();
+  }
+
   private synchronized void rollVerificationLogs() {
     if (verificationLog != null) {
       try {

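The heart of the fix (HDFS-6114): blocks that arrive during a scan pass go into newBlockInfoSet and are only merged into the sorted scan queue by rollNewBlocksInfo() at the end of the pass, so continuously written blocks can no longer keep pushing the scan cursor forward and prevent the verification log from rolling. The same defer-and-merge pattern in miniature, as a self-contained sketch with the element type simplified to Long:

    import java.util.TreeSet;

    public class DeferAndMerge {
      private final TreeSet<Long> scanQueue = new TreeSet<Long>(); // in-progress work
      private final TreeSet<Long> arrivals = new TreeSet<Long>();  // deferred additions

      public synchronized void add(long item) {
        // Mirrors addBlockInfo(info, true): park new items on the side
        // so they do not perturb the ordering of the current scan pass.
        if (!scanQueue.contains(item)) {
          arrivals.add(item);
        }
      }

      // Mirrors rollNewBlocksInfo(): called once per completed scan pass.
      public synchronized void roll() {
        scanQueue.addAll(arrivals);
        arrivals.clear();
      }
    }
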
+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -1074,10 +1074,11 @@ public class FSEditLog implements LogsPurgeable {
     logEdit(op);
   }
   
-  void logRemoveXAttrs(String src, List<XAttr> xAttrs) {
+  void logRemoveXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
     final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
     op.src = src;
     op.xAttrs = xAttrs;
+    logRpcIds(op, toLogRpcIds);
     logEdit(op);
   }
 

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -821,6 +821,10 @@ public class FSEditLogLoader {
       RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
       fsDir.unprotectedRemoveXAttrs(removeXAttrOp.src,
           removeXAttrOp.xAttrs);
+      if (toAddRetryCache) {
+        fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId,
+            removeXAttrOp.rpcCallId);
+      }
       break;
     }
     default:

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -3551,6 +3551,7 @@ public abstract class FSEditLogOp {
       XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
       src = p.getSrc();
       xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
+      readRpcIds(in, logVersion);
     }
 
     @Override
@@ -3561,18 +3562,22 @@ public abstract class FSEditLogOp {
       }
       b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
       b.build().writeDelimitedTo(out);
+      // clientId and callId
+      writeRpcIds(rpcClientId, rpcCallId, out);
     }
 
     @Override
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "SRC", src);
       appendXAttrsToXml(contentHandler, xAttrs);
+      appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
     }
 
     @Override
     void fromXml(Stanza st) throws InvalidXmlException {
       src = st.getValue("SRC");
       xAttrs = readXAttrsFromXml(st);
+      readRpcIdsFromXml(st);
     }
   }
   

+ 47 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -8279,11 +8279,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     nnConf.checkXAttrsConfigFlag();
     FSPermissionChecker pc = getPermissionChecker();
     boolean getAll = xAttrs == null || xAttrs.isEmpty();
-    List<XAttr> filteredXAttrs = null;
     if (!getAll) {
-      filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs);
-      if (filteredXAttrs.isEmpty()) {
-        return filteredXAttrs;
+      try {
+        XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs);
+      } catch (AccessControlException e) {
+        logAuditEvent(false, "getXAttrs", src);
+        throw e;
       }
     }
     checkOperation(OperationCategory.READ);
@@ -8302,15 +8303,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         if (filteredAll == null || filteredAll.isEmpty()) {
           return null;
         }
-        List<XAttr> toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size());
-        for (XAttr xAttr : filteredXAttrs) {
+        List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
+        for (XAttr xAttr : xAttrs) {
+          boolean foundIt = false;
           for (XAttr a : filteredAll) {
             if (xAttr.getNameSpace() == a.getNameSpace()
                 && xAttr.getName().equals(a.getName())) {
               toGet.add(a);
+              foundIt = true;
               break;
             }
           }
+          if (!foundIt) {
+            throw new IOException(
+                "At least one of the attributes provided was not found.");
+          }
         }
         return toGet;
       }
@@ -8344,17 +8351,42 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       readUnlock();
     }
   }
-  
+
+  /**
+   * Remove an xattr for a file or directory.
+   *
+   * @param src
+   *          - path to remove the xattr from
+   * @param xAttr
+   *          - xAttr to remove
+   * @throws AccessControlException
+   * @throws SafeModeException
+   * @throws UnresolvedLinkException
+   * @throws IOException
+   */
   void removeXAttr(String src, XAttr xAttr) throws IOException {
-    nnConf.checkXAttrsConfigFlag();
-    HdfsFileStatus resultingStat = null;
-    FSPermissionChecker pc = getPermissionChecker();
+    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return; // Return previous response
+    }
+    boolean success = false;
     try {
-      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+      removeXAttrInt(src, xAttr, cacheEntry != null);
+      success = true;
     } catch (AccessControlException e) {
       logAuditEvent(false, "removeXAttr", src);
       throw e;
+    } finally {
+      RetryCache.setState(cacheEntry, success);
     }
+  }
+
+  void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache)
+      throws IOException {
+    nnConf.checkXAttrsConfigFlag();
+    HdfsFileStatus resultingStat = null;
+    FSPermissionChecker pc = getPermissionChecker();
+      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
@@ -8368,12 +8400,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       xAttrs.add(xAttr);
       List<XAttr> removedXAttrs = dir.removeXAttrs(src, xAttrs);
       if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
-        getEditLog().logRemoveXAttrs(src, removedXAttrs);
+        getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
+      } else {
+        throw new IOException(
+            "No matching attributes found for remove operation");
       }
       resultingStat = getAuditFileInfo(src, false);
-    } catch (AccessControlException e) {
-      logAuditEvent(false, "removeXAttr", src);
-      throw e;
     } finally {
       writeUnlock();
     }

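The removeXAttr/removeXAttrInt split above is the NameNode's standard recipe for @AtMostOnce operations, together with the FSEditLog, FSEditLogLoader, and FSEditLogOp changes that persist and replay the RPC ids. A condensed, hedged sketch of that recipe against the public RetryCache API; the cache name, sizing values, and the operation itself are illustrative, not the NameNode's actual configuration.

    import java.io.IOException;

    import org.apache.hadoop.ipc.RetryCache;
    import org.apache.hadoop.ipc.RetryCache.CacheEntry;

    public abstract class RetryCacheRecipe {
      // 3% of heap for the cache and a 10-minute entry lifetime (in
      // nanoseconds) are assumed example values.
      private final RetryCache retryCache =
          new RetryCache("example", 0.03, 600L * 1000 * 1000 * 1000);

      public void doOperation(String src) throws IOException {
        CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
        if (cacheEntry != null && cacheEntry.isSuccess()) {
          return; // retried RPC: replay the recorded success, do not re-run
        }
        boolean success = false;
        try {
          // Log RPC ids into the edit log only when a retry is possible.
          doOperationInternal(src, cacheEntry != null);
          success = true;
        } finally {
          RetryCache.setState(cacheEntry, success); // record outcome for retries
        }
      }

      protected abstract void doOperationInternal(String src, boolean logRetryCache)
          throws IOException;
    }
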
+ 15 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.security.AccessControlException;
 
 import com.google.common.collect.Lists;
+import com.google.common.base.Preconditions;
 
 /**
  * There are four types of extended attributes <XAttr> defined by the
@@ -60,8 +61,20 @@ public class XAttrPermissionFilter {
     throw new AccessControlException("User doesn't have permission for xattr: "
         + XAttrHelper.getPrefixName(xAttr));
   }
-  
-  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc, 
+
+  static void checkPermissionForApi(FSPermissionChecker pc,
+                                    List<XAttr> xAttrs) throws AccessControlException {
+    Preconditions.checkArgument(xAttrs != null);
+    if (xAttrs.isEmpty()) {
+      return;
+    }
+
+    for (XAttr xAttr : xAttrs) {
+      checkPermissionForApi(pc, xAttr);
+    }
+  }
+
+  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
       List<XAttr> xAttrs) {
     assert xAttrs != null : "xAttrs can not be null";
     if (xAttrs == null || xAttrs.isEmpty()) {

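Note: the new list overload simply folds the existing single-attribute check over a batch, so multi-attribute operations fail fast on the first namespace violation. The rule being enforced, as a toy model (a simplification for illustration, not the HDFS implementation):

    // Assumed simplification: "user." is available to callers with file
    // access, "trusted." requires a superuser, and "system."/"security."
    // are reserved for NameNode internals.
    static boolean mayAccess(String xattrName, boolean isSuperUser) {
      if (xattrName.startsWith("user.")) {
        return true;
      }
      if (xattrName.startsWith("trusted.")) {
        return isSuperUser;
      }
      return false; // system. and security. are never exposed over the API
    }
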
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java

@@ -25,8 +25,8 @@ public class XAttrNameParam extends StringParam {
   /** Default parameter value. **/
   public static final String DEFAULT = "";
   
-  private static Domain DOMAIN = new Domain(NAME, 
-      Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+"));
+  private static Domain DOMAIN = new Domain(NAME,
+      Pattern.compile(".*"));
   
   public XAttrNameParam(final String str) {
     super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);

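Note: relaxing DOMAIN to ".*" takes xattr-name validation out of the WebHDFS parameter layer; a malformed name now travels to the NameNode, which rejects it with HadoopIllegalArgumentException (see the FSXAttrBaseTest and TestParam hunks below). The behavioural difference, as a self-contained check:

    import java.util.regex.Pattern;

    public class XAttrNameDomains {
      public static void main(String[] args) {
        Pattern before = Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+");
        Pattern after = Pattern.compile(".*");
        String name = "wackynamespace.foo";
        System.out.println(before.matcher(name).matches()); // false: rejected client-side
        System.out.println(after.matcher(name).matches());  // true: deferred to the NameNode
      }
    }
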
+ 69 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -2653,6 +2653,75 @@ public class TestDFSShell {
    }
  }
 
+  /*
+   * 1. Test that CLI throws an exception and returns non-0 when user does
+   * not have permission to read an xattr.
+   * 2. Test that CLI throws an exception and returns non-0 when a non-existent
+   * xattr is requested.
+   */
+  @Test (timeout = 120000)
+  public void testGetFAttrErrors() throws Exception {
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    MiniDFSCluster cluster = null;
+    PrintStream bakErr = null;
+    try {
+      final Configuration conf = new HdfsConfiguration();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+
+      final FileSystem fs = cluster.getFileSystem();
+      final Path p = new Path("/foo");
+      fs.mkdirs(p);
+      bakErr = System.err;
+
+      final FsShell fshell = new FsShell(conf);
+      final ByteArrayOutputStream out = new ByteArrayOutputStream();
+      System.setErr(new PrintStream(out));
+
+      // No permission for "other".
+      fs.setPermission(p, new FsPermission((short) 0700));
+
+      {
+        final int ret = ToolRunner.run(fshell, new String[] {
+            "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+        assertEquals("Returned should be 0", 0, ret);
+        out.reset();
+      }
+
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            int ret = ToolRunner.run(fshell, new String[] {
+                "-getfattr", "-n", "user.a1", "/foo"});
+            String str = out.toString();
+            assertTrue("xattr value was incorrectly returned",
+                str.indexOf("1234") == -1);
+            out.reset();
+            return null;
+          }
+        });
+
+      {
+        final int ret = ToolRunner.run(fshell, new String[]{
+            "-getfattr", "-n", "user.nonexistent", "/foo"});
+        String str = out.toString();
+        assertTrue("xattr value was incorrectly returned",
+          str.indexOf(
+              "getfattr: At least one of the attributes provided was not found")
+               >= 0);
+        out.reset();
+      }
+    } finally {
+      if (bakErr != null) {
+        System.setErr(bakErr);
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
  /**
   * Test that the server trash configuration is respected when
   * the client configuration is not set.

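Note: two harness details in the test above are worth calling out: ToolRunner.run returns the shell command's exit code, and swapping System.err lets the test grep the CLI's error text. The same technique, standalone (the path and command are illustrative):

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class ShellExitCodes {
      public static void main(String[] args) throws Exception {
        final PrintStream bakErr = System.err;
        final ByteArrayOutputStream err = new ByteArrayOutputStream();
        System.setErr(new PrintStream(err));
        try {
          int ret = ToolRunner.run(new FsShell(new Configuration()),
              new String[] {"-ls", "/no/such/path"});
          System.out.println("exit code: " + ret); // non-zero on failure
          System.out.print(err.toString());        // captured error message
        } finally {
          System.setErr(bakErr); // always restore stderr
        }
      }
    }
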
+ 330 - 21
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.FileNotFoundException;
 import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;
 import java.util.List;
@@ -46,6 +45,7 @@ import static org.apache.hadoop.fs.permission.AclEntryType.USER;
 import static org.apache.hadoop.fs.permission.FsAction.ALL;
 import static org.apache.hadoop.fs.permission.FsAction.READ;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import org.junit.After;
@@ -261,11 +261,12 @@ public class FSXAttrBaseTest {
      fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE, 
          XAttrSetFlag.REPLACE));
      Assert.fail("Setting xattr with empty name should fail.");
+    } catch (RemoteException e) {
+      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName());
+      GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
    } catch (HadoopIllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
-    } catch (IllegalArgumentException e) {
-      GenericTestUtils.assertExceptionContains("Invalid value: \"user.\" does " + 
-          "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
    }
    
    // Set xattr with invalid name: "a1"
@@ -274,11 +275,12 @@ public class FSXAttrBaseTest {
          XAttrSetFlag.REPLACE));
      Assert.fail("Setting xattr with invalid name prefix or without " +
          "name prefix should fail.");
+    } catch (RemoteException e) {
+      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName());
+      GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
    } catch (HadoopIllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
-    } catch (IllegalArgumentException e) {
-      GenericTestUtils.assertExceptionContains("Invalid value: \"a1\" does " + 
-          "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
    }
    
    // Set xattr without XAttrSetFlag
@@ -341,9 +343,18 @@ public class FSXAttrBaseTest {
  }
  
  /**
-   * Tests for getting xattr
-   * 1. To get xattr which does not exist.
-   * 2. To get multiple xattrs.
+   * getxattr tests. Test that getxattr throws an exception if any of
+   * the following are true:
+   * an xattr that was requested doesn't exist
+   * the caller specifies an unknown namespace
+   * the caller doesn't have access to the namespace
+   * the caller doesn't have permission to get the value of the xattr
+   * the caller does not have search access to the parent directory
+   * the caller has only read access to the owning directory
+   * the caller has only search access to the owning directory and
+   * execute/search access to the actual entity
+   * the caller does not have search access to the owning directory and read
+   * access to the actual entity
   */
  @Test(timeout = 120000)
  public void testGetXAttrs() throws Exception {
@@ -351,21 +362,159 @@ public class FSXAttrBaseTest {
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    
-    // XAttr does not exist.
-    byte[] value = fs.getXAttr(path, name3);
-    Assert.assertEquals(value, null);
+    /* An XAttr that was requested does not exist. */
+    try {
+      final byte[] value = fs.getXAttr(path, name3);
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          "At least one of the attributes provided was not found.", e);
+    }
    
-    List<String> names = Lists.newArrayList();
-    names.add(name1);
-    names.add(name2);
-    names.add(name3);
-    Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    /* Throw an exception if an xattr that was requested does not exist. */
+    {
+      final List<String> names = Lists.newArrayList();
+      names.add(name1);
+      names.add(name2);
+      names.add(name3);
+      try {
+        final Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
+        Assert.fail("expected IOException");
+      } catch (IOException e) {
+        GenericTestUtils.assertExceptionContains(
+            "At least one of the attributes provided was not found.", e);
+      }
+    }
    
    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);
+
+    /* Unknown namespace should throw an exception. */
+    try {
+      final byte[] xattr = fs.getXAttr(path, "wackynamespace.foo");
+      Assert.fail("expected IOException");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains
+          ("An XAttr name must be prefixed with user/trusted/security/system, " +
+           "followed by a '.'",
+          e);
+    }
+
+    /*
+     * The 'trusted' namespace should not be accessible and should throw an
+     * exception.
+     */
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    fs.setXAttr(path, "trusted.foo", "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(path, "trusted.foo");
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
+    }
+
+    fs.setXAttr(path, name1, "1234".getBytes());
+
+    /*
+     * Test that an exception is thrown if the caller doesn't have permission to
+     * get the value of the xattr.
+     */
+
+    /* Set access so that only the owner has access. */
+    fs.setPermission(path, new FsPermission((short) 0700));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(path, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * The caller must have search access to the parent directory.
+     */
+    final Path childDir = new Path(path, "child" + pathCount);
+    /* Set access to parent so that only the owner has access. */
+    FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
+    fs.setXAttr(childDir, name1, "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /* Check that read access to the owning directory is not good enough. */
+    fs.setPermission(path, new FsPermission((short) 0704));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that search access to the owning directory and search/execute
+     * access to the actual entity with extended attributes is not good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0701));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that search access to the owning directory and read access to
+     * the actual entity with the extended attribute is good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0704));
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          final byte[] xattr = userFs.getXAttr(childDir, name1);
+          return null;
+        }
+      });
  }
  
  /**
@@ -402,6 +551,166 @@ public class FSXAttrBaseTest {
    fs.removeXAttr(path, name3);
  }
 
+  /**
+   * removexattr tests. Test that removexattr throws an exception if any of
+   * the following are true:
+   * an xattr that was requested doesn't exist
+   * the caller specifies an unknown namespace
+   * the caller doesn't have access to the namespace
+   * the caller doesn't have permission to get the value of the xattr
+   * the caller does not have "execute" (scan) access to the parent directory
+   * the caller has only read access to the owning directory
+   * the caller has only execute access to the owning directory and execute
+   * access to the actual entity
+   * the caller does not have execute access to the owning directory and write
+   * access to the actual entity
+   */
+  @Test(timeout = 120000)
+  public void testRemoveXAttrPermissions() throws Exception {
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
+
+    try {
+      fs.removeXAttr(path, name2);
+      fs.removeXAttr(path, name2);
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("No matching attributes found", e);
+    }
+
+    /* Unknown namespace should throw an exception. */
+    final String expectedExceptionString = "An XAttr name must be prefixed " +
+        "with user/trusted/security/system, followed by a '.'";
+    try {
+      fs.removeXAttr(path, "wackynamespace.foo");
+      Assert.fail("expected IOException");
+    } catch (RemoteException e) {
+      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName());
+      GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
+    } catch (HadoopIllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
+    }
+
+    /*
+     * The 'trusted' namespace should not be accessible and should throw an
+     * exception.
+     */
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    fs.setXAttr(path, "trusted.foo", "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(path, "trusted.foo");
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
+    } finally {
+      fs.removeXAttr(path, "trusted.foo");
+    }
+
+    /*
+     * Test that an exception is thrown if the caller doesn't have permission to
+     * get the value of the xattr.
+     */
+
+    /* Set access so that only the owner has access. */
+    fs.setPermission(path, new FsPermission((short) 0700));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(path, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * The caller must have "execute" (scan) access to the parent directory.
+     */
+    final Path childDir = new Path(path, "child" + pathCount);
+    /* Set access to parent so that only the owner has access. */
+    FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
+    fs.setXAttr(childDir, name1, "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /* Check that read access to the owning directory is not good enough. */
+    fs.setPermission(path, new FsPermission((short) 0704));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that execute access to the owning directory and scan access to
+     * the actual entity with extended attributes is not good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0701));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that execute access to the owning directory and write access to
+     * the actual entity with extended attributes is good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0706));
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          userFs.removeXAttr(childDir, name1);
+          return null;
+        }
+      });
+  }
+
  @Test(timeout = 120000)
  public void testRenameFileWithXAttr() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));

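Note: taken together, testGetXAttrs and testRemoveXAttrPermissions pin down a POSIX-style rule for user-namespace xattrs: the caller needs execute (search) permission on every directory on the path, plus read on the entity itself to get an xattr or write on it to remove one; read or execute on the owning directory alone is never sufficient. Restated in boolean form (a paraphrase of the tests, not HDFS code):

    final class XAttrAccessRule {
      // e.g. parent 0701 plus entity 0704 passes for get, per the last case
      static boolean canGet(boolean searchOnPath, boolean readOnEntity) {
        return searchOnPath && readOnEntity;
      }
      // e.g. parent 0701 plus entity 0706 passes for remove
      static boolean canRemove(boolean searchOnPath, boolean writeOnEntity) {
        return searchOnPath && writeOnEntity;
      }
    }
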
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java

@@ -415,7 +415,7 @@ public class TestNamenodeRetryCache {
     
     
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
         (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     
     
     Map<CacheEntry, CacheEntry> oldEntries = 
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
         new HashMap<CacheEntry, CacheEntry>();
@@ -434,7 +434,7 @@ public class TestNamenodeRetryCache {
     assertTrue(namesystem.hasRetryCache());
     assertTrue(namesystem.hasRetryCache());
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
         .getRetryCache().getCacheSet();
         .getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     iter = cacheSet.iterator();
    iter = cacheSet.iterator();
    while (iter.hasNext()) {
      CacheEntry entry = iter.next();
+ 52 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java

@@ -160,7 +160,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn0 = cluster.getNamesystem(0);
     FSNamesystem fsn0 = cluster.getNamesystem(0);
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
         (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     
     
     Map<CacheEntry, CacheEntry> oldEntries = 
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
         new HashMap<CacheEntry, CacheEntry>();
@@ -181,7 +181,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn1 = cluster.getNamesystem(1);
     FSNamesystem fsn1 = cluster.getNamesystem(1);
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
         .getRetryCache().getCacheSet();
         .getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     iter = cacheSet.iterator();
    iter = cacheSet.iterator();
    while (iter.hasNext()) {
      CacheEntry entry = iter.next();
     }
     }
   }
   }
 
 
+  /** removeXAttr */
+  class RemoveXAttrOp extends AtMostOnceOp {
+    private final String src;
+
+    RemoveXAttrOp(DFSClient client, String src) {
+      super("removeXAttr", client);
+      this.src = src;
+    }
+
+    @Override
+    void prepare() throws Exception {
+      Path p = new Path(src);
+      if (!dfs.exists(p)) {
+        DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
+        client.setXAttr(src, "user.key", "value".getBytes(),
+          EnumSet.of(XAttrSetFlag.CREATE));
+      }
+    }
+
+    @Override
+    void invoke() throws Exception {
+      client.removeXAttr(src, "user.key");
+    }
+
+    @Override
+    boolean checkNamenodeBeforeReturn() throws Exception {
+      for (int i = 0; i < CHECKTIMES; i++) {
+        Map<String, byte[]> iter = dfs.getXAttrs(new Path(src));
+        Set<String> keySet = iter.keySet();
+        if (!keySet.contains("user.key")) {
+          return true;
+        }
+        Thread.sleep(1000);
+      }
+      return false;
+    }
+
+    @Override
+    Object getResult() {
+      return null;
+    }
+  }
+
  @Test (timeout=60000)
  public void testCreateSnapshot() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
@@ -1183,6 +1226,13 @@ public class TestRetryCacheWithHA {
    testClientRetryWithFailover(op);
  }
 
+  @Test (timeout=60000)
+  public void testRemoveXAttr() throws Exception {
+    DFSClient client = genClientWithDummyHandler();
+    AtMostOnceOp op = new RemoveXAttrOp(client, "/removexattr");
+    testClientRetryWithFailover(op);
+  }
+
  /**
   * When NN failover happens, if the client did not receive the response and
   * send a retry request to the other NN, the same response should be recieved

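Note: the 22 -> 23 bumps here and in TestNamenodeRetryCache reflect one extra operation now tracked by the retry cache; removeXAttr joins the non-idempotent set in this change, which is presumably what the one-line ClientProtocol.java edit in the file list above is. An assumed sketch of that marking, modeled on the existing annotations in the interface:

    // Retried calls must not re-execute; the NameNode answers a retry from
    // the RetryCache instead, which is what RemoveXAttrOp verifies above.
    @AtMostOnce
    public void removeXAttr(String src, XAttr xAttr) throws IOException;
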
+ 0 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java

@@ -355,12 +355,6 @@ public class TestParam {
   public void testXAttrNameParam() {
   public void testXAttrNameParam() {
     final XAttrNameParam p = new XAttrNameParam("user.a1");
     final XAttrNameParam p = new XAttrNameParam("user.a1");
     Assert.assertEquals(p.getXAttrName(), "user.a1");
     Assert.assertEquals(p.getXAttrName(), "user.a1");
-    try {
-      new XAttrNameParam("a1");
-      Assert.fail();
-    } catch (IllegalArgumentException e) {
-      LOG.info("EXPECTED: " + e);
-    }
   }
  }
  
  @Test
BIN
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored


+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml

@@ -986,6 +986,8 @@
        <NAMESPACE>USER</NAMESPACE>
        <NAME>a2</NAME>
      </XATTR>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>82</RPC_CALLID>
    </DATA>
  </RECORD>
  <RECORD>

+ 12 - 0
hadoop-yarn-project/CHANGES.txt

@@ -62,6 +62,9 @@ Release 2.6.0 - UNRELEASED
     YARN-2295. Refactored DistributedShell to use public APIs of protocol records.
     YARN-2295. Refactored DistributedShell to use public APIs of protocol records.
     (Li Lu via jianhe)
     (Li Lu via jianhe)
 
 
+    YARN-1342. Recover container tokens upon nodemanager restart. (Jason Lowe via
+    devaraj)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
   BUG FIXES
   BUG FIXES
@@ -91,6 +94,12 @@ Release 2.6.0 - UNRELEASED
    YARN-2273. NPE in ContinuousScheduling thread when we lose a node. 
    (Wei Yan via kasha)

+    YARN-2313. Livelock can occur in FairScheduler when there are lots of
+    running apps (Tsuyoshi Ozawa via Sandy Ryza)
+
+    YARN-2147. client lacks delegation token exception details when
+    application submit fails (Chen He via jlowe)
+
Release 2.5.0 - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -297,6 +306,9 @@ Release 2.5.0 - UNRELEASED
	YARN-1408 Preemption caused Invalid State Event: ACQUIRED at KILLED and 
	caused a task timeout for 30mins. (Sunil G via mayank)

+    YARN-2300. Improved the documentation of the sample requests for RM REST API -
+    submitting an app. (Varun Vasudev via zjshen)
+
  OPTIMIZATIONS

  BUG FIXES 

+ 6 - 0
hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml

@@ -194,6 +194,12 @@
     <Field name="scheduleAsynchronously" />
     <Field name="scheduleAsynchronously" />
     <Bug pattern="IS2_INCONSISTENT_SYNC" />
     <Bug pattern="IS2_INCONSISTENT_SYNC" />
   </Match>
   </Match>
+  <!-- Inconsistent sync warning - updateInterval is only initialized once and never changed -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler" />
+    <Field name="updateInterval" />
+    <Bug pattern="IS2_INCONSISTENT_SYNC" />
+  </Match>
   <!-- Inconsistent sync warning - numRetries is only initialized once and never changed -->
   <!-- Inconsistent sync warning - numRetries is only initialized once and never changed -->
   <Match>
   <Match>
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore" />
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore" />

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java

@@ -43,7 +43,7 @@ public class BaseContainerTokenSecretManager extends
   private static Log LOG = LogFactory
   private static Log LOG = LogFactory
     .getLog(BaseContainerTokenSecretManager.class);
     .getLog(BaseContainerTokenSecretManager.class);
 
 
-  private int serialNo = new SecureRandom().nextInt();
+  protected int serialNo = new SecureRandom().nextInt();
 
 
   protected final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
   protected final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
   protected final Lock readLock = readWriteLock.readLock();
   protected final Lock readLock = readWriteLock.readLock();

+ 3 - 3
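Note: serialNo widens from private to protected so the container-token subclass can reseed it after a restart instead of starting again from a fresh random value; the recover() method added later in this commit does exactly that:

    // From recover() in NMContainerTokenSecretManager, further down:
    super.serialNo = super.currentMasterKey.getMasterKey().getKeyId() + 1;
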
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java

@@ -173,8 +173,8 @@ public class NodeManager extends CompositeService
       NMContainerTokenSecretManager containerTokenSecretManager)
       NMContainerTokenSecretManager containerTokenSecretManager)
           throws IOException {
           throws IOException {
     if (nmStore.canRecover()) {
     if (nmStore.canRecover()) {
-      nmTokenSecretManager.recover(nmStore.loadNMTokenState());
-      // TODO: recover containerTokenSecretManager
+      nmTokenSecretManager.recover();
+      containerTokenSecretManager.recover();
     }
     }
   }
   }
 
 
@@ -190,7 +190,7 @@ public class NodeManager extends CompositeService
     initAndStartRecoveryStore(conf);
     initAndStartRecoveryStore(conf);
 
 
     NMContainerTokenSecretManager containerTokenSecretManager =
     NMContainerTokenSecretManager containerTokenSecretManager =
-        new NMContainerTokenSecretManager(conf);
+        new NMContainerTokenSecretManager(conf, nmStore);
 
 
     NMTokenSecretManagerInNM nmTokenSecretManager =
     NMTokenSecretManagerInNM nmTokenSecretManager =
         new NMTokenSecretManagerInNM(nmStore);
         new NMTokenSecretManagerInNM(nmStore);

+ 104 - 13
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
@@ -90,6 +91,12 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
       NM_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX;
       NM_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX;
   private static final String NM_TOKENS_PREV_MASTER_KEY =
   private static final String NM_TOKENS_PREV_MASTER_KEY =
       NM_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX;
       NM_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX;
+  private static final String CONTAINER_TOKENS_KEY_PREFIX =
+      "ContainerTokens/";
+  private static final String CONTAINER_TOKENS_CURRENT_MASTER_KEY =
+      CONTAINER_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX;
+  private static final String CONTAINER_TOKENS_PREV_MASTER_KEY =
+      CONTAINER_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX;
 
 
   private DB db;
   private DB db;
 
 
@@ -141,7 +148,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
             key.substring(0, userEndPos+1)));
             key.substring(0, userEndPos+1)));
       }
       }
     } catch (DBException e) {
     } catch (DBException e) {
-      throw new IOException(e.getMessage(), e);
+      throw new IOException(e);
     } finally {
     } finally {
       if (iter != null) {
       if (iter != null) {
         iter.close();
         iter.close();
@@ -260,7 +267,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
     try {
     try {
       db.put(bytes(key), proto.toByteArray());
       db.put(bytes(key), proto.toByteArray());
     } catch (DBException e) {
     } catch (DBException e) {
-      throw new IOException(e.getMessage(), e);
+      throw new IOException(e);
     }
     }
   }
   }
 
 
@@ -283,7 +290,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
         batch.close();
         batch.close();
       }
       }
     } catch (DBException e) {
     } catch (DBException e) {
-      throw new IOException(e.getMessage(), e);
+      throw new IOException(e);
     }
     }
   }
   }
 
 
@@ -306,7 +313,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
         batch.close();
         batch.close();
       }
       }
     } catch (DBException e) {
     } catch (DBException e) {
-      throw new IOException(e.getMessage(), e);
+      throw new IOException(e);
     }
     }
   }
   }
 
 
@@ -355,7 +362,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
             DeletionServiceDeleteTaskProto.parseFrom(entry.getValue()));
             DeletionServiceDeleteTaskProto.parseFrom(entry.getValue()));
       }
       }
     } catch (DBException e) {
     } catch (DBException e) {
-      throw new IOException(e.getMessage(), e);
+      throw new IOException(e);
     } finally {
     } finally {
       if (iter != null) {
       if (iter != null) {
         iter.close();
         iter.close();
@@ -371,7 +378,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
     try {
     try {
       db.put(bytes(key), taskProto.toByteArray());
       db.put(bytes(key), taskProto.toByteArray());
     } catch (DBException e) {
     } catch (DBException e) {
-      throw new IOException(e.getMessage(), e);
+      throw new IOException(e);
     }
     }
   }
   }
 
 
@@ -381,14 +388,14 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
     try {
     try {
       db.delete(bytes(key));
       db.delete(bytes(key));
     } catch (DBException e) {
     } catch (DBException e) {
-      throw new IOException(e.getMessage(), e);
+      throw new IOException(e);
     }
     }
   }
   }
 
 
 
 
   @Override
   @Override
-  public RecoveredNMTokenState loadNMTokenState() throws IOException {
-    RecoveredNMTokenState state = new RecoveredNMTokenState();
+  public RecoveredNMTokensState loadNMTokensState() throws IOException {
+    RecoveredNMTokensState state = new RecoveredNMTokensState();
     state.applicationMasterKeys =
     state.applicationMasterKeys =
         new HashMap<ApplicationAttemptId, MasterKey>();
         new HashMap<ApplicationAttemptId, MasterKey>();
     LeveldbIterator iter = null;
     LeveldbIterator iter = null;
@@ -420,7 +427,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
         }
         }
       }
       }
     } catch (DBException e) {
     } catch (DBException e) {
-      throw new IOException(e.getMessage(), e);
+      throw new IOException(e);
     } finally {
     } finally {
       if (iter != null) {
       if (iter != null) {
         iter.close();
         iter.close();
@@ -454,7 +461,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
     try {
     try {
       db.delete(bytes(key));
       db.delete(bytes(key));
     } catch (DBException e) {
     } catch (DBException e) {
-      throw new IOException(e.getMessage(), e);
+      throw new IOException(e);
     }
     }
   }
   }
 
 
@@ -468,7 +475,91 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
     try {
     try {
       db.put(bytes(dbKey), pb.getProto().toByteArray());
       db.put(bytes(dbKey), pb.getProto().toByteArray());
     } catch (DBException e) {
     } catch (DBException e) {
-      throw new IOException(e.getMessage(), e);
+      throw new IOException(e);
+    }
+  }
+
+
+  @Override
+  public RecoveredContainerTokensState loadContainerTokensState()
+      throws IOException {
+    RecoveredContainerTokensState state = new RecoveredContainerTokensState();
+    state.activeTokens = new HashMap<ContainerId, Long>();
+    LeveldbIterator iter = null;
+    try {
+      iter = new LeveldbIterator(db);
+      iter.seek(bytes(CONTAINER_TOKENS_KEY_PREFIX));
+      final int containerTokensKeyPrefixLength =
+          CONTAINER_TOKENS_KEY_PREFIX.length();
+      while (iter.hasNext()) {
+        Entry<byte[], byte[]> entry = iter.next();
+        String fullKey = asString(entry.getKey());
+        if (!fullKey.startsWith(CONTAINER_TOKENS_KEY_PREFIX)) {
+          break;
+        }
+        String key = fullKey.substring(containerTokensKeyPrefixLength);
+        if (key.equals(CURRENT_MASTER_KEY_SUFFIX)) {
+          state.currentMasterKey = parseMasterKey(entry.getValue());
+        } else if (key.equals(PREV_MASTER_KEY_SUFFIX)) {
+          state.previousMasterKey = parseMasterKey(entry.getValue());
+        } else if (key.startsWith(ConverterUtils.CONTAINER_PREFIX)) {
+          loadContainerToken(state, fullKey, key, entry.getValue());
+        }
+      }
+    } catch (DBException e) {
+      throw new IOException(e);
+    } finally {
+      if (iter != null) {
+        iter.close();
+      }
+    }
+    return state;
+  }
+
+  private static void loadContainerToken(RecoveredContainerTokensState state,
+      String key, String containerIdStr, byte[] value) throws IOException {
+    ContainerId containerId;
+    Long expTime;
+    try {
+      containerId = ConverterUtils.toContainerId(containerIdStr);
+      expTime = Long.parseLong(asString(value));
+    } catch (IllegalArgumentException e) {
+      throw new IOException("Bad container token state for " + key, e);
+    }
+    state.activeTokens.put(containerId, expTime);
+  }
+
+  @Override
+  public void storeContainerTokenCurrentMasterKey(MasterKey key)
+      throws IOException {
+    storeMasterKey(CONTAINER_TOKENS_CURRENT_MASTER_KEY, key);
+  }
+
+  @Override
+  public void storeContainerTokenPreviousMasterKey(MasterKey key)
+      throws IOException {
+    storeMasterKey(CONTAINER_TOKENS_PREV_MASTER_KEY, key);
+  }
+
+  @Override
+  public void storeContainerToken(ContainerId containerId, Long expTime)
+      throws IOException {
+    String key = CONTAINER_TOKENS_KEY_PREFIX + containerId;
+    try {
+      db.put(bytes(key), bytes(expTime.toString()));
+    } catch (DBException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public void removeContainerToken(ContainerId containerId)
+      throws IOException {
+    String key = CONTAINER_TOKENS_KEY_PREFIX + containerId;
+    try {
+      db.delete(bytes(key));
+    } catch (DBException e) {
+      throw new IOException(e);
     }
    }
  }

     try {
     try {
       db.put(bytes(key), data);
       db.put(bytes(key), data);
     } catch (DBException e) {
     } catch (DBException e) {
-      throw new IOException(e.getMessage(), e);
+      throw new IOException(e);
     }
     }
   }
   }
 
 

+ 29 - 1
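Note: loadContainerTokensState recovers everything with a single seek to the "ContainerTokens/" prefix and a walk that stops at the first non-matching key; the two master keys live under fixed suffixes and each live container gets one key whose value is its expiry time as text. The same scan pattern over a sorted map, self-contained (keys and values are illustrative):

    import java.util.Map;
    import java.util.TreeMap;

    public class PrefixScan {
      public static void main(String[] args) {
        // A TreeMap stands in for leveldb's sorted key space.
        TreeMap<String, String> db = new TreeMap<String, String>();
        db.put("ContainerTokens/CurrentMasterKey", "<master key bytes>");
        db.put("ContainerTokens/container_1_0001_01_000001", "1406000000000");
        db.put("NMTokens/CurrentMasterKey", "<master key bytes>");
        String prefix = "ContainerTokens/";
        // Equivalent of iter.seek(prefix), then walk until the prefix breaks.
        for (Map.Entry<String, String> e : db.tailMap(prefix).entrySet()) {
          if (!e.getKey().startsWith(prefix)) {
            break; // past the end of this key family
          }
          System.out.println(e.getKey() + " -> " + e.getValue());
        }
      }
    }
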
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java

@@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
@@ -80,7 +81,7 @@ public class NMNullStateStoreService extends NMStateStoreService {
   }
   }
 
 
   @Override
   @Override
-  public RecoveredNMTokenState loadNMTokenState() throws IOException {
+  public RecoveredNMTokensState loadNMTokensState() throws IOException {
     throw new UnsupportedOperationException(
     throw new UnsupportedOperationException(
         "Recovery not supported by this state store");
         "Recovery not supported by this state store");
   }
   }
@@ -105,6 +106,33 @@ public class NMNullStateStoreService extends NMStateStoreService {
       throws IOException {
       throws IOException {
   }
   }
 
 
+  @Override
+  public RecoveredContainerTokensState loadContainerTokensState()
+      throws IOException {
+    throw new UnsupportedOperationException(
+        "Recovery not supported by this state store");
+  }
+
+  @Override
+  public void storeContainerTokenCurrentMasterKey(MasterKey key)
+      throws IOException {
+  }
+
+  @Override
+  public void storeContainerTokenPreviousMasterKey(MasterKey key)
+      throws IOException {
+  }
+
+  @Override
+  public void storeContainerToken(ContainerId containerId,
+      Long expirationTime) throws IOException {
+  }
+
+  @Override
+  public void removeContainerToken(ContainerId containerId)
+      throws IOException {
+  }
+
  @Override
  protected void initStorage(Configuration conf) throws IOException {
  }

+ 38 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
@@ -102,7 +103,7 @@ public abstract class NMStateStoreService extends AbstractService {
     }
     }
   }
   }
 
 
-  public static class RecoveredNMTokenState {
+  public static class RecoveredNMTokensState {
     MasterKey currentMasterKey;
     MasterKey currentMasterKey;
     MasterKey previousMasterKey;
     MasterKey previousMasterKey;
     Map<ApplicationAttemptId, MasterKey> applicationMasterKeys;
     Map<ApplicationAttemptId, MasterKey> applicationMasterKeys;
@@ -120,6 +121,24 @@ public abstract class NMStateStoreService extends AbstractService {
     }
     }
   }
   }
 
 
+  public static class RecoveredContainerTokensState {
+    MasterKey currentMasterKey;
+    MasterKey previousMasterKey;
+    Map<ContainerId, Long> activeTokens;
+
+    public MasterKey getCurrentMasterKey() {
+      return currentMasterKey;
+    }
+
+    public MasterKey getPreviousMasterKey() {
+      return previousMasterKey;
+    }
+
+    public Map<ContainerId, Long> getActiveTokens() {
+      return activeTokens;
+    }
+  }
+
  /** Initialize the state storage */
  @Override
  public void serviceInit(Configuration conf) throws IOException {
@@ -193,7 +212,8 @@ public abstract class NMStateStoreService extends AbstractService {
  public abstract void removeDeletionTask(int taskId) throws IOException;


-  public abstract RecoveredNMTokenState loadNMTokenState() throws IOException;
+  public abstract RecoveredNMTokensState loadNMTokensState()
+      throws IOException;

  public abstract void storeNMTokenCurrentMasterKey(MasterKey key)
      throws IOException;
@@ -208,6 +228,22 @@ public abstract class NMStateStoreService extends AbstractService {
      ApplicationAttemptId attempt) throws IOException;


+  public abstract RecoveredContainerTokensState loadContainerTokensState()
+      throws IOException;
+
+  public abstract void storeContainerTokenCurrentMasterKey(MasterKey key)
+      throws IOException;
+
+  public abstract void storeContainerTokenPreviousMasterKey(MasterKey key)
+      throws IOException;
+
+  public abstract void storeContainerToken(ContainerId containerId,
+      Long expirationTime) throws IOException;
+
+  public abstract void removeContainerToken(ContainerId containerId)
+      throws IOException;
+
+
  protected abstract void initStorage(Configuration conf) throws IOException;

  protected abstract void startStorage() throws IOException;

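Note: every NMStateStoreService subclass must now supply the five container-token operations. A minimal in-memory version, in the spirit of the NMMemoryStateStoreService test double below (field names are mine; assumes the same package, since the recovered-state fields are package-private):

    private MasterKey ctCurrentKey;
    private MasterKey ctPreviousKey;
    private final Map<ContainerId, Long> ctTokens =
        new HashMap<ContainerId, Long>();

    @Override
    public synchronized RecoveredContainerTokensState loadContainerTokensState() {
      RecoveredContainerTokensState state = new RecoveredContainerTokensState();
      state.currentMasterKey = ctCurrentKey;
      state.previousMasterKey = ctPreviousKey;
      state.activeTokens = new HashMap<ContainerId, Long>(ctTokens);
      return state;
    }

    @Override
    public synchronized void storeContainerTokenCurrentMasterKey(MasterKey key) {
      ctCurrentKey = key;
    }

    @Override
    public synchronized void storeContainerTokenPreviousMasterKey(MasterKey key) {
      ctPreviousKey = key;
    }

    @Override
    public synchronized void storeContainerToken(ContainerId id, Long expTime) {
      ctTokens.put(id, expTime);
    }

    @Override
    public synchronized void removeContainerToken(ContainerId id) {
      ctTokens.remove(id);
    }
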
+ 88 - 17
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java

@@ -18,6 +18,7 @@
 
 
 package org.apache.hadoop.yarn.server.nodemanager.security;
 package org.apache.hadoop.yarn.server.nodemanager.security;
 
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Iterator;
 import java.util.List;
 import java.util.List;
@@ -33,6 +34,9 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerTokensState;
 import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;
 
 
@@ -49,14 +53,74 @@ public class NMContainerTokenSecretManager extends
   
   
   private MasterKeyData previousMasterKey;
   private MasterKeyData previousMasterKey;
   private final TreeMap<Long, List<ContainerId>> recentlyStartedContainerTracker;
   private final TreeMap<Long, List<ContainerId>> recentlyStartedContainerTracker;
-
+  private final NMStateStoreService stateStore;
   
   
   private String nodeHostAddr;
   private String nodeHostAddr;
   
   
   public NMContainerTokenSecretManager(Configuration conf) {
   public NMContainerTokenSecretManager(Configuration conf) {
+    this(conf, new NMNullStateStoreService());
+  }
+
+  public NMContainerTokenSecretManager(Configuration conf,
+      NMStateStoreService stateStore) {
    super(conf);
    recentlyStartedContainerTracker =
        new TreeMap<Long, List<ContainerId>>();
+    this.stateStore = stateStore;
+  }
+
+  public synchronized void recover()
+      throws IOException {
+    RecoveredContainerTokensState state =
+        stateStore.loadContainerTokensState();
+    MasterKey key = state.getCurrentMasterKey();
+    if (key != null) {
+      super.currentMasterKey =
+          new MasterKeyData(key, createSecretKey(key.getBytes().array()));
+    }
+
+    key = state.getPreviousMasterKey();
+    if (key != null) {
+      previousMasterKey =
+          new MasterKeyData(key, createSecretKey(key.getBytes().array()));
+    }
+
+    // restore the serial number from the current master key
+    if (super.currentMasterKey != null) {
+      super.serialNo = super.currentMasterKey.getMasterKey().getKeyId() + 1;
+    }
+
+    for (Entry<ContainerId, Long> entry : state.getActiveTokens().entrySet()) {
+      ContainerId containerId = entry.getKey();
+      Long expTime = entry.getValue();
+      List<ContainerId> containerList =
+          recentlyStartedContainerTracker.get(expTime);
+      if (containerList == null) {
+        containerList = new ArrayList<ContainerId>();
+        recentlyStartedContainerTracker.put(expTime, containerList);
+      }
+      if (!containerList.contains(containerId)) {
+        containerList.add(containerId);
+      }
+    }
+  }
+
+  private void updateCurrentMasterKey(MasterKeyData key) {
+    super.currentMasterKey = key;
+    try {
+      stateStore.storeContainerTokenCurrentMasterKey(key.getMasterKey());
+    } catch (IOException e) {
+      LOG.error("Unable to update current master key in state store", e);
+    }
+  }
+
+  private void updatePreviousMasterKey(MasterKeyData key) {
+    previousMasterKey = key;
+    try {
+      stateStore.storeContainerTokenPreviousMasterKey(key.getMasterKey());
+    } catch (IOException e) {
+      LOG.error("Unable to update previous master key in state store", e);
+    }
  }

  /**
@@ -68,21 +132,16 @@ public class NMContainerTokenSecretManager extends
   */
  @Private
  public synchronized void setMasterKey(MasterKey masterKeyRecord) {
-    LOG.info("Rolling master-key for container-tokens, got key with id "
-        + masterKeyRecord.getKeyId());
-    if (super.currentMasterKey == null) {
-      super.currentMasterKey =
-          new MasterKeyData(masterKeyRecord, createSecretKey(masterKeyRecord
-            .getBytes().array()));
-    } else {
-      if (super.currentMasterKey.getMasterKey().getKeyId() != masterKeyRecord
-          .getKeyId()) {
-        // Update keys only if the key has changed.
-        this.previousMasterKey = super.currentMasterKey;
-        super.currentMasterKey =
-            new MasterKeyData(masterKeyRecord, createSecretKey(masterKeyRecord
-              .getBytes().array()));
+    // Update keys only if the key has changed.
+    if (super.currentMasterKey == null || super.currentMasterKey.getMasterKey()
+          .getKeyId() != masterKeyRecord.getKeyId()) {
+      LOG.info("Rolling master-key for container-tokens, got key with id "
+          + masterKeyRecord.getKeyId());
+      if (super.currentMasterKey != null) {
+        updatePreviousMasterKey(super.currentMasterKey);
      }
+      updateCurrentMasterKey(new MasterKeyData(masterKeyRecord,
+          createSecretKey(masterKeyRecord.getBytes().array())));
     }
  }

 
@@ -137,14 +196,19 @@ public class NMContainerTokenSecretManager extends
 
 
     removeAnyContainerTokenIfExpired();
     removeAnyContainerTokenIfExpired();
     
     
+    ContainerId containerId = tokenId.getContainerID();
     Long expTime = tokenId.getExpiryTimeStamp();
     Long expTime = tokenId.getExpiryTimeStamp();
     // We might have multiple containers with same expiration time.
     // We might have multiple containers with same expiration time.
     if (!recentlyStartedContainerTracker.containsKey(expTime)) {
     if (!recentlyStartedContainerTracker.containsKey(expTime)) {
       recentlyStartedContainerTracker
       recentlyStartedContainerTracker
         .put(expTime, new ArrayList<ContainerId>());
         .put(expTime, new ArrayList<ContainerId>());
     }
     }
-    recentlyStartedContainerTracker.get(expTime).add(tokenId.getContainerID());
-
+    recentlyStartedContainerTracker.get(expTime).add(containerId);
+    try {
+      stateStore.storeContainerToken(containerId, expTime);
+    } catch (IOException e) {
+      LOG.error("Unable to store token for container " + containerId, e);
+    }
   }
   }
 
 
   protected synchronized void removeAnyContainerTokenIfExpired() {
   protected synchronized void removeAnyContainerTokenIfExpired() {
@@ -155,6 +219,13 @@ public class NMContainerTokenSecretManager extends
     while (containersI.hasNext()) {
     while (containersI.hasNext()) {
       Entry<Long, List<ContainerId>> containerEntry = containersI.next();
       Entry<Long, List<ContainerId>> containerEntry = containersI.next();
       if (containerEntry.getKey() < currTime) {
       if (containerEntry.getKey() < currTime) {
+        for (ContainerId container : containerEntry.getValue()) {
+          try {
+            stateStore.removeContainerToken(container);
+          } catch (IOException e) {
+            LOG.error("Unable to remove token for container " + container, e);
+          }
+        }
        containersI.remove();
      } else {
        break;

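The NMContainerTokenSecretManager changes above follow a persist-then-roll pattern: when a key with a new id arrives, the old current key is written to the state store as the previous key before the new key is installed, active container tokens are stored on start and removed on expiry, and store failures are logged rather than propagated. A minimal sketch of the key-rolling half of that pattern, assuming a hypothetical KeyValueStore interface in place of the YARN state-store API:

import java.io.IOException;

interface KeyValueStore {
  void storeCurrentKey(byte[] key) throws IOException;
  void storePreviousKey(byte[] key) throws IOException;
}

class RollingKeyManager {
  private final KeyValueStore store;
  private int currentKeyId = -1;
  private byte[] currentKey;
  private byte[] previousKey;

  RollingKeyManager(KeyValueStore store) {
    this.store = store;
  }

  // Mirrors setMasterKey above: roll only when the key id changes, and
  // persist each key as it is installed so a restarted process can
  // recover both the current and the previous key.
  synchronized void setKey(int keyId, byte[] key) {
    if (currentKey == null || currentKeyId != keyId) {
      if (currentKey != null) {
        previousKey = currentKey;
        try {
          store.storePreviousKey(previousKey);
        } catch (IOException e) {
          System.err.println("Unable to update previous key: " + e);
        }
      }
      currentKeyId = keyId;
      currentKey = key;
      try {
        store.storeCurrentKey(key);
      } catch (IOException e) {
        System.err.println("Unable to update current key: " + e);
      }
    }
  }
}
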
+ 3 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java

@@ -34,7 +34,7 @@ import org.apache.hadoop.yarn.security.NMTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
-import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokenState;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokensState;
 import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;

@@ -64,8 +64,9 @@ public class NMTokenSecretManagerInNM extends BaseNMTokenSecretManager {
    this.stateStore = stateStore;
  }

-  public synchronized void recover(RecoveredNMTokenState state)
+  public synchronized void recover()
      throws IOException {
+    RecoveredNMTokensState state = stateStore.loadNMTokensState();
    MasterKey key = state.getCurrentMasterKey();
    if (key != null) {
      super.currentMasterKey =

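The recover() signature change above inverts responsibility: instead of callers loading RecoveredNMTokensState and handing it in, the secret manager pulls its own state from the store it was constructed with. A compact sketch of the same inversion, using simplified stand-in types rather than the YARN classes:

import java.io.IOException;

class RecoveredState {
  byte[] currentMasterKey; // null when nothing was persisted
}

interface StateStore {
  RecoveredState loadState() throws IOException;
}

class TokenManager {
  private final StateStore stateStore;
  private byte[] currentMasterKey;

  TokenManager(StateStore stateStore) {
    this.stateStore = stateStore;
  }

  // Every caller now recovers the same way and cannot forget to
  // supply the loaded state.
  synchronized void recover() throws IOException {
    RecoveredState state = stateStore.loadState();
    if (state.currentMasterKey != null) {
      currentMasterKey = state.currentMasterKey;
    }
  }
}
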
+ 50 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
@@ -36,7 +37,8 @@ import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
 public class NMMemoryStateStoreService extends NMStateStoreService {
   private Map<TrackerKey, TrackerState> trackerStates;
   private Map<Integer, DeletionServiceDeleteTaskProto> deleteTasks;
-  private RecoveredNMTokenState nmTokenState;
+  private RecoveredNMTokensState nmTokenState;
+  private RecoveredContainerTokensState containerTokenState;

   public NMMemoryStateStoreService() {
     super(NMMemoryStateStoreService.class.getName());
@@ -117,12 +119,13 @@ public class NMMemoryStateStoreService extends NMStateStoreService {

   @Override
   protected void initStorage(Configuration conf) {
-    nmTokenState = new RecoveredNMTokenState();
+    nmTokenState = new RecoveredNMTokensState();
     nmTokenState.applicationMasterKeys =
         new HashMap<ApplicationAttemptId, MasterKey>();
+    containerTokenState = new RecoveredContainerTokensState();
+    containerTokenState.activeTokens = new HashMap<ContainerId, Long>();
     trackerStates = new HashMap<TrackerKey, TrackerState>();
     deleteTasks = new HashMap<Integer, DeletionServiceDeleteTaskProto>();
-
   }

   @Override
@@ -157,9 +160,9 @@ public class NMMemoryStateStoreService extends NMStateStoreService {


   @Override
-  public RecoveredNMTokenState loadNMTokenState() throws IOException {
+  public RecoveredNMTokensState loadNMTokensState() throws IOException {
     // return a copy so caller can't modify our state
-    RecoveredNMTokenState result = new RecoveredNMTokenState();
+    RecoveredNMTokensState result = new RecoveredNMTokensState();
     result.currentMasterKey = nmTokenState.currentMasterKey;
     result.previousMasterKey = nmTokenState.previousMasterKey;
     result.applicationMasterKeys =
@@ -197,6 +200,48 @@ public class NMMemoryStateStoreService extends NMStateStoreService {
   }


+  @Override
+  public RecoveredContainerTokensState loadContainerTokensState()
+      throws IOException {
+    // return a copy so caller can't modify our state
+    RecoveredContainerTokensState result =
+        new RecoveredContainerTokensState();
+    result.currentMasterKey = containerTokenState.currentMasterKey;
+    result.previousMasterKey = containerTokenState.previousMasterKey;
+    result.activeTokens =
+        new HashMap<ContainerId, Long>(containerTokenState.activeTokens);
+    return result;
+  }
+
+  @Override
+  public void storeContainerTokenCurrentMasterKey(MasterKey key)
+      throws IOException {
+    MasterKeyPBImpl keypb = (MasterKeyPBImpl) key;
+    containerTokenState.currentMasterKey =
+        new MasterKeyPBImpl(keypb.getProto());
+  }
+
+  @Override
+  public void storeContainerTokenPreviousMasterKey(MasterKey key)
+      throws IOException {
+    MasterKeyPBImpl keypb = (MasterKeyPBImpl) key;
+    containerTokenState.previousMasterKey =
+        new MasterKeyPBImpl(keypb.getProto());
+  }
+
+  @Override
+  public void storeContainerToken(ContainerId containerId,
+      Long expirationTime) throws IOException {
+    containerTokenState.activeTokens.put(containerId, expirationTime);
+  }
+
+  @Override
+  public void removeContainerToken(ContainerId containerId)
+      throws IOException {
+    containerTokenState.activeTokens.remove(containerId);
+  }
+
+
   private static class TrackerState {
     Map<Path, LocalResourceProto> inProgressMap =
         new HashMap<Path, LocalResourceProto>();

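A recurring idiom in the NMMemoryStateStoreService methods above is "return a copy so caller can't modify our state": the load methods hand back fresh maps instead of the live internal ones. A stripped-down illustration of that idiom, with plain strings standing in for ContainerId:

import java.util.HashMap;
import java.util.Map;

class InMemoryTokenStore {
  private final Map<String, Long> activeTokens = new HashMap<String, Long>();

  synchronized void storeToken(String containerId, long expirationTime) {
    activeTokens.put(containerId, expirationTime);
  }

  synchronized void removeToken(String containerId) {
    activeTokens.remove(containerId);
  }

  // Hand back a snapshot: callers can inspect it freely but cannot
  // mutate the store's live map.
  synchronized Map<String, Long> loadTokens() {
    return new HashMap<String, Long>(activeTokens);
  }
}
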
+ 90 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java

@@ -27,11 +27,13 @@ import java.io.File;
 import java.io.IOException;
 import java.util.Map;

+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.service.ServiceStateException;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
@@ -42,12 +44,15 @@ import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.Deletion
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.LocalResourceTrackerState;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerTokensState;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredLocalizationState;
-import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokenState;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokensState;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredUserResources;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion;
+import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -502,7 +507,7 @@ public class TestNMLeveldbStateStoreService {
   @Test
   public void testNMTokenStorage() throws IOException {
     // test empty when no state
-    RecoveredNMTokenState state = stateStore.loadNMTokenState();
+    RecoveredNMTokensState state = stateStore.loadNMTokensState();
     assertNull(state.getCurrentMasterKey());
     assertNull(state.getPreviousMasterKey());
     assertTrue(state.getApplicationMasterKeys().isEmpty());
@@ -512,7 +517,7 @@ public class TestNMLeveldbStateStoreService {
     MasterKey currentKey = secretMgr.generateKey();
     stateStore.storeNMTokenCurrentMasterKey(currentKey);
     restartStateStore();
-    state = stateStore.loadNMTokenState();
+    state = stateStore.loadNMTokensState();
     assertEquals(currentKey, state.getCurrentMasterKey());
     assertNull(state.getPreviousMasterKey());
     assertTrue(state.getApplicationMasterKeys().isEmpty());
@@ -521,7 +526,7 @@ public class TestNMLeveldbStateStoreService {
     MasterKey prevKey = secretMgr.generateKey();
     stateStore.storeNMTokenPreviousMasterKey(prevKey);
     restartStateStore();
-    state = stateStore.loadNMTokenState();
+    state = stateStore.loadNMTokensState();
     assertEquals(currentKey, state.getCurrentMasterKey());
     assertEquals(prevKey, state.getPreviousMasterKey());
     assertTrue(state.getApplicationMasterKeys().isEmpty());
@@ -536,7 +541,7 @@ public class TestNMLeveldbStateStoreService {
     MasterKey attemptKey2 = secretMgr.generateKey();
     stateStore.storeNMTokenApplicationMasterKey(attempt2, attemptKey2);
     restartStateStore();
-    state = stateStore.loadNMTokenState();
+    state = stateStore.loadNMTokensState();
     assertEquals(currentKey, state.getCurrentMasterKey());
     assertEquals(prevKey, state.getPreviousMasterKey());
     Map<ApplicationAttemptId, MasterKey> loadedAppKeys =
@@ -558,7 +563,7 @@ public class TestNMLeveldbStateStoreService {
     currentKey = secretMgr.generateKey();
     stateStore.storeNMTokenCurrentMasterKey(currentKey);
     restartStateStore();
-    state = stateStore.loadNMTokenState();
+    state = stateStore.loadNMTokensState();
     assertEquals(currentKey, state.getCurrentMasterKey());
     assertEquals(prevKey, state.getPreviousMasterKey());
     loadedAppKeys = state.getApplicationMasterKeys();
@@ -568,10 +573,89 @@ public class TestNMLeveldbStateStoreService {
     assertEquals(attemptKey3, loadedAppKeys.get(attempt3));
   }

+  @Test
+  public void testContainerTokenStorage() throws IOException {
+    // test empty when no state
+    RecoveredContainerTokensState state =
+        stateStore.loadContainerTokensState();
+    assertNull(state.getCurrentMasterKey());
+    assertNull(state.getPreviousMasterKey());
+    assertTrue(state.getActiveTokens().isEmpty());
+
+    // store a master key and verify recovered
+    ContainerTokenKeyGeneratorForTest keygen =
+        new ContainerTokenKeyGeneratorForTest(new YarnConfiguration());
+    MasterKey currentKey = keygen.generateKey();
+    stateStore.storeContainerTokenCurrentMasterKey(currentKey);
+    restartStateStore();
+    state = stateStore.loadContainerTokensState();
+    assertEquals(currentKey, state.getCurrentMasterKey());
+    assertNull(state.getPreviousMasterKey());
+    assertTrue(state.getActiveTokens().isEmpty());
+
+    // store a previous key and verify recovered
+    MasterKey prevKey = keygen.generateKey();
+    stateStore.storeContainerTokenPreviousMasterKey(prevKey);
+    restartStateStore();
+    state = stateStore.loadContainerTokensState();
+    assertEquals(currentKey, state.getCurrentMasterKey());
+    assertEquals(prevKey, state.getPreviousMasterKey());
+    assertTrue(state.getActiveTokens().isEmpty());
+
+    // store a few container tokens and verify recovered
+    ContainerId cid1 = BuilderUtils.newContainerId(1, 1, 1, 1);
+    Long expTime1 = 1234567890L;
+    ContainerId cid2 = BuilderUtils.newContainerId(2, 2, 2, 2);
+    Long expTime2 = 9876543210L;
+    stateStore.storeContainerToken(cid1, expTime1);
+    stateStore.storeContainerToken(cid2, expTime2);
+    restartStateStore();
+    state = stateStore.loadContainerTokensState();
+    assertEquals(currentKey, state.getCurrentMasterKey());
+    assertEquals(prevKey, state.getPreviousMasterKey());
+    Map<ContainerId, Long> loadedActiveTokens =
+        state.getActiveTokens();
+    assertEquals(2, loadedActiveTokens.size());
+    assertEquals(expTime1, loadedActiveTokens.get(cid1));
+    assertEquals(expTime2, loadedActiveTokens.get(cid2));
+
+    // add/update/remove tokens and verify recovered
+    ContainerId cid3 = BuilderUtils.newContainerId(3, 3, 3, 3);
+    Long expTime3 = 135798642L;
+    stateStore.storeContainerToken(cid3, expTime3);
+    stateStore.removeContainerToken(cid1);
+    expTime2 += 246897531L;
+    stateStore.storeContainerToken(cid2, expTime2);
+    prevKey = currentKey;
+    stateStore.storeContainerTokenPreviousMasterKey(prevKey);
+    currentKey = keygen.generateKey();
+    stateStore.storeContainerTokenCurrentMasterKey(currentKey);
+    restartStateStore();
+    state = stateStore.loadContainerTokensState();
+    assertEquals(currentKey, state.getCurrentMasterKey());
+    assertEquals(prevKey, state.getPreviousMasterKey());
+    loadedActiveTokens = state.getActiveTokens();
+    assertEquals(2, loadedActiveTokens.size());
+    assertNull(loadedActiveTokens.get(cid1));
+    assertEquals(expTime2, loadedActiveTokens.get(cid2));
+    assertEquals(expTime3, loadedActiveTokens.get(cid3));
+  }
+
   private static class NMTokenSecretManagerForTest extends
       BaseNMTokenSecretManager {
     public MasterKey generateKey() {
       return createNewMasterKey().getMasterKey();
     }
   }
+
+  private static class ContainerTokenKeyGeneratorForTest extends
+      BaseContainerTokenSecretManager {
+    public ContainerTokenKeyGeneratorForTest(Configuration conf) {
+      super(conf);
+    }
+
+    public MasterKey generateKey() {
+      return createNewMasterKey().getMasterKey();
+    }
+  }
 }

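testContainerTokenStorage above repeats a single cycle: mutate the store, restart it, reload, assert. A condensed, self-contained sketch of that cycle in JUnit 4; the map here is a stand-in for a persistent store, so the restart step survives only as a comment:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;

import java.util.HashMap;
import java.util.Map;

import org.junit.Test;

public class TokenRoundTripTest {
  private final Map<String, Long> store = new HashMap<String, Long>();

  @Test
  public void testStoreRemoveLoad() {
    store.put("container_1", 1234567890L);
    store.put("container_2", 9876543210L);
    // a persistent store would be closed and reopened here, as
    // restartStateStore() does in the leveldb test
    assertEquals(Long.valueOf(1234567890L), store.get("container_1"));

    store.remove("container_1");
    // ... restart again ...
    assertNull(store.get("container_1"));
    assertEquals(Long.valueOf(9876543210L), store.get("container_2"));
  }
}
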
+ 144 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMContainerTokenSecretManager.java

@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.security;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.records.MasterKey;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService;
+import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Test;
+
+public class TestNMContainerTokenSecretManager {
+
+  @Test
+  public void testRecovery() throws IOException {
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
+    final NodeId nodeId = NodeId.newInstance("somehost", 1234);
+    final ContainerId cid1 = BuilderUtils.newContainerId(1, 1, 1, 1);
+    final ContainerId cid2 = BuilderUtils.newContainerId(2, 2, 2, 2);
+    ContainerTokenKeyGeneratorForTest keygen =
+        new ContainerTokenKeyGeneratorForTest(conf);
+    NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService();
+    stateStore.init(conf);
+    stateStore.start();
+    NMContainerTokenSecretManager secretMgr =
+        new NMContainerTokenSecretManager(conf, stateStore);
+    secretMgr.setNodeId(nodeId);
+    MasterKey currentKey = keygen.generateKey();
+    secretMgr.setMasterKey(currentKey);
+    ContainerTokenIdentifier tokenId1 =
+        createContainerTokenId(cid1, nodeId, "user1", secretMgr);
+    ContainerTokenIdentifier tokenId2 =
+        createContainerTokenId(cid2, nodeId, "user2", secretMgr);
+    assertNotNull(secretMgr.retrievePassword(tokenId1));
+    assertNotNull(secretMgr.retrievePassword(tokenId2));
+
+    // restart and verify tokens still valid
+    secretMgr = new NMContainerTokenSecretManager(conf, stateStore);
+    secretMgr.setNodeId(nodeId);
+    secretMgr.recover();
+    assertEquals(currentKey, secretMgr.getCurrentKey());
+    assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
+    assertTrue(secretMgr.isValidStartContainerRequest(tokenId2));
+    assertNotNull(secretMgr.retrievePassword(tokenId1));
+    assertNotNull(secretMgr.retrievePassword(tokenId2));
+
+    // roll master key and start a container
+    secretMgr.startContainerSuccessful(tokenId2);
+    currentKey = keygen.generateKey();
+    secretMgr.setMasterKey(currentKey);
+
+    // restart and verify tokens still valid due to prev key persist
+    secretMgr = new NMContainerTokenSecretManager(conf, stateStore);
+    secretMgr.setNodeId(nodeId);
+    secretMgr.recover();
+    assertEquals(currentKey, secretMgr.getCurrentKey());
+    assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
+    assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
+    assertNotNull(secretMgr.retrievePassword(tokenId1));
+    assertNotNull(secretMgr.retrievePassword(tokenId2));
+
+    // roll master key again, restart, and verify keys no longer valid
+    currentKey = keygen.generateKey();
+    secretMgr.setMasterKey(currentKey);
+    secretMgr = new NMContainerTokenSecretManager(conf, stateStore);
+    secretMgr.setNodeId(nodeId);
+    secretMgr.recover();
+    assertEquals(currentKey, secretMgr.getCurrentKey());
+    assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
+    assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
+    try {
+      secretMgr.retrievePassword(tokenId1);
+      fail("token should not be valid");
+    } catch (InvalidToken e) {
+      // expected
+    }
+    try {
+      secretMgr.retrievePassword(tokenId2);
+      fail("token should not be valid");
+    } catch (InvalidToken e) {
+      // expected
+    }
+
+    stateStore.close();
+  }
+
+  private static ContainerTokenIdentifier createContainerTokenId(
+      ContainerId cid, NodeId nodeId, String user,
+      NMContainerTokenSecretManager secretMgr) throws IOException {
+    long rmid = cid.getApplicationAttemptId().getApplicationId()
+        .getClusterTimestamp();
+    ContainerTokenIdentifier ctid = new ContainerTokenIdentifier(cid,
+        nodeId.toString(), user, BuilderUtils.newResource(1024, 1),
+        System.currentTimeMillis() + 100000L,
+        secretMgr.getCurrentKey().getKeyId(), rmid,
+        Priority.newInstance(0), 0);
+    Token token = BuilderUtils.newContainerToken(nodeId,
+        secretMgr.createPassword(ctid), ctid);
+    return BuilderUtils.newContainerTokenIdentifier(token);
+  }
+
+  private static class ContainerTokenKeyGeneratorForTest extends
+      BaseContainerTokenSecretManager {
+    public ContainerTokenKeyGeneratorForTest(Configuration conf) {
+      super(conf);
+    }
+
+    public MasterKey generateKey() {
+      return createNewMasterKey().getMasterKey();
+    }
+  }
+}

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMTokenSecretManagerInNM.java

@@ -73,7 +73,7 @@ public class TestNMTokenSecretManagerInNM {

     // restart and verify key is still there and token still valid
     secretMgr = new NMTokenSecretManagerInNM(stateStore);
-    secretMgr.recover(stateStore.loadNMTokenState());
+    secretMgr.recover();
     secretMgr.setNodeId(nodeId);
     assertEquals(currentKey, secretMgr.getCurrentKey());
     assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
@@ -88,7 +88,7 @@ public class TestNMTokenSecretManagerInNM {

     // restart and verify attempt1 key is still valid due to prev key persist
     secretMgr = new NMTokenSecretManagerInNM(stateStore);
-    secretMgr.recover(stateStore.loadNMTokenState());
+    secretMgr.recover();
     secretMgr.setNodeId(nodeId);
     assertEquals(currentKey, secretMgr.getCurrentKey());
     assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
@@ -101,7 +101,7 @@ public class TestNMTokenSecretManagerInNM {
     currentKey = keygen.generateKey();
     secretMgr.setMasterKey(currentKey);
     secretMgr = new NMTokenSecretManagerInNM(stateStore);
-    secretMgr.recover(stateStore.loadNMTokenState());
+    secretMgr.recover();
     secretMgr.setNodeId(nodeId);
     assertEquals(currentKey, secretMgr.getCurrentKey());
     assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
@@ -117,7 +117,7 @@ public class TestNMTokenSecretManagerInNM {
     // remove last attempt, restart, verify both tokens are now bad
     secretMgr.appFinished(attempt2.getApplicationId());
     secretMgr = new NMTokenSecretManagerInNM(stateStore);
-    secretMgr.recover(stateStore.loadNMTokenState());
+    secretMgr.recover();
     secretMgr.setNodeId(nodeId);
     assertEquals(currentKey, secretMgr.getCurrentKey());
     assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));

+ 12 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java

@@ -135,7 +135,7 @@ public class FairScheduler extends
   public static final Resource CONTAINER_RESERVED = Resources.createResource(-1);

   // How often fair shares are re-calculated (ms)
-  protected long UPDATE_INTERVAL = 500;
+  protected long updateInterval;
   private final int UPDATE_DEBUG_FREQUENCY = 5;
   private int updatesToSkipForDebug = UPDATE_DEBUG_FREQUENCY;

@@ -244,13 +244,13 @@ public class FairScheduler extends

   /**
    * A runnable which calls {@link FairScheduler#update()} every
-   * <code>UPDATE_INTERVAL</code> milliseconds.
+   * <code>updateInterval</code> milliseconds.
    */
   private class UpdateThread implements Runnable {
     public void run() {
       while (true) {
         try {
-          Thread.sleep(UPDATE_INTERVAL);
+          Thread.sleep(updateInterval);
           update();
           preemptTasksIfNecessary();
         } catch (Exception e) {
@@ -1206,6 +1206,15 @@ public class FairScheduler extends
     waitTimeBeforeKill = this.conf.getWaitTimeBeforeKill();
     usePortForNodeName = this.conf.getUsePortForNodeName();

+    updateInterval = this.conf.getUpdateInterval();
+    if (updateInterval < 0) {
+      updateInterval = FairSchedulerConfiguration.DEFAULT_UPDATE_INTERVAL_MS;
+      LOG.warn(FairSchedulerConfiguration.UPDATE_INTERVAL_MS
+              + " is invalid, so using default value "
+              + FairSchedulerConfiguration.DEFAULT_UPDATE_INTERVAL_MS
+              + " ms instead");
+    }
+
     rootMetrics = FSQueueMetrics.forQueue("root", null, true, conf);
     // This stores per-application scheduling information
     this.applications =

+ 9 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java

@@ -123,6 +123,11 @@ public class FairSchedulerConfiguration extends Configuration {
   protected static final String MAX_ASSIGN = CONF_PREFIX + "max.assign";
   protected static final int DEFAULT_MAX_ASSIGN = -1;

+  /** The update interval for calculating resources in FairScheduler. */
+  public static final String UPDATE_INTERVAL_MS =
+      CONF_PREFIX + "update-interval-ms";
+  public static final int DEFAULT_UPDATE_INTERVAL_MS = 500;
+
   public FairSchedulerConfiguration() {
     super();
   }
@@ -246,6 +251,10 @@ public class FairSchedulerConfiguration extends Configuration {
           "Error reading resource config", ex);
           "Error reading resource config", ex);
     }
     }
   }
   }
+
+  public long getUpdateInterval() {
+    return getLong(UPDATE_INTERVAL_MS, DEFAULT_UPDATE_INTERVAL_MS);
+  }

   private static int findResource(String val, String units)
     throws AllocationConfigurationException {

+ 5 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java

@@ -388,7 +388,11 @@ public class DelegationTokenRenewer extends AbstractService {
       // If user provides incorrect token then it should not be added for
       // renewal.
       for (DelegationTokenToRenew dtr : tokenList) {
-        renewToken(dtr);
+        try {
+          renewToken(dtr);
+        } catch (IOException ioe) {
+          throw new IOException("Failed to renew token: " + dtr.token, ioe);
+        }
       }
       for (DelegationTokenToRenew dtr : tokenList) {
         addTokenToList(dtr);

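The DelegationTokenRenewer change above wraps each renewal failure so the exception that reaches the submitter names the offending token, with the low-level error preserved as the cause. The same wrap-and-rethrow shape in isolation, with simplified stand-in types:

import java.io.IOException;
import java.util.List;

interface RenewableToken {
  void renew() throws IOException;
}

class TokenRenewal {
  // Rethrow with context: the caller learns which token failed, and
  // getCause() still yields the original error.
  static void renewAll(List<RenewableToken> tokens) throws IOException {
    for (RenewableToken token : tokens) {
      try {
        token.renew();
      } catch (IOException ioe) {
        throw new IOException("Failed to renew token: " + token, ioe);
      }
    }
  }
}
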
+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java

@@ -94,7 +94,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     scheduler = (FairScheduler)resourceManager.getResourceScheduler();

     scheduler.setClock(clock);
-    scheduler.UPDATE_INTERVAL = 60 * 1000;
+    scheduler.updateInterval = 60 * 1000;
   }

   private void registerNodeAndSubmitApp(

+ 35 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java

@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;

@@ -673,7 +674,40 @@ public class TestDelegationTokenRenewer {
       Thread.sleep(200);
     }
   }
-  
+
+  @Test(timeout=20000)
+  public void testDTRonAppSubmission()
+      throws IOException, InterruptedException, BrokenBarrierException {
+    final Credentials credsx = new Credentials();
+    final Token<?> tokenx = mock(Token.class);
+    credsx.addToken(new Text("token"), tokenx);
+    doReturn(true).when(tokenx).isManaged();
+    doThrow(new IOException("boom"))
+        .when(tokenx).renew(any(Configuration.class));
+    // fire up the renewer
+    final DelegationTokenRenewer dtr =
+        createNewDelegationTokenRenewer(conf, counter);
+    RMContext mockContext = mock(RMContext.class);
+    ClientRMService mockClientRMService = mock(ClientRMService.class);
+    when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
+    InetSocketAddress sockAddr =
+        InetSocketAddress.createUnresolved("localhost", 1234);
+    when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
+    dtr.setRMContext(mockContext);
+    when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr);
+    dtr.init(conf);
+    dtr.start();
+
+    try {
+      dtr.addApplicationSync(mock(ApplicationId.class), credsx, false);
+      fail("App submission should have thrown an IOException");
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains(tokenx.toString()));
+      Assert.assertTrue(e.getCause().toString().contains("boom"));
+    }
+
+  }
+
   @Test(timeout=20000)
   public void testConcurrentAddApplication()
       throws IOException, InterruptedException, BrokenBarrierException {

+ 6 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm

@@ -205,6 +205,12 @@ Properties that can be placed in yarn-site.xml
       instead. Defaults to true. If a queue placement policy is given in the
       allocations file, this property is ignored.

+ * <<<yarn.scheduler.fair.update-interval-ms>>>
+ 
+    * The interval at which to lock the scheduler and recalculate fair shares,
+      recalculate demand, and check whether anything is due for preemption.
+      Defaults to 500 ms. 
+
 Allocation file format

   The allocation file must be in XML format. The format contains five types of

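Since the new property is read through the ordinary Configuration machinery, it can also be set programmatically, which is handy in tests; a small example (the property name comes from the docs above, everything else is standard Hadoop Configuration usage):

import org.apache.hadoop.conf.Configuration;

public class UpdateIntervalExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // equivalent to setting the property in yarn-site.xml
    conf.setLong("yarn.scheduler.fair.update-interval-ms", 1000L);
    System.out.println(
        conf.getLong("yarn.scheduler.fair.update-interval-ms", 500L));
  }
}
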
+ 45 - 60
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm

@@ -2228,68 +2228,48 @@ _01_000001</amContainerLogs>
   {
     "application-id":"application_1404203615263_0001",
     "application-name":"test",
-    "queue":"testqueue",
-    "priority":"3",
     "am-container-spec":
     "am-container-spec":
     {
     {
       "local-resources":
       "local-resources":
       {
       {
         "entry":
         "entry":
-        {
-          "key":"example",
-          "value":
+        [
           {
-            "resource":"http://www.test.com/file.txt",
-            "type":"FILE",
-            "visibility":"APPLICATION",
-            "size":"100",
-            "timestamp":"1404203616003"
+            "key":"AppMaster.jar",
+            "value":
+            {
+              "resource":"hdfs://hdfs-namenode:9000/user/testuser/DistributedShell/demo-app/AppMaster.jar",
+              "type":"FILE",
+              "visibility":"APPLICATION",
+              "size": "43004",
+              "timestamp": "1405452071209"
+            }
           }
-        }
-      },
-      "environment":
-      {
-        "entry":
-        {
-          "key":"APP_VAR",
-          "value":"ENV_SETTING"
-        }
+        ]
       },
       "commands":
       {
-        "command":"/bin/sleep 5"
-      },
-      "service-data":
-      {
-        "entry":
-        {
-          "key":"test",
-          "value":"dmFsdWUxMg"
-        }
-      },
-      "credentials":
-      {
-        "tokens":null,
-        "secrets":
-        {
-          "entry":
-          {
-            "key":"secret1",
-            "value":"c2VjcmV0MQ"
-          }
-        }
+        "command":"{{JAVA_HOME}}/bin/java -Xmx10m org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster --container_memory 10 --container_vcores 1 --num_containers 1 --priority 0 1><LOG_DIR>/AppMaster.stdout 2><LOG_DIR>/AppMaster.stderr"
       },
-      "application-acls":
+      "environment":
       {
         "entry":
         [
           {
-            "key":"VIEW_APP",
-            "value":"testuser3, testuser4"
+            "key": "DISTRIBUTEDSHELLSCRIPTTIMESTAMP",
+            "value": "1405459400754"
+          },
+          {
+            "key": "CLASSPATH",
+            "value": "{{CLASSPATH}}<CPS>./*<CPS>{{HADOOP_CONF_DIR}}<CPS>{{HADOOP_COMMON_HOME}}/share/hadoop/common/*<CPS>{{HADOOP_COMMON_HOME}}/share/hadoop/common/lib/*<CPS>{{HADOOP_HDFS_HOME}}/share/hadoop/hdfs/*<CPS>{{HADOOP_HDFS_HOME}}/share/hadoop/hdfs/lib/*<CPS>{{HADOOP_YARN_HOME}}/share/hadoop/yarn/*<CPS>{{HADOOP_YARN_HOME}}/share/hadoop/yarn/lib/*<CPS>./log4j.properties"
           },
           },
           {
           {
-            "key":"MODIFY_APP",
-            "value":"testuser1, testuser2"
+            "key": "DISTRIBUTEDSHELLSCRIPTLEN",
+            "value": "6"
+          },
+          {
+            "key": "DISTRIBUTEDSHELLSCRIPTLOCATION",
+            "value": "hdfs://hdfs-namenode:9000/user/testuser/demo-app/shellCommands"
           }
           }
         ]
       }
       "vCores":"1"
       "vCores":"1"
     },
     },
     "application-type":"YARN",
     "application-type":"YARN",
-    "keep-containers-across-application-attempts":"false",
-    "application-tags":
-    {
-      "tag":
-      [
-        "tag 2",
-        "tag1"
-      ]
-    }
+    "keep-containers-across-application-attempts":"false"
   }
+
 +---+

   Response Header:
@@ -2349,22 +2322,34 @@ _01_000001</amContainerLogs>
         <entry>
           <key>example</key>
           <value>
-            <resource>http://www.test.com/file.txt</resource>
+            <resource>hdfs://hdfs-namenode:9000/user/testuser/DistributedShell/demo-app/AppMaster.jar</resource>
             <type>FILE</type>
             <visibility>APPLICATION</visibility>
-            <size>100</size>
-            <timestamp>1404204892877</timestamp>
+            <size>43004</size>
+            <timestamp>1405452071209</timestamp>
           </value>
         </entry>
       </local-resources>
       <environment>
         <entry>
-          <key>APP_VAR</key>
-          <value>ENV_SETTING</value>
+          <key>DISTRIBUTEDSHELLSCRIPTTIMESTAMP</key>
+          <value>1405459400754</value>
+        </entry>
+        <entry>
+          <key>CLASSPATH</key>
+          <value>{{CLASSPATH}}&lt;CPS&gt;./*&lt;CPS&gt;{{HADOOP_CONF_DIR}}&lt;CPS&gt;{{HADOOP_COMMON_HOME}}/share/hadoop/common/*&lt;CPS&gt;{{HADOOP_COMMON_HOME}}/share/hadoop/common/lib/*&lt;CPS&gt;{{HADOOP_HDFS_HOME}}/share/hadoop/hdfs/*&lt;CPS&gt;{{HADOOP_HDFS_HOME}}/share/hadoop/hdfs/lib/*&lt;CPS&gt;{{HADOOP_YARN_HOME}}/share/hadoop/yarn/*&lt;CPS&gt;{{HADOOP_YARN_HOME}}/share/hadoop/yarn/lib/*&lt;CPS&gt;./log4j.properties</value>
+        </entry>
+        <entry>
+          <key>DISTRIBUTEDSHELLSCRIPTLEN</key>
+          <value>6</value>
+        </entry>
+        <entry>
+          <key>DISTRIBUTEDSHELLSCRIPTLOCATION</key>
+          <value>hdfs://hdfs-namenode:9000/user/testuser/demo-app/shellCommands</value>
         </entry>
         </entry>
       </environment>
       <commands>
+        <command>{{JAVA_HOME}}/bin/java -Xmx10m org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster --container_memory 10 --container_vcores 1 --num_containers 1 --priority 0 1&gt;&lt;LOG_DIR&gt;/AppMaster.stdout 2&gt;&lt;LOG_DIR&gt;/AppMaster.stderr</command>
       </commands>
       </commands>
       <service-data>
         <entry>