
Merge branch 'trunk' into HDFS-6584

Jing Zhao 10 years ago
parent
current commit
555900a9dc
95 files changed, with 3000 insertions and 713 deletions
  1. 155 120
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 5 0
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  3. 13 15
      hadoop-common-project/hadoop-common/src/contrib/bash-tab-completion/hadoop.sh
  4. 6 4
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
  5. 8 2
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
  6. 26 38
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
  7. 20 0
      hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd
  8. 11 0
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
  9. 5 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
  10. 449 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
  11. 27 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueueMXBean.java
  12. 32 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcMultiplexer.java
  13. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
  14. 219 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
  15. 92 23
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
  16. 10 0
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_user_info.c
  17. 392 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
  18. 33 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheck.java
  19. 34 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheckMain.java
  20. 24 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheckSecond.java
  21. 24 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheckThird.java
  22. 6 6
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java
  23. 64 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
  24. 4 2
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
  25. 31 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  26. 8 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
  27. 17 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java
  28. 37 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
  29. 17 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
  30. 17 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
  31. 6 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
  32. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
  33. 18 11
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/LibHdfs.apt.vm
  34. 100 19
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  35. 24 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
  36. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
  37. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
  38. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
  39. 28 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
  40. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
  41. 76 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
  42. 27 3
      hadoop-mapreduce-project/CHANGES.txt
  43. 11 9
      hadoop-mapreduce-project/bin/mapred-config.sh
  44. 29 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
  45. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
  46. 2 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
  47. 3 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
  48. 18 76
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
  49. 28 9
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
  50. 2 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
  51. 96 11
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
  52. 6 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/PluggableShuffleAndPluggableSort.apt.vm
  53. 9 6
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
  54. 9 11
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java
  55. 43 18
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestComparators.java
  56. 39 23
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapOutputType.java
  57. 19 9
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapRed.java
  58. 23 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java
  59. 23 7
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapReduce.java
  60. 2 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
  61. 6 6
      hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
  62. 32 0
      hadoop-yarn-project/CHANGES.txt
  63. 1 0
      hadoop-yarn-project/hadoop-yarn/bin/yarn
  64. 8 8
      hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh
  65. 7 0
      hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
  66. 7 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
  67. 9 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
  68. 10 160
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ApplicationClassLoader.java
  69. 16 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
  70. 3 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
  71. 11 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
  72. 61 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
  73. 15 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
  74. 7 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
  75. 8 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreFactory.java
  76. 13 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
  77. 0 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
  78. 10 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
  79. 16 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
  80. 16 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
  81. 4 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
  82. 9 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
  83. 0 7
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
  84. 21 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java
  85. 28 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java
  86. 9 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
  87. 8 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
  88. 8 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java
  89. 1 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
  90. 8 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java
  91. 13 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
  92. 0 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java
  93. 136 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
  94. 60 8
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java
  95. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java

+ 155 - 120
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -13,8 +13,6 @@ Trunk (Unreleased)

  NEW FEATURES

-    HADOOP-10433. Key Management Server based on KeyProvider API. (tucu)
-
    HADOOP-9629. Support Windows Azure Storage - Blob as a file system in Hadoop.
    (Dexter Bradshaw, Mostafa Elhemali, Xi Fang, Johannes Klein, David Lao,
    Mike Liddell, Chuan Liu, Lengning Liu, Ivan Mitic, Michael Rys,
@@ -25,9 +23,6 @@ Trunk (Unreleased)
    Mike Liddell, Chuan Liu, Lengning Liu, Ivan Mitic, Michael Rys,
    Alexander Stojanovich, Brian Swan, and Min Wei via cnauroth)

-    HADOOP-10719. Add generateEncryptedKey and decryptEncryptedKey 
-    methods to KeyProvider. (asuresh via tucu)
-
  IMPROVEMENTS

    HADOOP-8017. Configure hadoop-main pom to get rid of M2E plugin execution
@@ -121,93 +116,15 @@ Trunk (Unreleased)

    HADOOP-9833 move slf4j to version 1.7.5 (Kousuke Saruta via stevel)

-    HADOOP-10141. Create KeyProvider API to separate encryption key storage
-    from the applications. (omalley)
-
-    HADOOP-10201. Add listing to KeyProvider API. (Larry McCay via omalley)
-
-    HADOOP-10177. Create CLI tools for managing keys. (Larry McCay via omalley)
-
-    HADOOP-10244. TestKeyShell improperly tests the results of delete (Larry
-    McCay via omalley)
-
    HADOOP-10325. Improve jenkins javadoc warnings from test-patch.sh (cmccabe)

    HADOOP-10342. Add a new method to UGI to use a Kerberos login subject to
    build a new UGI. (Larry McCay via omalley)

-    HADOOP-10237. JavaKeyStoreProvider needs to set keystore permissions 
-    correctly. (Larry McCay via omalley)
-
-    HADOOP-10432. Refactor SSLFactory to expose static method to determine
-    HostnameVerifier. (tucu)
-
-    HADOOP-10427. KeyProvider implementations should be thread safe. (tucu)
-
-    HADOOP-10429. KeyStores should have methods to generate the materials 
-    themselves, KeyShell should use them. (tucu)
-
-    HADOOP-10428. JavaKeyStoreProvider should accept keystore password via 
-    configuration falling back to ENV VAR. (tucu)
-
-    HADOOP-10430. KeyProvider Metadata should have an optional description, 
-    there should be a method to retrieve the metadata from all keys. (tucu)
-
-    HADOOP-10534. KeyProvider getKeysMetadata should take a list of names 
-    rather than returning all keys. (omalley)
-
    HADOOP-10563. Remove the dependency of jsp in trunk. (wheat9)

    HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)

-    HADOOP-10696. Add optional attributes to KeyProvider Options and Metadata. 
-    (tucu)
-
-    HADOOP-10695. KMSClientProvider should respect a configurable timeout. 
-    (yoderme via tucu)
-
-    HADOOP-10757. KeyProvider KeyVersion should provide the key name. 
-    (asuresh via tucu)
-
-    HADOOP-10769. Create KeyProvider extension to handle delegation tokens.
-    (Arun Suresh via atm)
-
-    HADOOP-10812. Delegate KeyProviderExtension#toString to underlying
-    KeyProvider. (wang)
-
-    HADOOP-10736. Add key attributes to the key shell. (Mike Yoder via wang)
-
-    HADOOP-10824. Refactor KMSACLs to avoid locking. (Benoy Antony via umamahesh)
-
-    HADOOP-10841. EncryptedKeyVersion should have a key name property. 
-    (asuresh via tucu)
-
-    HADOOP-10842. CryptoExtension generateEncryptedKey method should 
-    receive the key name. (asuresh via tucu)
-
-    HADOOP-10750. KMSKeyProviderCache should be in hadoop-common. 
-    (asuresh via tucu)
-
-    HADOOP-10720. KMS: Implement generateEncryptedKey and decryptEncryptedKey
-    in the REST API. (asuresh via tucu)
-
-    HADOOP-10891. Add EncryptedKeyVersion factory method to
-    KeyProviderCryptoExtension. (wang)
-
-    HADOOP-10756. KMS audit log should consolidate successful similar requests. 
-    (asuresh via tucu)
-
-    HADOOP-10793. KeyShell args should use single-dash style. (wang)
-
-    HADOOP-10936. Change default KeyProvider bitlength to 128. (wang)
-
-    HADOOP-10224. JavaKeyStoreProvider has to protect against corrupting 
-    underlying store. (asuresh via tucu)
-
-    HADOOP-10770. KMS add delegation token support. (tucu)
-
-    HADOOP-10698. KMS, add proxyuser support. (tucu)
-
  BUG FIXES

    HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -379,22 +296,9 @@ Trunk (Unreleased)

    HADOOP-10044 Improve the javadoc of rpc code (sanjay Radia)

-    HADOOP-10488. TestKeyProviderFactory fails randomly. (tucu)
-
-    HADOOP-10431. Change visibility of KeyStore.Options getter methods to public. (tucu)
-
-    HADOOP-10583. bin/hadoop key throws NPE with no args and assorted other fixups. (clamb via tucu)
-
-    HADOOP-10586. KeyShell doesn't allow setting Options via CLI. (clamb via tucu)
-
    HADOOP-10625. Trim configuration names when putting/getting them
    to properties. (Wangda Tan via xgong)

-    HADOOP-10645. TestKMS fails because race condition writing acl files. (tucu)
-
-    HADOOP-10611. KMS, keyVersion name should not be assumed to be 
-    keyName@versionNumber. (tucu)
-
    HADOOP-10717. HttpServer2 should load jsp DTD from local jars instead of
    going remote. (Dapeng Sun via wheat9)

@@ -409,32 +313,15 @@ Trunk (Unreleased)

    HADOOP-10834. Typo in CredentialShell usage. (Benoy Antony via umamahesh)

-    HADOOP-10816. KeyShell returns -1 on error to the shell, should be 1.
-    (Mike Yoder via wang)
-
    HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure File
    System. (Shanyu Zhao via cnauroth)

-    HADOOP-10826. Iteration on KeyProviderFactory.serviceLoader is 
-    thread-unsafe. (benoyantony viat tucu)
-
-    HADOOP-10881. Clarify usage of encryption and encrypted encryption
-    key in KeyProviderCryptoExtension. (wang)
-
-    HADOOP-10920. site plugin couldn't parse hadoop-kms index.apt.vm.
-    (Akira Ajisaka via wang)
-
    HADOOP-10925. Compilation fails in native link0 function on Windows.
    (cnauroth)

-    HADOOP-10939. Fix TestKeyProviderFactory testcases to use default 128 bit
-    length keys. (Arun Suresh via wang)
+    HADOOP-11002. shell escapes are incompatible with previous releases (aw)

-    HADOOP-10862. Miscellaneous trivial corrections to KMS classes. 
-    (asuresh via tucu)
-
-    HADOOP-10967. Improve DefaultCryptoExtension#generateEncryptedKey 
-    performance. (hitliuyi via tucu)
+    HADOOP-10996. Stop violence in the *_HOME (aw)

  OPTIMIZATIONS

@@ -498,6 +385,11 @@ Release 2.6.0 - UNRELEASED

  NEW FEATURES

+    HADOOP-10433. Key Management Server based on KeyProvider API. (tucu)
+
+    HADOOP-10893. isolated classloader on the client side (Sangjin Lee via
+    jlowe)
+
  IMPROVEMENTS

    HADOOP-10808. Remove unused native code for munlock. (cnauroth)
@@ -582,10 +474,100 @@ Release 2.6.0 - UNRELEASED
    HADOOP-10975. org.apache.hadoop.util.DataChecksum should support calculating
    checksums in native code (James Thomas via Colin Patrick McCabe)

+    HADOOP-10201. Add listing to KeyProvider API. (Larry McCay via omalley)
+
+    HADOOP-10177. Create CLI tools for managing keys. (Larry McCay via omalley)
+
+    HADOOP-10432. Refactor SSLFactory to expose static method to determine
+    HostnameVerifier. (tucu)
+
+    HADOOP-10429. KeyStores should have methods to generate the materials
+    themselves, KeyShell should use them. (tucu)
+
+    HADOOP-10427. KeyProvider implementations should be thread safe. (tucu)
+
+    HADOOP-10428. JavaKeyStoreProvider should accept keystore password via
+    configuration falling back to ENV VAR. (tucu)
+
+    HADOOP-10430. KeyProvider Metadata should have an optional description,
+    there should be a method to retrieve the metadata from all keys. (tucu)
+
+    HADOOP-10431. Change visibility of KeyStore.Options getter methods to
+    public. (tucu)
+
+    HADOOP-10534. KeyProvider getKeysMetadata should take a list of names
+    rather than returning all keys. (omalley)
+
+    HADOOP-10719. Add generateEncryptedKey and decryptEncryptedKey
+    methods to KeyProvider. (asuresh via tucu)
+
+    HADOOP-10817. ProxyUsers configuration should support configurable
+    prefixes. (tucu)
+
+    HADOOP-10881. Clarify usage of encryption and encrypted encryption
+    key in KeyProviderCryptoExtension. (wang)
+
+    HADOOP-10770. KMS add delegation token support. (tucu)
+
+    HADOOP-10698. KMS, add proxyuser support. (tucu)
+
+    HADOOP-8896. Javadoc points to Wrong Reader and Writer classes 
+    in SequenceFile (Ray Chiang via aw)
+
+    HADOOP-10998. Fix bash tab completion code to work (Jim Hester via aw)
+
  OPTIMIZATIONS

    HADOOP-10838. Byte array native checksumming. (James Thomas via todd)

+    HADOOP-10696. Add optional attributes to KeyProvider Options and Metadata.
+    (tucu)
+
+    HADOOP-10695. KMSClientProvider should respect a configurable timeout.
+    (yoderme via tucu)
+
+    HADOOP-10757. KeyProvider KeyVersion should provide the key name.
+    (asuresh via tucu)
+
+    HADOOP-10769. Create KeyProvider extension to handle delegation tokens.
+    (Arun Suresh via atm)
+
+    HADOOP-10812. Delegate KeyProviderExtension#toString to underlying
+    KeyProvider. (wang)
+
+    HADOOP-10736. Add key attributes to the key shell. (Mike Yoder via wang)
+
+    HADOOP-10824. Refactor KMSACLs to avoid locking. (Benoy Antony via umamahesh)
+
+    HADOOP-10841. EncryptedKeyVersion should have a key name property.
+    (asuresh via tucu)
+
+    HADOOP-10842. CryptoExtension generateEncryptedKey method should
+    receive the key name. (asuresh via tucu)
+
+    HADOOP-10750. KMSKeyProviderCache should be in hadoop-common.
+    (asuresh via tucu)
+
+    HADOOP-10720. KMS: Implement generateEncryptedKey and decryptEncryptedKey
+    in the REST API. (asuresh via tucu)
+
+    HADOOP-10891. Add EncryptedKeyVersion factory method to
+    KeyProviderCryptoExtension. (wang)
+
+    HADOOP-10756. KMS audit log should consolidate successful similar requests.
+    (asuresh via tucu)
+
+    HADOOP-10793. KeyShell args should use single-dash style. (wang)
+
+    HADOOP-10936. Change default KeyProvider bitlength to 128. (wang)
+
+    HADOOP-10224. JavaKeyStoreProvider has to protect against corrupting
+    underlying store. (asuresh via tucu)
+
+    HADOOP-10282. Create a FairCallQueue: a multi-level call queue which
+    schedules incoming calls and multiplexes outgoing calls. (Chris Li via
+    Arpit Agarwal)
+
  BUG FIXES

    HADOOP-10781. Unportable getgrouplist() usage breaks FreeBSD (Dmitry
@@ -621,11 +603,6 @@ Release 2.6.0 - UNRELEASED
    HADOOP-10927. Fix CredentialShell help behavior and error codes.
    (Josh Elser via wang)

-    HADOOP-10937. Need to set version name correctly before decrypting EEK.
-    (Arun Suresh via wang)
-
-    HADOOP-10918. JMXJsonServlet fails when used within Tomcat. (tucu)
-
    HADOOP-10933. FileBasedKeyStoresFactory Should use Configuration.getPassword 
    for SSL Passwords. (lmccay via tucu)

@@ -676,6 +653,64 @@ Release 2.6.0 - UNRELEASED
    HADOOP-10968. hadoop native build fails to detect java_libarch on
    ppc64le (Dinar Valeev via Colin Patrick McCabe)

+    HADOOP-10141. Create KeyProvider API to separate encryption key storage
+    from the applications. (omalley)
+
+    HADOOP-10237. JavaKeyStoreProvider needs to set keystore permissions
+    correctly. (Larry McCay via omalley)
+
+    HADOOP-10244. TestKeyShell improperly tests the results of delete (Larry
+    McCay via omalley)
+
+    HADOOP-10583. bin/hadoop key throws NPE with no args and assorted other fixups. (clamb via tucu)
+
+    HADOOP-10586. KeyShell doesn't allow setting Options via CLI. (clamb via tucu)
+
+    HADOOP-10645. TestKMS fails because race condition writing acl files. (tucu)
+
+    HADOOP-10611. KMS, keyVersion name should not be assumed to be
+    keyName@versionNumber. (tucu)
+
+    HADOOP-10816. KeyShell returns -1 on error to the shell, should be 1.
+    (Mike Yoder via wang)
+
+    HADOOP-10826. Iteration on KeyProviderFactory.serviceLoader is
+    thread-unsafe. (benoyantony viat tucu)
+
+    HADOOP-10920. site plugin couldn't parse hadoop-kms index.apt.vm.
+    (Akira Ajisaka via wang)
+
+    HADOOP-10937. Need to set version name correctly before decrypting EEK.
+    (Arun Suresh via wang)
+
+    HADOOP-10918. JMXJsonServlet fails when used within Tomcat. (tucu)
+
+    HADOOP-10939. Fix TestKeyProviderFactory testcases to use default 128 bit
+    length keys. (Arun Suresh via wang)
+
+    HADOOP-10862. Miscellaneous trivial corrections to KMS classes.
+    (asuresh via tucu)
+
+    HADOOP-10967. Improve DefaultCryptoExtension#generateEncryptedKey
+    performance. (hitliuyi via tucu)
+
+    HADOOP-10488. TestKeyProviderFactory fails randomly. (tucu)
+
+    HADOOP-10989. Work around buggy getgrouplist() implementations on Linux that
+    return 0 on failure. (cnauroth)
+
+Release 2.5.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
Release 2.5.0 - 2014-08-11

  INCOMPATIBLE CHANGES

+ 5 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -108,6 +108,11 @@
       <Method name="driver" />
       <Bug pattern="DM_EXIT" />
     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.util.RunJar" />
+       <Method name="run" />
+       <Bug pattern="DM_EXIT" />
+     </Match>
     <!--
       We need to cast objects between old and new api objects
     -->

+ 13 - 15
hadoop-common-project/hadoop-common/src/contrib/bash-tab-completion/hadoop.sh

@@ -26,7 +26,7 @@ _hadoop() {
  COMPREPLY=()
  cur=${COMP_WORDS[COMP_CWORD]}
  prev=${COMP_WORDS[COMP_CWORD-1]}  
-  script=`which ${COMP_WORDS[0]}`
+  script=$(which ${COMP_WORDS[0]})
  
  # Bash lets you tab complete things even if the script doesn't
  # exist (or isn't executable). Check to make sure it is, as we
@@ -36,9 +36,9 @@ _hadoop() {
    1)
      # Completing the first argument (the command).

-      temp=`$script | grep -n "^\s*or"`;
-      temp=`$script | head -n $((${temp%%:*} - 1)) | awk '/^ / {print $1}' | sort | uniq`;
-      COMPREPLY=(`compgen -W "${temp}" -- ${cur}`);
+      temp=$($script | grep -n "^\s*or");
+      temp=$($script | head -n $((${temp%%:*} - 1)) | awk '/^ / {print $1}' | sort | uniq);
+      COMPREPLY=($(compgen -W "${temp}" -- ${cur}));
      return 0;;

    2)
@@ -51,21 +51,21 @@ _hadoop() {
      dfs | dfsadmin | fs | job | pipes)
        # One option per line, enclosed in square brackets

-        temp=`$script ${COMP_WORDS[1]} 2>&1 | awk '/^[ \t]*\[/ {gsub("[[\\]]", ""); print $1}'`;
-        COMPREPLY=(`compgen -W "${temp}" -- ${cur}`);
+        temp=$($script ${COMP_WORDS[1]} 2>&1 | awk '/^[ \t]*\[/ {gsub("[[\\]]", ""); print $1}');
+        COMPREPLY=($(compgen -W "${temp}" -- ${cur}));
        return 0;;

      jar)
        # Any (jar) file

-        COMPREPLY=(`compgen -A file -- ${cur}`);
+        COMPREPLY=($(compgen -A file -- ${cur}));
        return 0;;

      namenode)
        # All options specified in one line,
        # enclosed in [] and separated with |
-        temp=`$script ${COMP_WORDS[1]} -help 2>&1 | grep Usage: | cut -d '[' -f 2- | awk '{gsub("] \\| \\[|]", " "); print $0}'`;
-        COMPREPLY=(`compgen -W "${temp}" -- ${cur}`);
+        temp=$($script ${COMP_WORDS[1]} -help 2>&1 | grep Usage: | cut -d '[' -f 2- | awk '{gsub("] \\| \\[|]", " "); print $0}');
+        COMPREPLY=($(compgen -W "${temp}" -- ${cur}));
        return 0;;

      *)
@@ -83,26 +83,24 @@ _hadoop() {
        # Pull the list of options, grep for the one the user is trying to use,
        # and then select the description of the relevant argument
        temp=$((${COMP_CWORD} - 1));
-        temp=`$script ${COMP_WORDS[1]} 2>&1 | grep -- "${COMP_WORDS[2]} " | awk '{gsub("[[ \\]]", ""); print $0}' | cut -d '<' -f ${temp}`;
+        temp=$($script ${COMP_WORDS[1]} 2>&1 | grep -- "${COMP_WORDS[2]} " | awk '{gsub("[[ \\]]", ""); print $0}' | cut -d '<' -f ${temp} | cut -d '>' -f 1);
 
 
        if [ ${#temp} -lt 1 ]; then
          # No match
          return 1;
        fi;

-        temp=${temp:0:$((${#temp} - 1))};
-
        # Now do completion based on the argument
        case $temp in
        path | src | dst)
          # DFS path completion
-          temp=`$script ${COMP_WORDS[1]} -ls "${cur}*" 2>&1 | grep -vE '^Found ' | cut -f 1 | awk '{gsub("^.* ", ""); print $0;}'`
-          COMPREPLY=(`compgen -W "${temp}" -- ${cur}`);
+          temp=$($script ${COMP_WORDS[1]} -ls -d "${cur}*" 2>/dev/null | grep -vE '^Found ' | cut -f 1 | awk '{gsub("^.* ", ""); print $0;}');
+          COMPREPLY=($(compgen -W "${temp}" -- ${cur}));
          return 0;;

        localsrc | localdst)
          # Local path completion
-          COMPREPLY=(`compgen -A file -- ${cur}`);
+          COMPREPLY=($(compgen -A file -- ${cur}));
          return 0;;

        *)

+ 6 - 4
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd

@@ -282,10 +282,12 @@ if not "%HADOOP_MAPRED_HOME%\%MAPRED_DIR%" == "%HADOOP_YARN_HOME%\%YARN_DIR%" (
@rem

if defined HADOOP_CLASSPATH (
-  if defined HADOOP_USER_CLASSPATH_FIRST (
-    set CLASSPATH=%HADOOP_CLASSPATH%;%CLASSPATH%;
-  ) else (
-    set CLASSPATH=%CLASSPATH%;%HADOOP_CLASSPATH%;
+  if not defined HADOOP_USE_CLIENT_CLASSLOADER (
+    if defined HADOOP_USER_CLASSPATH_FIRST (
+      set CLASSPATH=%HADOOP_CLASSPATH%;%CLASSPATH%;
+    ) else (
+      set CLASSPATH=%CLASSPATH%;%HADOOP_CLASSPATH%;
+    )
  )
)


+ 8 - 2
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

@@ -53,7 +53,10 @@ if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
fi

# get our functions defined for usage later
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then
+if [[ -n "${HADOOP_COMMON_HOME}" ]] && 
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh" ]]; then
+  . "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh"
else
  echo "ERROR: Unable to exec ${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh." 1>&2
@@ -61,7 +64,10 @@ else
fi

# allow overrides of the above and pre-defines of the below
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" ]]; then
+if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" ]]; then
+  . "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh"
fi


+ 26 - 38
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -59,8 +59,7 @@ function hadoop_bootstrap_init
  TOOL_PATH=${TOOL_PATH:-${HADOOP_PREFIX}/share/hadoop/tools/lib/*}

  export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
-
-  
+ 
  # defaults
  export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
}
@@ -69,17 +68,18 @@ function hadoop_find_confdir
{
  # NOTE: This function is not user replaceable.

+  local conf_dir
  # Look for the basic hadoop configuration area.
  #
  #
  # An attempt at compatibility with some Hadoop 1.x
  # installs.
  if [[ -e "${HADOOP_PREFIX}/conf/hadoop-env.sh" ]]; then
-    DEFAULT_CONF_DIR="conf"
+    conf_dir="conf"
  else
-    DEFAULT_CONF_DIR="etc/hadoop"
+    conf_dir="etc/hadoop"
  fi
-  export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${DEFAULT_CONF_DIR}}"
+  export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${conf_dir}}"
}

function hadoop_exec_hadoopenv
@@ -94,7 +94,6 @@ function hadoop_exec_hadoopenv
  fi
}

-
function hadoop_basic_init
{
  # Some of these are also set in hadoop-env.sh.
@@ -446,11 +445,11 @@ function hadoop_add_to_classpath_mapred
  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"'/*'
}

-
function hadoop_add_to_classpath_userpath
{
  # Add the user-specified HADOOP_CLASSPATH to the
-  # official CLASSPATH env var.
+  # official CLASSPATH env var if HADOOP_USE_CLIENT_CLASSLOADER
+  # is not set.
  # Add it first or last depending on if user has
  # set env-var HADOOP_USER_CLASSPATH_FIRST
  # we'll also dedupe it, because we're cool like that.
@@ -469,14 +468,16 @@ function hadoop_add_to_classpath_userpath
    done
    let j=c-1
    
-    if [[ -z "${HADOOP_USER_CLASSPATH_FIRST}" ]]; then
-      for ((i=j; i>=0; i--)); do
-        hadoop_add_classpath "${array[$i]}" before
-      done
-    else
-      for ((i=0; i<=j; i++)); do
-        hadoop_add_classpath "${array[$i]}" after
-      done
+    if [[ -z "${HADOOP_USE_CLIENT_CLASSLOADER}" ]]; then
+      if [[ -z "${HADOOP_USER_CLASSPATH_FIRST}" ]]; then
+        for ((i=j; i>=0; i--)); do
+          hadoop_add_classpath "${array[$i]}" before
+        done
+      else
+        for ((i=0; i<=j; i++)); do
+          hadoop_add_classpath "${array[$i]}" after
+        done
+      fi
    fi
  fi
}
@@ -548,7 +549,6 @@ function hadoop_java_setup
  fi
}

-
function hadoop_finalize_libpaths
{
  if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
@@ -561,26 +561,20 @@ function hadoop_finalize_libpaths
#
# fill in any last minute options that might not have been defined yet
#
-# Note that we are replacing ' ' with '\ ' so that directories with
-# spaces work correctly when run exec blah
-#
function hadoop_finalize_hadoop_opts
{
-  hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR/ /\ }"
-  hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE/ /\ }"
-  hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_PREFIX/ /\ }"
-  hadoop_add_param HADOOP_OPTS hadoop.id.str "-Dhadoop.id.str=${HADOOP_IDENT_STRING/ /\ }"
+  hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR}"
+  hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE}"
+  hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_PREFIX}"
+  hadoop_add_param HADOOP_OPTS hadoop.id.str "-Dhadoop.id.str=${HADOOP_IDENT_STRING}"
  hadoop_add_param HADOOP_OPTS hadoop.root.logger "-Dhadoop.root.logger=${HADOOP_ROOT_LOGGER}"
-  hadoop_add_param HADOOP_OPTS hadoop.policy.file "-Dhadoop.policy.file=${HADOOP_POLICYFILE/ /\ }"
+  hadoop_add_param HADOOP_OPTS hadoop.policy.file "-Dhadoop.policy.file=${HADOOP_POLICYFILE}"
  hadoop_add_param HADOOP_OPTS hadoop.security.logger "-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER}"
}

function hadoop_finalize_classpath
{
-  
-  # we want the HADOOP_CONF_DIR at the end
-  # according to oom, it gives a 2% perf boost
-  hadoop_add_classpath "${HADOOP_CONF_DIR}" after
+  hadoop_add_classpath "${HADOOP_CONF_DIR}" before
  
  # user classpath gets added at the last minute. this allows
  # override of CONF dirs and more
@@ -721,10 +715,8 @@ function hadoop_java_exec
  local command=$1
  local class=$2
  shift 2
-  # we eval this so that paths with spaces work
  #shellcheck disable=SC2086
-  eval exec "$JAVA" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
-
+  exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
}

function hadoop_start_daemon
@@ -736,7 +728,7 @@ function hadoop_start_daemon
  local class=$2
  shift 2
  #shellcheck disable=SC2086
-  eval exec "$JAVA" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
+  exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
}

function hadoop_start_daemon_wrapper
@@ -799,9 +791,7 @@ function hadoop_start_secure_daemon
  # where to send stderr.  same thing, except &2 = stderr
  local daemonerrfile=$5
  shift 5
-  
-  
-  
+ 
  hadoop_rotate_log "${daemonoutfile}"
  hadoop_rotate_log "${daemonerrfile}"
  
@@ -922,7 +912,6 @@ function hadoop_stop_daemon
  fi
}

-
function hadoop_stop_secure_daemon
{
  local command=$1
@@ -981,7 +970,6 @@ function hadoop_daemon_handler
  esac
}

-
function hadoop_secure_daemon_handler
{
  local daemonmode=$1

+ 20 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd

@@ -28,6 +28,26 @@
@rem                                    classpath. Can be defined, for example,
@rem                                    by doing
@rem                                    export HADOOP_USER_CLASSPATH_FIRST=true
+@rem
+@rem   HADOOP_USE_CLIENT_CLASSLOADER    When defined, HADOOP_CLASSPATH and the
+@rem                                    jar as the hadoop jar argument are
+@rem                                    handled by a separate isolated client
+@rem                                    classloader. If it is set,
+@rem                                    HADOOP_USER_CLASSPATH_FIRST is
+@rem                                    ignored. Can be defined by doing
+@rem                                    export HADOOP_USE_CLIENT_CLASSLOADER=true
+@rem
+@rem   HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES
+@rem                                    When defined, it overrides the default
+@rem                                    definition of system classes for the
+@rem                                    client classloader when
+@rem                                    HADOOP_USE_CLIENT_CLASSLOADER is
+@rem                                    enabled. Names ending in '.' (period)
+@rem                                    are treated as package names, and names
+@rem                                    starting with a '-' are treated as
+@rem                                    negative matches. For example,
+@rem                                    export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
+
@rem
@rem   HADOOP_HEAPSIZE  The maximum amount of heap to use, in MB.
@rem                    Default is 1000.

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh

@@ -111,6 +111,17 @@ esac
# Should HADOOP_USER_CLASSPATH be first in the official CLASSPATH?
# export HADOOP_USER_CLASSPATH_FIRST="yes"

+# If HADOOP_USE_CLIENT_CLASSLOADER is set, HADOOP_CLASSPATH along with the main
+# jar are handled by a separate isolated client classloader. If it is set,
+# HADOOP_USER_CLASSPATH_FIRST is ignored. Can be defined by doing
+# export HADOOP_USE_CLIENT_CLASSLOADER=true
+
+# HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES overrides the default definition of
+# system classes for the client classloader when HADOOP_USE_CLIENT_CLASSLOADER
+# is enabled. Names ending in '.' (period) are treated as package names, and
+# names starting with a '-' are treated as negative matches. For example,
+# export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
+
###
# Options for remote shell connectivity
###
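
The hadoop-env.sh comments above describe the isolated client classloader settings added by this commit: entries in HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES that end in '.' match whole packages, and entries starting with '-' are negative matches. As a rough, hedged illustration of that matching rule only (this is not the org.apache.hadoop.util.ApplicationClassLoader code from the commit, and the exact precedence of overlapping entries is assumed to be first-match-wins):

import java.util.Arrays;
import java.util.List;

// Hypothetical sketch of the documented HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES
// matching rules: names ending in '.' are package prefixes, names starting
// with '-' are negative matches; the first matching entry decides.
public class SystemClassFilter {
  private final List<String> entries;

  public SystemClassFilter(String spec) {
    entries = Arrays.asList(spec.split(","));
  }

  public boolean isSystemClass(String className) {
    for (String entry : entries) {
      boolean negative = entry.startsWith("-");
      String name = negative ? entry.substring(1) : entry;
      boolean matched = name.endsWith(".") ? className.startsWith(name)
                                           : className.equals(name);
      if (matched) {
        return !negative;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    SystemClassFilter f = new SystemClassFilter(
        "-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop.");
    System.out.println(f.isSystemClass("java.lang.String"));            // true
    System.out.println(f.isSystemClass("org.apache.hadoop.UserClass")); // false
  }
}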

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -53,8 +53,9 @@ import org.apache.hadoop.util.Time;
 * <code>SequenceFile</code>s are flat files consisting of binary key/value 
 * pairs.
 * 
- * <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and
- * {@link Sorter} classes for writing, reading and sorting respectively.</p>
+ * <p><code>SequenceFile</code> provides {@link SequenceFile.Writer},
+ * {@link SequenceFile.Reader} and {@link Sorter} classes for writing,
+ * reading and sorting respectively.</p>
 * 
 * There are three <code>SequenceFile</code> <code>Writer</code>s based on the 
 * {@link CompressionType} used to compress key/value pairs:
@@ -79,8 +80,8 @@ import org.apache.hadoop.util.Time;
 * <p>The recommended way is to use the static <tt>createWriter</tt> methods
 * provided by the <code>SequenceFile</code> to chose the preferred format.</p>
 *
- * <p>The {@link Reader} acts as the bridge and can read any of the above 
- * <code>SequenceFile</code> formats.</p>
+ * <p>The {@link SequenceFile.Reader} acts as the bridge and can read any of the
+ * above <code>SequenceFile</code> formats.</p>
 *
 * <h4 id="Formats">SequenceFile Formats</h4>
 * 
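
The corrected Javadoc points readers at SequenceFile.Writer and SequenceFile.Reader. For context, a minimal usage sketch against the public SequenceFile API; the path and the key/value types below are placeholders, not part of the commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Write a few records with the static createWriter factory, then read them
// back; the Reader bridges all three SequenceFile formats.
public class SequenceFileExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.seq");

    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(path),
        SequenceFile.Writer.keyClass(IntWritable.class),
        SequenceFile.Writer.valueClass(Text.class));
    try {
      writer.append(new IntWritable(1), new Text("first record"));
      writer.append(new IntWritable(2), new Text("second record"));
    } finally {
      writer.close();
    }

    SequenceFile.Reader reader =
        new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
    try {
      IntWritable key = new IntWritable();
      Text value = new Text();
      while (reader.next(key, value)) {
        System.out.println(key + "\t" + value);
      }
    } finally {
      reader.close();
    }
  }
}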

+ 449 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java

@@ -0,0 +1,449 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.lang.ref.WeakReference;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.AbstractQueue;
+import java.util.HashMap;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.util.MBeans;
+
+/**
+ * A queue with multiple levels for each priority.
+ */
+public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
+  implements BlockingQueue<E> {
+  // Configuration Keys
+  public static final int    IPC_CALLQUEUE_PRIORITY_LEVELS_DEFAULT = 4;
+  public static final String IPC_CALLQUEUE_PRIORITY_LEVELS_KEY =
+    "faircallqueue.priority-levels";
+
+  public static final Log LOG = LogFactory.getLog(FairCallQueue.class);
+
+  /* The queues */
+  private final ArrayList<BlockingQueue<E>> queues;
+
+  /* Read locks */
+  private final ReentrantLock takeLock = new ReentrantLock();
+  private final Condition notEmpty = takeLock.newCondition();
+  private void signalNotEmpty() {
+    takeLock.lock();
+    try {
+      notEmpty.signal();
+    } finally {
+      takeLock.unlock();
+    }
+  }
+
+  /* Scheduler picks which queue to place in */
+  private RpcScheduler scheduler;
+
+  /* Multiplexer picks which queue to draw from */
+  private RpcMultiplexer multiplexer;
+
+  /* Statistic tracking */
+  private final ArrayList<AtomicLong> overflowedCalls;
+
+  /**
+   * Create a FairCallQueue.
+   * @param capacity the maximum size of each sub-queue
+   * @param ns the prefix to use for configuration
+   * @param conf the configuration to read from
+   * Notes: the FairCallQueue has no fixed capacity. Rather, it has a minimum
+   * capacity of `capacity` and a maximum capacity of `capacity * number_queues`
+   */
+  public FairCallQueue(int capacity, String ns, Configuration conf) {
+    int numQueues = parseNumQueues(ns, conf);
+    LOG.info("FairCallQueue is in use with " + numQueues + " queues.");
+
+    this.queues = new ArrayList<BlockingQueue<E>>(numQueues);
+    this.overflowedCalls = new ArrayList<AtomicLong>(numQueues);
+
+    for(int i=0; i < numQueues; i++) {
+      this.queues.add(new LinkedBlockingQueue<E>(capacity));
+      this.overflowedCalls.add(new AtomicLong(0));
+    }
+
+    this.scheduler = new DecayRpcScheduler(numQueues, ns, conf);
+    this.multiplexer = new WeightedRoundRobinMultiplexer(numQueues, ns, conf);
+
+    // Make this the active source of metrics
+    MetricsProxy mp = MetricsProxy.getInstance(ns);
+    mp.setDelegate(this);
+  }
+
+  /**
+   * Read the number of queues from the configuration.
+   * This will affect the FairCallQueue's overall capacity.
+   * @throws IllegalArgumentException on invalid queue count
+   */
+  private static int parseNumQueues(String ns, Configuration conf) {
+    int retval = conf.getInt(ns + "." + IPC_CALLQUEUE_PRIORITY_LEVELS_KEY,
+      IPC_CALLQUEUE_PRIORITY_LEVELS_DEFAULT);
+    if(retval < 1) {
+      throw new IllegalArgumentException("numQueues must be at least 1");
+    }
+    return retval;
+  }
+
+  /**
+   * Returns the first non-empty queue with equal or lesser priority
+   * than <i>startIdx</i>. Wraps around, searching a maximum of N
+   * queues, where N is this.queues.size().
+   *
+   * @param startIdx the queue number to start searching at
+   * @return the first non-empty queue with less priority, or null if
+   * everything was empty
+   */
+  private BlockingQueue<E> getFirstNonEmptyQueue(int startIdx) {
+    final int numQueues = this.queues.size();
+    for(int i=0; i < numQueues; i++) {
+      int idx = (i + startIdx) % numQueues; // offset and wrap around
+      BlockingQueue<E> queue = this.queues.get(idx);
+      if (queue.size() != 0) {
+        return queue;
+      }
+    }
+
+    // All queues were empty
+    return null;
+  }
+
+  /* AbstractQueue and BlockingQueue methods */
+
+  /**
+   * Put and offer follow the same pattern:
+   * 1. Get a priorityLevel from the scheduler
+   * 2. Get the nth sub-queue matching this priorityLevel
+   * 3. delegate the call to this sub-queue.
+   *
+   * But differ in how they handle overflow:
+   * - Put will move on to the next queue until it lands on the last queue
+   * - Offer does not attempt other queues on overflow
+   */
+  @Override
+  public void put(E e) throws InterruptedException {
+    int priorityLevel = scheduler.getPriorityLevel(e);
+
+    final int numLevels = this.queues.size();
+    while (true) {
+      BlockingQueue<E> q = this.queues.get(priorityLevel);
+      boolean res = q.offer(e);
+      if (!res) {
+        // Update stats
+        this.overflowedCalls.get(priorityLevel).getAndIncrement();
+
+        // If we failed to insert, try again on the next level
+        priorityLevel++;
+
+        if (priorityLevel == numLevels) {
+          // That was the last one, we will block on put in the last queue
+          // Delete this line to drop the call
+          this.queues.get(priorityLevel-1).put(e);
+          break;
+        }
+      } else {
+        break;
+      }
+    }
+
+
+    signalNotEmpty();
+  }
+
+  @Override
+  public boolean offer(E e, long timeout, TimeUnit unit)
+      throws InterruptedException {
+    int priorityLevel = scheduler.getPriorityLevel(e);
+    BlockingQueue<E> q = this.queues.get(priorityLevel);
+    boolean ret = q.offer(e, timeout, unit);
+
+    signalNotEmpty();
+
+    return ret;
+  }
+
+  @Override
+  public boolean offer(E e) {
+    int priorityLevel = scheduler.getPriorityLevel(e);
+    BlockingQueue<E> q = this.queues.get(priorityLevel);
+    boolean ret = q.offer(e);
+
+    signalNotEmpty();
+
+    return ret;
+  }
+
+  @Override
+  public E take() throws InterruptedException {
+    int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();
+
+    takeLock.lockInterruptibly();
+    try {
+      // Wait while queue is empty
+      for (;;) {
+        BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
+        if (q != null) {
+          // Got queue, so return if we can poll out an object
+          E e = q.poll();
+          if (e != null) {
+            return e;
+          }
+        }
+
+        notEmpty.await();
+      }
+    } finally {
+      takeLock.unlock();
+    }
+  }
+
+  @Override
+  public E poll(long timeout, TimeUnit unit)
+      throws InterruptedException {
+
+    int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();
+
+    long nanos = unit.toNanos(timeout);
+    takeLock.lockInterruptibly();
+    try {
+      for (;;) {
+        BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
+        if (q != null) {
+          E e = q.poll();
+          if (e != null) {
+            // Escape condition: there might be something available
+            return e;
+          }
+        }
+
+        if (nanos <= 0) {
+          // Wait has elapsed
+          return null;
+        }
+
+        try {
+          // Now wait on the condition for a bit. If we get
+          // spuriously awoken we'll re-loop
+          nanos = notEmpty.awaitNanos(nanos);
+        } catch (InterruptedException ie) {
+          notEmpty.signal(); // propagate to a non-interrupted thread
+          throw ie;
+        }
+      }
+    } finally {
+      takeLock.unlock();
+    }
+  }
+
+  /**
+   * poll() provides no strict consistency: it is possible for poll to return
+   * null even though an element is in the queue.
+   */
+  @Override
+  public E poll() {
+    int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();
+
+    BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
+    if (q == null) {
+      return null; // everything is empty
+    }
+
+    // Delegate to the sub-queue's poll, which could still return null
+    return q.poll();
+  }
+
+  /**
+   * Peek, like poll, provides no strict consistency.
+   */
+  @Override
+  public E peek() {
+    BlockingQueue<E> q = this.getFirstNonEmptyQueue(0);
+    if (q == null) {
+      return null;
+    } else {
+      return q.peek();
+    }
+  }
+
+  /**
+   * Size returns the sum of all sub-queue sizes, so it may be greater than
+   * capacity.
+   * Note: size provides no strict consistency, and should not be used to
+   * control queue IO.
+   */
+  @Override
+  public int size() {
+    int size = 0;
+    for (BlockingQueue q : this.queues) {
+      size += q.size();
+    }
+    return size;
+  }
+
+  /**
+   * Iterator is not implemented, as it is not needed.
+   */
+  @Override
+  public Iterator<E> iterator() {
+    throw new NotImplementedException();
+  }
+
+  /**
+   * drainTo defers to each sub-queue. Note that draining from a FairCallQueue
+   * to another FairCallQueue will likely fail, since the incoming calls
+   * may be scheduled differently in the new FairCallQueue. Nonetheless this
+   * method is provided for completeness.
+   */
+  @Override
+  public int drainTo(Collection<? super E> c, int maxElements) {
+    int sum = 0;
+    for (BlockingQueue<E> q : this.queues) {
+      sum += q.drainTo(c, maxElements);
+    }
+    return sum;
+  }
+
+  @Override
+  public int drainTo(Collection<? super E> c) {
+    int sum = 0;
+    for (BlockingQueue<E> q : this.queues) {
+      sum += q.drainTo(c);
+    }
+    return sum;
+  }
+
+  /**
+   * Returns maximum remaining capacity. This does not reflect how much you can
+   * ideally fit in this FairCallQueue, as that would depend on the scheduler's
+   * decisions.
+   */
+  @Override
+  public int remainingCapacity() {
+    int sum = 0;
+    for (BlockingQueue q : this.queues) {
+      sum += q.remainingCapacity();
+    }
+    return sum;
+  }
+
+  /**
+   * MetricsProxy is a singleton because we may init multiple
+   * FairCallQueues, but the metrics system cannot unregister beans cleanly.
+   */
+  private static final class MetricsProxy implements FairCallQueueMXBean {
+    // One singleton per namespace
+    private static final HashMap<String, MetricsProxy> INSTANCES =
+      new HashMap<String, MetricsProxy>();
+
+    // Weakref for delegate, so we don't retain it forever if it can be GC'd
+    private WeakReference<FairCallQueue> delegate;
+
+    // Keep track of how many objects we registered
+    private int revisionNumber = 0;
+
+    private MetricsProxy(String namespace) {
+      MBeans.register(namespace, "FairCallQueue", this);
+    }
+
+    public static synchronized MetricsProxy getInstance(String namespace) {
+      MetricsProxy mp = INSTANCES.get(namespace);
+      if (mp == null) {
+        // We must create one
+        mp = new MetricsProxy(namespace);
+        INSTANCES.put(namespace, mp);
+      }
+      return mp;
+    }
+
+    public void setDelegate(FairCallQueue obj) {
+      this.delegate = new WeakReference<FairCallQueue>(obj);
+      this.revisionNumber++;
+    }
+
+    @Override
+    public int[] getQueueSizes() {
+      FairCallQueue obj = this.delegate.get();
+      if (obj == null) {
+        return new int[]{};
+      }
+
+      return obj.getQueueSizes();
+    }
+
+    @Override
+    public long[] getOverflowedCalls() {
+      FairCallQueue obj = this.delegate.get();
+      if (obj == null) {
+        return new long[]{};
+      }
+
+      return obj.getOverflowedCalls();
+    }
+
+    @Override public int getRevision() {
+      return revisionNumber;
+    }
+  }
+
+  // FairCallQueueMXBean
+  public int[] getQueueSizes() {
+    int numQueues = queues.size();
+    int[] sizes = new int[numQueues];
+    for (int i=0; i < numQueues; i++) {
+      sizes[i] = queues.get(i).size();
+    }
+    return sizes;
+  }
+
+  public long[] getOverflowedCalls() {
+    int numQueues = queues.size();
+    long[] calls = new long[numQueues];
+    for (int i=0; i < numQueues; i++) {
+      calls[i] = overflowedCalls.get(i).get();
+    }
+    return calls;
+  }
+
+  // For testing
+  @VisibleForTesting
+  public void setScheduler(RpcScheduler newScheduler) {
+    this.scheduler = newScheduler;
+  }
+
+  @VisibleForTesting
+  public void setMultiplexer(RpcMultiplexer newMux) {
+    this.multiplexer = newMux;
+  }
+}
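
For reference only (not part of this patch), a minimal usage sketch of the queue added above. It assumes the public constructor and the IPC_CALLQUEUE_PRIORITY_LEVELS_KEY constant that the new TestFairCallQueue later in this change relies on; "call" stands for any Schedulable instance and exception handling is omitted.

    Configuration conf = new Configuration();
    // Two priority levels for the "ns" namespace. Judging from the tests, the
    // capacity argument applies to each sub-queue rather than to the whole queue.
    conf.setInt("ns." + FairCallQueue.IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
    FairCallQueue<Schedulable> queue =
        new FairCallQueue<Schedulable>(100, "ns", conf);

    queue.put(call);                      // blocks; overflows into lower-priority sub-queues when full
    boolean accepted = queue.offer(call); // non-blocking; does not try other sub-queues
    Schedulable next = queue.take();      // drains according to the multiplexer's current index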

+ 27 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueueMXBean.java

@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+public interface FairCallQueueMXBean {
+  // Get the size of each subqueue, the index corresponding to the priority
+  // level.
+  int[] getQueueSizes();
+  long[] getOverflowedCalls();
+  int getRevision();
+}
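
As an aside (not in the patch), a hedged sketch of reading these attributes over JMX. It assumes the MBeans.register call above publishes the bean under Hadoop's usual "Hadoop:service=<namespace>,name=FairCallQueue" object name; the namespace "ns" is illustrative.

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Object name is an assumption based on Hadoop's usual MBeans.register naming.
    ObjectName name = new ObjectName("Hadoop:service=ns,name=FairCallQueue");
    int[] sizes = (int[]) server.getAttribute(name, "QueueSizes");             // per-priority queue sizes
    long[] overflowed = (long[]) server.getAttribute(name, "OverflowedCalls"); // per-priority overflow counts
    int revision = (Integer) server.getAttribute(name, "Revision");            // how many queues registered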

+ 32 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcMultiplexer.java

@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+/**
+ * Implement this interface to make a pluggable multiplexer in the
+ * FairCallQueue.
+ */
+public interface RpcMultiplexer {
+  /**
+   * Should get current index and optionally perform whatever is needed
+   * to prepare the next index.
+   * @return current index
+   */
+  int getAndAdvanceCurrentIndex();
+}
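
To illustrate the contract (not part of the patch), a minimal round-robin implementation of this interface; the WeightedRoundRobinMultiplexer change below is the real pluggable implementation, this sketch only shows the expected behavior.

    import java.util.concurrent.atomic.AtomicInteger;

    public class SimpleRoundRobinMultiplexer implements RpcMultiplexer {
      private final int numQueues;
      private final AtomicInteger counter = new AtomicInteger();

      public SimpleRoundRobinMultiplexer(int numQueues) {
        this.numQueues = numQueues;
      }

      @Override
      public int getAndAdvanceCurrentIndex() {
        // Return the current queue index and advance, wrapping around numQueues.
        // Masking keeps the value non-negative if the counter ever overflows.
        return (counter.getAndIncrement() & Integer.MAX_VALUE) % numQueues;
      }
    }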

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java

@@ -38,7 +38,7 @@ import org.apache.hadoop.conf.Configuration;
  * There may be more reads than the minimum due to race conditions. This is
  * allowed by design for performance reasons.
  */
-public class WeightedRoundRobinMultiplexer {
+public class WeightedRoundRobinMultiplexer implements RpcMultiplexer {
   // Config keys
   public static final String IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY =
     "faircallqueue.multiplexer.weights";

+ 219 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java

@@ -0,0 +1,219 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * A {@link URLClassLoader} for application isolation. Classes from the
+ * application JARs are loaded in preference to the parent loader.
+ */
+@Public
+@Unstable
+public class ApplicationClassLoader extends URLClassLoader {
+  /**
+   * Default value of the system classes if the user did not override them.
+   * JDK classes, hadoop classes and resources, and some select third-party
+   * classes are considered system classes, and are not loaded by the
+   * application classloader.
+   */
+  public static final String DEFAULT_SYSTEM_CLASSES =
+        "java.," +
+        "javax.," +
+        "org.w3c.dom.," +
+        "org.xml.sax.," +
+        "org.apache.commons.logging.," +
+        "org.apache.log4j.," +
+        "org.apache.hadoop.," +
+        "core-default.xml," +
+        "hdfs-default.xml," +
+        "mapred-default.xml," +
+        "yarn-default.xml";
+
+  private static final Log LOG =
+    LogFactory.getLog(ApplicationClassLoader.class.getName());
+
+  private static final FilenameFilter JAR_FILENAME_FILTER =
+    new FilenameFilter() {
+      @Override
+      public boolean accept(File dir, String name) {
+        return name.endsWith(".jar") || name.endsWith(".JAR");
+      }
+  };
+
+  private final ClassLoader parent;
+  private final List<String> systemClasses;
+
+  public ApplicationClassLoader(URL[] urls, ClassLoader parent,
+      List<String> systemClasses) {
+    super(urls, parent);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("urls: " + Arrays.toString(urls));
+      LOG.debug("system classes: " + systemClasses);
+    }
+    this.parent = parent;
+    if (parent == null) {
+      throw new IllegalArgumentException("No parent classloader!");
+    }
+    // if the caller-specified system classes are null or empty, use the default
+    this.systemClasses = (systemClasses == null || systemClasses.isEmpty()) ?
+        Arrays.asList(StringUtils.getTrimmedStrings(DEFAULT_SYSTEM_CLASSES)) :
+        systemClasses;
+    LOG.info("system classes: " + this.systemClasses);
+  }
+
+  public ApplicationClassLoader(String classpath, ClassLoader parent,
+      List<String> systemClasses) throws MalformedURLException {
+    this(constructUrlsFromClasspath(classpath), parent, systemClasses);
+  }
+
+  static URL[] constructUrlsFromClasspath(String classpath)
+      throws MalformedURLException {
+    List<URL> urls = new ArrayList<URL>();
+    for (String element : classpath.split(File.pathSeparator)) {
+      if (element.endsWith("/*")) {
+        String dir = element.substring(0, element.length() - 1);
+        File[] files = new File(dir).listFiles(JAR_FILENAME_FILTER);
+        if (files != null) {
+          for (File file : files) {
+            urls.add(file.toURI().toURL());
+          }
+        }
+      } else {
+        File file = new File(element);
+        if (file.exists()) {
+          urls.add(new File(element).toURI().toURL());
+        }
+      }
+    }
+    return urls.toArray(new URL[urls.size()]);
+  }
+
+  @Override
+  public URL getResource(String name) {
+    URL url = null;
+    
+    if (!isSystemClass(name, systemClasses)) {
+      url= findResource(name);
+      if (url == null && name.startsWith("/")) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Remove leading / off " + name);
+        }
+        url= findResource(name.substring(1));
+      }
+    }
+
+    if (url == null) {
+      url= parent.getResource(name);
+    }
+
+    if (url != null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("getResource("+name+")=" + url);
+      }
+    }
+    
+    return url;
+  }
+
+  @Override
+  public Class<?> loadClass(String name) throws ClassNotFoundException {
+    return this.loadClass(name, false);
+  }
+
+  @Override
+  protected synchronized Class<?> loadClass(String name, boolean resolve)
+      throws ClassNotFoundException {
+    
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loading class: " + name);
+    }
+
+    Class<?> c = findLoadedClass(name);
+    ClassNotFoundException ex = null;
+
+    if (c == null && !isSystemClass(name, systemClasses)) {
+      // Try to load class from this classloader's URLs. Note that this is like
+      // the servlet spec, not the usual Java 2 behaviour where we ask the
+      // parent to attempt to load first.
+      try {
+        c = findClass(name);
+        if (LOG.isDebugEnabled() && c != null) {
+          LOG.debug("Loaded class: " + name + " ");
+        }
+      } catch (ClassNotFoundException e) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(e);
+        }
+        ex = e;
+      }
+    }
+
+    if (c == null) { // try parent
+      c = parent.loadClass(name);
+      if (LOG.isDebugEnabled() && c != null) {
+        LOG.debug("Loaded class from parent: " + name + " ");
+      }
+    }
+
+    if (c == null) {
+      throw ex != null ? ex : new ClassNotFoundException(name);
+    }
+
+    if (resolve) {
+      resolveClass(c);
+    }
+
+    return c;
+  }
+
+  public static boolean isSystemClass(String name, List<String> systemClasses) {
+    if (systemClasses != null) {
+      String canonicalName = name.replace('/', '.');
+      while (canonicalName.startsWith(".")) {
+        canonicalName=canonicalName.substring(1);
+      }
+      for (String c : systemClasses) {
+        boolean result = true;
+        if (c.startsWith("-")) {
+          c = c.substring(1);
+          result = false;
+        }
+        if (c.endsWith(".") && canonicalName.startsWith(c)) {
+          return result;
+        } else if (canonicalName.equals(c)) {
+          return result;
+        }
+      }
+    }
+    return false;
+  }
+}
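
A short sketch of how the new loader can be driven (not part of the patch). The classpath and package names are made up for illustration; a leading "-" in a system-classes entry exempts that prefix, and MalformedURLException handling is omitted.

    // Exempt the application's own package from the system classes so it is
    // loaded by this classloader instead of the parent.
    String systemClasses = "-org.example.app.,"
        + ApplicationClassLoader.DEFAULT_SYSTEM_CLASSES;
    List<String> systemClassesList =
        Arrays.asList(StringUtils.getTrimmedStrings(systemClasses));

    ClassLoader loader = new ApplicationClassLoader(
        "/tmp/app/classes:/tmp/app/lib/*",   // hypothetical classpath (Unix path separator)
        ApplicationClassLoader.class.getClassLoader(),
        systemClassesList);

    ApplicationClassLoader.isSystemClass("java.lang.String", systemClassesList);     // true: "java." prefix
    ApplicationClassLoader.isSystemClass("org.example.app.Main", systemClassesList); // false: negated prefix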

+ 92 - 23
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java

@@ -18,23 +18,25 @@
 
 
 package org.apache.hadoop.util;
 
-import java.lang.reflect.Array;
-import java.lang.reflect.Method;
-import java.lang.reflect.InvocationTargetException;
-import java.net.URL;
-import java.net.URLClassLoader;
+import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.File;
-import java.util.regex.Pattern;
-import java.util.Arrays;
+import java.lang.reflect.Array;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Enumeration;
-import java.util.jar.JarFile;
+import java.util.List;
 import java.util.jar.JarEntry;
+import java.util.jar.JarFile;
 import java.util.jar.Manifest;
+import java.util.regex.Pattern;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -55,6 +57,21 @@ public class RunJar {
    */
   public static final int SHUTDOWN_HOOK_PRIORITY = 10;
 
+  /**
+   * Environment key for using the client classloader.
+   */
+  public static final String HADOOP_USE_CLIENT_CLASSLOADER =
+      "HADOOP_USE_CLIENT_CLASSLOADER";
+  /**
+   * Environment key for the (user-provided) hadoop classpath.
+   */
+  public static final String HADOOP_CLASSPATH = "HADOOP_CLASSPATH";
+  /**
+   * Environment key for the system classes.
+   */
+  public static final String HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES =
+      "HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES";
+
   /**
    * Unpack a jar file into a directory.
    *
@@ -116,6 +133,10 @@ public class RunJar {
   /** Run a Hadoop job jar.  If the main class is not in the jar's manifest,
    * then it must be provided on the command line. */
   public static void main(String[] args) throws Throwable {
+    new RunJar().run(args);
+  }
+
+  public void run(String[] args) throws Throwable {
     String usage = "RunJar jarFile [mainClass] args...";
 
     if (args.length < 1) {
@@ -187,19 +208,7 @@ public class RunJar {
 
 
     unJar(file, workDir);
 
-    ArrayList<URL> classPath = new ArrayList<URL>();
-    classPath.add(new File(workDir+"/").toURI().toURL());
-    classPath.add(file.toURI().toURL());
-    classPath.add(new File(workDir, "classes/").toURI().toURL());
-    File[] libs = new File(workDir, "lib").listFiles();
-    if (libs != null) {
-      for (int i = 0; i < libs.length; i++) {
-        classPath.add(libs[i].toURI().toURL());
-      }
-    }
-    
-    ClassLoader loader =
-      new URLClassLoader(classPath.toArray(new URL[0]));
+    ClassLoader loader = createClassLoader(file, workDir);
 
     Thread.currentThread().setContextClassLoader(loader);
     Class<?> mainClass = Class.forName(mainClassName, true, loader);
@@ -214,5 +223,65 @@ public class RunJar {
       throw e.getTargetException();
     }
   }
-  
+
+  /**
+   * Creates a classloader based on the environment that was specified by the
+   * user. If HADOOP_USE_CLIENT_CLASSLOADER is specified, it creates an
+   * application classloader that provides the isolation of the user class space
+   * from the hadoop classes and their dependencies. It forms a class space for
+   * the user jar as well as the HADOOP_CLASSPATH. Otherwise, it creates a
+   * classloader that simply adds the user jar to the classpath.
+   */
+  private ClassLoader createClassLoader(File file, final File workDir)
+      throws MalformedURLException {
+    ClassLoader loader;
+    // see if the client classloader is enabled
+    if (useClientClassLoader()) {
+      StringBuilder sb = new StringBuilder();
+      sb.append(workDir+"/").
+          append(File.pathSeparator).append(file).
+          append(File.pathSeparator).append(workDir+"/classes/").
+          append(File.pathSeparator).append(workDir+"/lib/*");
+      // HADOOP_CLASSPATH is added to the client classpath
+      String hadoopClasspath = getHadoopClasspath();
+      if (hadoopClasspath != null && !hadoopClasspath.isEmpty()) {
+        sb.append(File.pathSeparator).append(hadoopClasspath);
+      }
+      String clientClasspath = sb.toString();
+      // get the system classes
+      String systemClasses = getSystemClasses();
+      List<String> systemClassesList = systemClasses == null ?
+          null :
+          Arrays.asList(StringUtils.getTrimmedStrings(systemClasses));
+      // create an application classloader that isolates the user classes
+      loader = new ApplicationClassLoader(clientClasspath,
+          getClass().getClassLoader(), systemClassesList);
+    } else {
+      List<URL> classPath = new ArrayList<URL>();
+      classPath.add(new File(workDir+"/").toURI().toURL());
+      classPath.add(file.toURI().toURL());
+      classPath.add(new File(workDir, "classes/").toURI().toURL());
+      File[] libs = new File(workDir, "lib").listFiles();
+      if (libs != null) {
+        for (int i = 0; i < libs.length; i++) {
+          classPath.add(libs[i].toURI().toURL());
+        }
+      }
+      // create a normal parent-delegating classloader
+      loader = new URLClassLoader(classPath.toArray(new URL[0]));
+    }
+    return loader;
+  }
+
+  boolean useClientClassLoader() {
+    return Boolean.parseBoolean(System.getenv(HADOOP_USE_CLIENT_CLASSLOADER));
+  }
+
+  String getHadoopClasspath() {
+    return System.getenv(HADOOP_CLASSPATH);
+  }
+
+  String getSystemClasses() {
+    return System.getenv(HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES);
+  }
 }
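
As context (not part of the patch), these hooks are environment-driven: setting HADOOP_USE_CLIENT_CLASSLOADER=true in the environment before invoking RunJar turns on the isolating classloader, and HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES overrides the system classes. Because the hooks are package-private, code in the same package can also override them directly, roughly as the new TestRunJar case below does with a spy. The class and jar names here are hypothetical and exception handling is omitted.

    // A hypothetical same-package subclass that forces the client classloader on.
    class ClientClassLoaderRunJar extends RunJar {
      @Override
      boolean useClientClassLoader() {
        return true;
      }
      @Override
      String getSystemClasses() {
        // Exempt the job's main class so the application classloader loads it.
        return "-org.example.MyJob," + ApplicationClassLoader.DEFAULT_SYSTEM_CLASSES;
      }
    }

    new ClientClassLoaderRunJar().run(new String[] {"/tmp/myjob.jar", "org.example.MyJob"});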

+ 10 - 0
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_user_info.c

@@ -193,7 +193,17 @@ int hadoop_user_info_getgroups(struct hadoop_user_info *uinfo)
   ngroups = uinfo->gids_size;
   ret = getgrouplist(uinfo->pwd.pw_name, uinfo->pwd.pw_gid, 
                          uinfo->gids, &ngroups);
+  // Return value is different on Linux vs. FreeBSD.  Linux: the number of groups
+  // or -1 on error.  FreeBSD: 0 on success or -1 on error.  Unfortunately, we
+  // can't accept a 0 return on Linux, because buggy implementations have been
+  // observed to return 0 but leave the other out parameters in an indeterminate
+  // state.  This deviates from the man page, but it has been observed in
+  // practice.  See issue HADOOP-10989 for details.
+#ifdef __linux__
+  if (ret > 0) {
+#else
   if (ret >= 0) {
+#endif
     uinfo->num_gids = ngroups;
     ret = put_primary_gid_first(uinfo);
     if (ret) {

+ 392 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java

@@ -0,0 +1,392 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import junit.framework.TestCase;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.BlockingQueue;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.conf.Configuration;
+import org.mockito.Matchers;
+
+import static org.apache.hadoop.ipc.FairCallQueue.IPC_CALLQUEUE_PRIORITY_LEVELS_KEY;
+
+public class TestFairCallQueue extends TestCase {
+  private FairCallQueue<Schedulable> fcq;
+
+  private Schedulable mockCall(String id) {
+    Schedulable mockCall = mock(Schedulable.class);
+    UserGroupInformation ugi = mock(UserGroupInformation.class);
+
+    when(ugi.getUserName()).thenReturn(id);
+    when(mockCall.getUserGroupInformation()).thenReturn(ugi);
+
+    return mockCall;
+  }
+
+  // A scheduler which always schedules into priority zero
+  private RpcScheduler alwaysZeroScheduler;
+  {
+    RpcScheduler sched = mock(RpcScheduler.class);
+    when(sched.getPriorityLevel(Matchers.<Schedulable>any())).thenReturn(0); // always queue 0
+    alwaysZeroScheduler = sched;
+  }
+
+  public void setUp() {
+    Configuration conf = new Configuration();
+    conf.setInt("ns." + IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
+
+    fcq = new FairCallQueue<Schedulable>(5, "ns", conf);
+  }
+
+  //
+  // Ensure that FairCallQueue properly implements BlockingQueue
+  //
+  public void testPollReturnsNullWhenEmpty() {
+    assertNull(fcq.poll());
+  }
+
+  public void testPollReturnsTopCallWhenNotEmpty() {
+    Schedulable call = mockCall("c");
+    assertTrue(fcq.offer(call));
+
+    assertEquals(call, fcq.poll());
+
+    // Poll took it out so the fcq is empty
+    assertEquals(0, fcq.size());
+  }
+
+  public void testOfferSucceeds() {
+    fcq.setScheduler(alwaysZeroScheduler);
+
+    for (int i = 0; i < 5; i++) {
+      // We can fit 10 calls
+      assertTrue(fcq.offer(mockCall("c")));
+    }
+
+    assertEquals(5, fcq.size());
+  }
+
+  public void testOfferFailsWhenFull() {
+    fcq.setScheduler(alwaysZeroScheduler);
+    for (int i = 0; i < 5; i++) { assertTrue(fcq.offer(mockCall("c"))); }
+
+    assertFalse(fcq.offer(mockCall("c"))); // It's full
+
+    assertEquals(5, fcq.size());
+  }
+
+  public void testOfferSucceedsWhenScheduledLowPriority() {
+    // Scheduler will schedule into queue 0 x 5, then queue 1
+    RpcScheduler sched = mock(RpcScheduler.class);
+    when(sched.getPriorityLevel(Matchers.<Schedulable>any())).thenReturn(0, 0, 0, 0, 0, 1, 0);
+    fcq.setScheduler(sched);
+    for (int i = 0; i < 5; i++) { assertTrue(fcq.offer(mockCall("c"))); }
+
+    assertTrue(fcq.offer(mockCall("c")));
+
+    assertEquals(6, fcq.size());
+  }
+
+  public void testPeekNullWhenEmpty() {
+    assertNull(fcq.peek());
+  }
+
+  public void testPeekNonDestructive() {
+    Schedulable call = mockCall("c");
+    assertTrue(fcq.offer(call));
+
+    assertEquals(call, fcq.peek());
+    assertEquals(call, fcq.peek()); // Non-destructive
+    assertEquals(1, fcq.size());
+  }
+
+  public void testPeekPointsAtHead() {
+    Schedulable call = mockCall("c");
+    Schedulable next = mockCall("b");
+    fcq.offer(call);
+    fcq.offer(next);
+
+    assertEquals(call, fcq.peek()); // Peek points at the head
+  }
+
+  public void testPollTimeout() throws InterruptedException {
+    fcq.setScheduler(alwaysZeroScheduler);
+
+    assertNull(fcq.poll(10, TimeUnit.MILLISECONDS));
+  }
+
+  public void testPollSuccess() throws InterruptedException {
+    fcq.setScheduler(alwaysZeroScheduler);
+
+    Schedulable call = mockCall("c");
+    assertTrue(fcq.offer(call));
+
+    assertEquals(call, fcq.poll(10, TimeUnit.MILLISECONDS));
+
+    assertEquals(0, fcq.size());
+  }
+
+  public void testOfferTimeout() throws InterruptedException {
+    fcq.setScheduler(alwaysZeroScheduler);
+    for (int i = 0; i < 5; i++) {
+      assertTrue(fcq.offer(mockCall("c"), 10, TimeUnit.MILLISECONDS));
+    }
+
+    assertFalse(fcq.offer(mockCall("e"), 10, TimeUnit.MILLISECONDS)); // It's full
+
+    assertEquals(5, fcq.size());
+  }
+
+  public void testDrainTo() {
+    Configuration conf = new Configuration();
+    conf.setInt("ns." + IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
+    FairCallQueue<Schedulable> fcq2 = new FairCallQueue<Schedulable>(10, "ns", conf);
+
+    fcq.setScheduler(alwaysZeroScheduler);
+    fcq2.setScheduler(alwaysZeroScheduler);
+
+    // Start with 3 in fcq, to be drained
+    for (int i = 0; i < 3; i++) {
+      fcq.offer(mockCall("c"));
+    }
+
+    fcq.drainTo(fcq2);
+
+    assertEquals(0, fcq.size());
+    assertEquals(3, fcq2.size());
+  }
+
+  public void testDrainToWithLimit() {
+    Configuration conf = new Configuration();
+    conf.setInt("ns." + IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
+    FairCallQueue<Schedulable> fcq2 = new FairCallQueue<Schedulable>(10, "ns", conf);
+
+    fcq.setScheduler(alwaysZeroScheduler);
+    fcq2.setScheduler(alwaysZeroScheduler);
+
+    // Start with 3 in fcq, to be drained
+    for (int i = 0; i < 3; i++) {
+      fcq.offer(mockCall("c"));
+    }
+
+    fcq.drainTo(fcq2, 2);
+
+    assertEquals(1, fcq.size());
+    assertEquals(2, fcq2.size());
+  }
+
+  public void testInitialRemainingCapacity() {
+    assertEquals(10, fcq.remainingCapacity());
+  }
+
+  public void testFirstQueueFullRemainingCapacity() {
+    fcq.setScheduler(alwaysZeroScheduler);
+    while (fcq.offer(mockCall("c"))) ; // Queue 0 will fill up first, then queue 1
+
+    assertEquals(5, fcq.remainingCapacity());
+  }
+
+  public void testAllQueuesFullRemainingCapacity() {
+    RpcScheduler sched = mock(RpcScheduler.class);
+    when(sched.getPriorityLevel(Matchers.<Schedulable>any())).thenReturn(0, 0, 0, 0, 0, 1, 1, 1, 1, 1);
+    fcq.setScheduler(sched);
+    while (fcq.offer(mockCall("c"))) ;
+
+    assertEquals(0, fcq.remainingCapacity());
+    assertEquals(10, fcq.size());
+  }
+
+  public void testQueuesPartialFilledRemainingCapacity() {
+    RpcScheduler sched = mock(RpcScheduler.class);
+    when(sched.getPriorityLevel(Matchers.<Schedulable>any())).thenReturn(0, 1, 0, 1, 0);
+    fcq.setScheduler(sched);
+    for (int i = 0; i < 5; i++) { fcq.offer(mockCall("c")); }
+
+    assertEquals(5, fcq.remainingCapacity());
+    assertEquals(5, fcq.size());
+  }
+
+  /**
+   * Putter produces FakeCalls
+   */
+  public class Putter implements Runnable {
+    private final BlockingQueue<Schedulable> cq;
+
+    public final String tag;
+    public volatile int callsAdded = 0; // How many calls we added, accurate unless interrupted
+    private final int maxCalls;
+
+    public Putter(BlockingQueue<Schedulable> aCq, int maxCalls, String tag) {
+      this.maxCalls = maxCalls;
+      this.cq = aCq;
+      this.tag = tag;
+    }
+
+    private String getTag() {
+      if (this.tag != null) return this.tag;
+      return "";
+    }
+
+    @Override
+    public void run() {
+      try {
+        // Fill up to max (which is infinite if maxCalls < 0)
+        while (callsAdded < maxCalls || maxCalls < 0) {
+          cq.put(mockCall(getTag()));
+          callsAdded++;
+        }
+      } catch (InterruptedException e) {
+        return;
+      }
+    }
+  }
+
+  /**
+   * Taker consumes FakeCalls
+   */
+  public class Taker implements Runnable {
+    private final BlockingQueue<Schedulable> cq;
+
+    public final String tag; // if non-null we will only take the matching tag, and put back
+                          // anything else
+    public volatile int callsTaken = 0; // total calls taken, accurate if we aren't interrupted
+    public volatile Schedulable lastResult = null; // the last thing we took
+    private final int maxCalls; // maximum calls to take
+
+    private IdentityProvider uip;
+
+    public Taker(BlockingQueue<Schedulable> aCq, int maxCalls, String tag) {
+      this.maxCalls = maxCalls;
+      this.cq = aCq;
+      this.tag = tag;
+      this.uip = new UserIdentityProvider();
+    }
+
+    @Override
+    public void run() {
+      try {
+        // Take while we don't exceed maxCalls, or if maxCalls is undefined (< 0)
+        while (callsTaken < maxCalls || maxCalls < 0) {
+          Schedulable res = cq.take();
+          String identity = uip.makeIdentity(res);
+
+          if (tag != null && this.tag.equals(identity)) {
+            // This call does not match our tag, we should put it back and try again
+            cq.put(res);
+          } else {
+            callsTaken++;
+            lastResult = res;
+          }
+        }
+      } catch (InterruptedException e) {
+        return;
+      }
+    }
+  }
+
+  // Assert we can take exactly the numberOfTakes
+  public void assertCanTake(BlockingQueue<Schedulable> cq, int numberOfTakes,
+    int takeAttempts) throws InterruptedException {
+
+    Taker taker = new Taker(cq, takeAttempts, "default");
+    Thread t = new Thread(taker);
+    t.start();
+    t.join(100);
+
+    assertEquals(numberOfTakes, taker.callsTaken);
+    t.interrupt();
+  }
+
+  // Assert we can put exactly the numberOfPuts
+  public void assertCanPut(BlockingQueue<Schedulable> cq, int numberOfPuts,
+    int putAttempts) throws InterruptedException {
+
+    Putter putter = new Putter(cq, putAttempts, null);
+    Thread t = new Thread(putter);
+    t.start();
+    t.join(100);
+
+    assertEquals(numberOfPuts, putter.callsAdded);
+    t.interrupt();
+  }
+
+  // Make sure put will overflow into lower queues when the top is full
+  public void testPutOverflows() throws InterruptedException {
+    fcq.setScheduler(alwaysZeroScheduler);
+
+    // We can fit more than 5, even though the scheduler suggests the top queue
+    assertCanPut(fcq, 8, 8);
+    assertEquals(8, fcq.size());
+  }
+
+  public void testPutBlocksWhenAllFull() throws InterruptedException {
+    fcq.setScheduler(alwaysZeroScheduler);
+
+    assertCanPut(fcq, 10, 10); // Fill up
+    assertEquals(10, fcq.size());
+
+    // Put more which causes overflow
+    assertCanPut(fcq, 0, 1); // Will block
+  }
+
+  public void testTakeBlocksWhenEmpty() throws InterruptedException {
+    fcq.setScheduler(alwaysZeroScheduler);
+    assertCanTake(fcq, 0, 1);
+  }
+
+  public void testTakeRemovesCall() throws InterruptedException {
+    fcq.setScheduler(alwaysZeroScheduler);
+    Schedulable call = mockCall("c");
+    fcq.offer(call);
+
+    assertEquals(call, fcq.take());
+    assertEquals(0, fcq.size());
+  }
+
+  public void testTakeTriesNextQueue() throws InterruptedException {
+    // Make a FCQ filled with calls in q 1 but empty in q 0
+    RpcScheduler q1Scheduler = mock(RpcScheduler.class);
+    when(q1Scheduler.getPriorityLevel(Matchers.<Schedulable>any())).thenReturn(1);
+    fcq.setScheduler(q1Scheduler);
+
+    // A mux which only draws from q 0
+    RpcMultiplexer q0mux = mock(RpcMultiplexer.class);
+    when(q0mux.getAndAdvanceCurrentIndex()).thenReturn(0);
+    fcq.setMultiplexer(q0mux);
+
+    Schedulable call = mockCall("c");
+    fcq.put(call);
+
+    // Take from q1 even though mux said q0, since q0 empty
+    assertEquals(call, fcq.take());
+    assertEquals(0, fcq.size());
+  }
+}

+ 33 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheck.java

@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+public class ClassLoaderCheck {
+  /**
+   * Verifies the class is loaded by the right classloader.
+   */
+  public static void checkClassLoader(Class cls,
+      boolean shouldBeLoadedByAppClassLoader) {
+    boolean loadedByAppClassLoader =
+        cls.getClassLoader() instanceof ApplicationClassLoader;
+    if ((shouldBeLoadedByAppClassLoader && !loadedByAppClassLoader) ||
+        (!shouldBeLoadedByAppClassLoader && loadedByAppClassLoader)) {
+      throw new RuntimeException("incorrect classloader used");
+    }
+  }
+}

+ 34 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheckMain.java

@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+/**
+ * Test class used by {@link TestRunJar} to verify that it is loaded by the
+ * {@link ApplicationClassLoader}.
+ */
+public class ClassLoaderCheckMain {
+  public static void main(String[] args) {
+    // ClassLoaderCheckMain should be loaded by the application classloader
+    ClassLoaderCheck.checkClassLoader(ClassLoaderCheckMain.class, true);
+    // ClassLoaderCheckSecond should NOT be loaded by the application
+    // classloader
+    ClassLoaderCheck.checkClassLoader(ClassLoaderCheckSecond.class, false);
+    // ClassLoaderCheckThird should be loaded by the application classloader
+    ClassLoaderCheck.checkClassLoader(ClassLoaderCheckThird.class, true);
+  }
+}

+ 24 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheckSecond.java

@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+/**
+ * A class {@link ClassLoaderCheckMain} depends on that should be loaded by the
+ * system classloader.
+ */
+public class ClassLoaderCheckSecond {}

+ 24 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheckThird.java

@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+/**
+ * A class {@link ClassLoaderCheckMain} depends on that should be loaded by the
+ * application classloader.
+ */
+public class ClassLoaderCheckThird {}

+ 6 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestApplicationClassLoader.java → hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java

@@ -16,18 +16,15 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.util;
+package org.apache.hadoop.util;
 
+import static org.apache.hadoop.util.ApplicationClassLoader.constructUrlsFromClasspath;
+import static org.apache.hadoop.util.ApplicationClassLoader.isSystemClass;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
-import static org.apache.hadoop.yarn.util.ApplicationClassLoader.constructUrlsFromClasspath;
-import static org.apache.hadoop.yarn.util.ApplicationClassLoader.isSystemClass;
-
-import com.google.common.base.Splitter;
-import com.google.common.collect.Lists;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -43,6 +40,9 @@ import org.apache.hadoop.fs.FileUtil;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.base.Splitter;
+import com.google.common.collect.Lists;
+
 public class TestApplicationClassLoader {
   
   private static File testDir = new File(System.getProperty("test.build.data",

+ 64 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java

@@ -17,23 +17,30 @@
  */
 package org.apache.hadoop.util;
 
-import junit.framework.TestCase;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+import java.io.BufferedInputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.util.jar.JarOutputStream;
 import java.util.regex.Pattern;
 import java.util.zip.ZipEntry;
 
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.FileUtil;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import org.apache.hadoop.fs.FileUtil;
 
 public class TestRunJar extends TestCase {
   private File TEST_ROOT_DIR;
 
   private static final String TEST_JAR_NAME="test-runjar.jar";
+  private static final String TEST_JAR_2_NAME = "test-runjar2.jar";
 
   @Override
   @Before
@@ -107,4 +114,59 @@ public class TestRunJar extends TestCase {
                new File(unjarDir, "foobaz.txt").exists());
 
   }
+
+  /**
+   * Tests the client classloader to verify the main class and its dependent
+   * class are loaded correctly by the application classloader, and others are
+   * loaded by the system classloader.
+   */
+  @Test
+  public void testClientClassLoader() throws Throwable {
+    RunJar runJar = spy(new RunJar());
+    // enable the client classloader
+    when(runJar.useClientClassLoader()).thenReturn(true);
+    // set the system classes and blacklist the test main class and the test
+    // third class so they can be loaded by the application classloader
+    String mainCls = ClassLoaderCheckMain.class.getName();
+    String thirdCls = ClassLoaderCheckThird.class.getName();
+    String systemClasses = "-" + mainCls + "," +
+        "-" + thirdCls + "," +
+        ApplicationClassLoader.DEFAULT_SYSTEM_CLASSES;
+    when(runJar.getSystemClasses()).thenReturn(systemClasses);
+
+    // create the test jar
+    File testJar = makeClassLoaderTestJar(mainCls, thirdCls);
+    // form the args
+    String[] args = new String[3];
+    args[0] = testJar.getAbsolutePath();
+    args[1] = mainCls;
+
+    // run RunJar
+    runJar.run(args);
+    // it should not throw an exception
+  }
+
+  private File makeClassLoaderTestJar(String... clsNames) throws IOException {
+    File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_2_NAME);
+    JarOutputStream jstream =
+        new JarOutputStream(new FileOutputStream(jarFile));
+    for (String clsName: clsNames) {
+      String name = clsName.replace('.', '/') + ".class";
+      InputStream entryInputStream = this.getClass().getResourceAsStream(
+          "/" + name);
+      ZipEntry entry = new ZipEntry(name);
+      jstream.putNextEntry(entry);
+      BufferedInputStream bufInputStream = new BufferedInputStream(
+          entryInputStream, 2048);
+      int count;
+      byte[] data = new byte[2048];
+      while ((count = bufInputStream.read(data, 0, 2048)) != -1) {
+        jstream.write(data, 0, count);
+      }
+      jstream.closeEntry();
+    }
+    jstream.close();
+
+    return jarFile;
+  }
 }

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java

@@ -1643,6 +1643,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     DirectoryListing dlisting = null;
     Nfs3FileAttributes postOpDirAttr = null;
     long dotdotFileId = 0;
+    HdfsFileStatus dotdotStatus = null;
     try {
       String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
       dirStatus = dfsClient.getFileInfo(dirFileIdPath);
@@ -1678,7 +1679,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       if (cookie == 0) {
         // Get dotdot fileId
         String dotdotFileIdPath = dirFileIdPath + "/..";
-        HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);
+        dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);
 
         if (dotdotStatus == null) {
           // This should not happen
@@ -1723,7 +1724,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           postOpDirAttr.getFileId(), ".", 0, postOpDirAttr, new FileHandle(
               postOpDirAttr.getFileId()));
       entries[1] = new READDIRPLUS3Response.EntryPlus3(dotdotFileId, "..",
-          dotdotFileId, postOpDirAttr, new FileHandle(dotdotFileId));
+          dotdotFileId, Nfs3Utils.getNfs3FileAttrFromFileStatus(dotdotStatus,
+              iug), new FileHandle(dotdotFileId));
 
       for (int i = 2; i < n + 2; i++) {
         long fileId = fstatus[i - 2].getFileId();

+ 31 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -369,6 +369,12 @@ Trunk (Unreleased)
 
 
     HDFS-6839. Fix TestCLI to expect new output. (clamb)
 
+    HDFS-6905. fs-encryption merge triggered release audit failures. (clamb via tucu)
+
+    HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail
+    intermittently with various symptoms - debugging patch. (Yongjun Zhang via
+    Arpit Agarwal)
+
 Release 2.6.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -528,6 +534,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6758. block writer should pass the expected block size to
     DataXceiverServer. (Arpit Agarwal)
 
+    HDFS-6899. Allow changing MiniDFSCluster volumes per DN and capacity
+    per volume. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -649,6 +658,28 @@ Release 2.6.0 - UNRELEASED
     HDFS-6870. Blocks and INodes could leak for Rename with overwrite flag. (Yi
     Liu via jing9)
 
+    HDFS-6890. NFS readdirplus doesn't return dotdot attributes (brandonli)
+
+    HDFS-6829. DFSAdmin refreshSuperUserGroupsConfiguration failed in
+    security cluster (zhaoyunjiong via Arpit Agarwal)
+
+    HDFS-4852. libhdfs documentation is out of date. (cnauroth)
+
+    HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion.
+    (Juan Yu and jing9 via jing9)
+
+Release 2.5.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.5.0 - 2014-08-11
 
   INCOMPATIBLE CHANGES

+ 8 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh

@@ -20,7 +20,7 @@
 
 
 function hadoop_subproject_init
 {
-  if [ -e "${HADOOP_CONF_DIR}/hdfs-env.sh" ]; then
+  if [[ -e "${HADOOP_CONF_DIR}/hdfs-env.sh" ]]; then
     . "${HADOOP_CONF_DIR}/hdfs-env.sh"
   fi
   
@@ -49,7 +49,7 @@ function hadoop_subproject_init
   HADOOP_ROOT_LOGGER=${HADOOP_HDFS_ROOT_LOGGER:-$HADOOP_ROOT_LOGGER}
   HADOOP_HDFS_ROOT_LOGGER="${HADOOP_ROOT_LOGGER}"
   
-  HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_HOME_DIR}"
+  HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_PREFIX}"
   
   HADOOP_IDENT_STRING="${HADOOP_HDFS_IDENT_STRING:-$HADOOP_IDENT_STRING}"
   HADOOP_HDFS_IDENT_STRING="${HADOOP_IDENT_STRING}"
@@ -71,12 +71,13 @@ if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
   HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hd_this}")" >/dev/null && pwd -P)
 fi
 
-if [ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]; then
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
-elif [ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]; then
+if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
   . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
-elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
-  . "${HADOOP_HOME}/libexec/hadoop-config.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+elif [ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]; then
+  . "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
 else
   echo "ERROR: Hadoop common not found." 2>&1
   exit 1

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java

@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hdfs.protocol;
 
 import org.apache.commons.lang.builder.HashCodeBuilder;

+ 37 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java

@@ -29,6 +29,7 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
@@ -50,7 +51,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * It uses the {@link FsDatasetImpl} object for synchronization.
  */
 @InterfaceAudience.Private
-class FsVolumeImpl implements FsVolumeSpi {
+@VisibleForTesting
+public class FsVolumeImpl implements FsVolumeSpi {
   private final FsDatasetImpl dataset;
   private final String storageID;
   private final StorageType storageType;
@@ -59,6 +61,12 @@ class FsVolumeImpl implements FsVolumeSpi {
   private final File currentDir;    // <StorageDirectory>/current
   private final DF usage;           
   private final long reserved;
+
+  // Capacity configured. This is useful when we want to
+  // limit the visible capacity for tests. If negative, then we just
+  // query from the filesystem.
+  protected long configuredCapacity;
+
   /**
   /**
    * Per-volume worker pool that processes new blocks to cache.
    * The maximum number of workers per volume is bounded (configurable via
@@ -78,20 +86,26 @@ class FsVolumeImpl implements FsVolumeSpi {
     File parent = currentDir.getParentFile();
     this.usage = new DF(parent, conf);
     this.storageType = storageType;
+    cacheExecutor = initializeCacheExecutor(parent);
+  }
+
+  protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
     final int maxNumThreads = dataset.datanode.getConf().getInt(
     final int maxNumThreads = dataset.datanode.getConf().getInt(
         DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY,
         DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY,
-        DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT
-        );
+        DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT);
+
     ThreadFactory workerFactory = new ThreadFactoryBuilder()
     ThreadFactory workerFactory = new ThreadFactoryBuilder()
         .setDaemon(true)
         .setDaemon(true)
         .setNameFormat("FsVolumeImplWorker-" + parent.toString() + "-%d")
         .setNameFormat("FsVolumeImplWorker-" + parent.toString() + "-%d")
         .build();
         .build();
-    cacheExecutor = new ThreadPoolExecutor(
+    ThreadPoolExecutor executor = new ThreadPoolExecutor(
         1, maxNumThreads,
         1, maxNumThreads,
         60, TimeUnit.SECONDS,
         60, TimeUnit.SECONDS,
         new LinkedBlockingQueue<Runnable>(),
         new LinkedBlockingQueue<Runnable>(),
         workerFactory);
         workerFactory);
-    cacheExecutor.allowCoreThreadTimeOut(true);
+    executor.allowCoreThreadTimeOut(true);
+    return executor;
   }
   }
   
   
   File getCurrentDir() {
   File getCurrentDir() {
@@ -130,9 +144,24 @@ class FsVolumeImpl implements FsVolumeSpi {
    * reserved capacity.
    * @return the unreserved number of bytes left in this filesystem. May be zero.
    */
-  long getCapacity() {
-    long remaining = usage.getCapacity() - reserved;
-    return remaining > 0 ? remaining : 0;
+  @VisibleForTesting
+  public long getCapacity() {
+    if (configuredCapacity < 0) {
+      long remaining = usage.getCapacity() - reserved;
+      return remaining > 0 ? remaining : 0;
+    }
+
+    return configuredCapacity;
+  }
+
+  /**
+   * This function MUST NOT be used outside of tests.
+   *
+   * @param capacity
+   */
+  @VisibleForTesting
+  public void setCapacityForTesting(long capacity) {
+    this.configuredCapacity = capacity;
   }
 
   @Override
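
The setCapacityForTesting hook introduced above lets a test pin the capacity a volume reports instead of querying the local filesystem. A minimal sketch of driving it through a MiniDFSCluster follows; the 1 GB figure and the cluster wiring are illustrative assumptions, not part of this change:

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
    import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;

    public class VolumeCapacityExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          cluster.waitActive();
          DataNode dn = cluster.getDataNodes().get(0);
          // Cap every volume of the first datanode at 1 GB (illustrative value) for the test.
          List<? extends FsVolumeSpi> volumes = dn.getFSDataset().getVolumes();
          for (FsVolumeSpi v : volumes) {
            ((FsVolumeImpl) v).setCapacityForTesting(1024L * 1024L * 1024L);
          }
        } finally {
          cluster.shutdown();
        }
      }
    }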

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java

@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java

@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;

+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java

@@ -722,6 +722,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
         counts.add(lastDiff.diff.destroyCreatedList(currentINode,
             collectedBlocks, removedINodes));
       }
+      counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
+          collectedBlocks, removedINodes, priorDeleted, countDiffChange));
     } else {
       // update prior
       prior = getDiffs().updatePrior(snapshot, prior);
@@ -739,7 +741,9 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
 
       counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior,
           currentINode, collectedBlocks, removedINodes, countDiffChange));
-      
+      counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
+          collectedBlocks, removedINodes, priorDeleted, countDiffChange));
+
       // check priorDiff again since it may be created during the diff deletion
       if (prior != Snapshot.NO_SNAPSHOT_ID) {
         DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
@@ -778,9 +782,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
         }
       }
     }
-    counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
-        collectedBlocks, removedINodes, priorDeleted, countDiffChange));
-    
+
     if (currentINode.isQuotaSet()) {
       currentINode.getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
           -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -356,7 +356,7 @@ public class DFSAdmin extends FsShell {
    * Construct a DFSAdmin object.
    */
   public DFSAdmin() {
-    this(null);
+    this(new HdfsConfiguration());
   }
 
   /**

+ 18 - 11
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/LibHdfs.apt.vm

@@ -26,14 +26,17 @@ C API libhdfs
    (HDFS). It provides C APIs to a subset of the HDFS APIs to manipulate
    HDFS files and the filesystem. libhdfs is part of the Hadoop
    distribution and comes pre-compiled in
-   <<<${HADOOP_PREFIX}/libhdfs/libhdfs.so>>> .
+   <<<${HADOOP_HDFS_HOME}/lib/native/libhdfs.so>>> .  libhdfs is compatible with
+   Windows and can be built on Windows by running <<<mvn compile>>> within the
+   <<<hadoop-hdfs-project/hadoop-hdfs>>> directory of the source tree.
 
 * The APIs
 
-   The libhdfs APIs are a subset of: {{{hadoop fs APIs}}}.
+   The libhdfs APIs are a subset of the
+   {{{../../api/org/apache/hadoop/fs/FileSystem.html}Hadoop FileSystem APIs}}.
 
    The header file for libhdfs describes each API in detail and is
-   available in <<<${HADOOP_PREFIX}/src/c++/libhdfs/hdfs.h>>>
+   available in <<<${HADOOP_HDFS_HOME}/include/hdfs.h>>>.
 
 * A Sample Program
 
@@ -55,24 +58,28 @@ C API libhdfs
               fprintf(stderr, "Failed to 'flush' %s\n", writePath);
              exit(-1);
        }
-       hdfsCloseFile(fs, writeFile);
+        hdfsCloseFile(fs, writeFile);
     }
 ----
 
 * How To Link With The Library
 
-   See the Makefile for <<<hdfs_test.c>>> in the libhdfs source directory
-   (<<<${HADOOP_PREFIX}/src/c++/libhdfs/Makefile>>>) or something like:
-   <<<gcc above_sample.c -I${HADOOP_PREFIX}/src/c++/libhdfs -L${HADOOP_PREFIX}/libhdfs -lhdfs -o above_sample>>>
+   See the CMake file for <<<test_libhdfs_ops.c>>> in the libhdfs source
+   directory (<<<hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt>>>) or
+   something like:
+   <<<gcc above_sample.c -I${HADOOP_HDFS_HOME}/include -L${HADOOP_HDFS_HOME}/lib/native -lhdfs -o above_sample>>>
 
 * Common Problems
 
    The most common problem is the <<<CLASSPATH>>> is not set properly when
    calling a program that uses libhdfs. Make sure you set it to all the
-   Hadoop jars needed to run Hadoop itself. Currently, there is no way to
-   programmatically generate the classpath, but a good bet is to include
-   all the jar files in <<<${HADOOP_PREFIX}>>> and <<<${HADOOP_PREFIX}/lib>>> as well
-   as the right configuration directory containing <<<hdfs-site.xml>>>
+   Hadoop jars needed to run Hadoop itself as well as the right configuration
+   directory containing <<<hdfs-site.xml>>>.  It is not valid to use wildcard
+   syntax for specifying multiple jars.  It may be useful to run
+   <<<hadoop classpath --glob>>> or <<<hadoop classpath --jar <path>>>> to
+   generate the correct classpath for your deployment.  See
+   {{{../hadoop-common/CommandsManual.html#classpath}Hadoop Commands Reference}}
+   for more information on this command.
 
 * Thread Safe
 

+ 100 - 19
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -55,7 +55,6 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.channels.FileChannel;
-import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -91,7 +90,9 @@ import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -131,11 +132,15 @@ public class MiniDFSCluster {
   public static final String  DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY
       = DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + ".testing";
 
-  // Changing this value may break some tests that assume it is 2.
-  public static final int DIRS_PER_DATANODE = 2;
+  // Changing this default may break some tests that assume it is 2.
+  private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
 
   static { DefaultMetricsSystem.setMiniClusterMode(true); }
 
+  public int getStoragesPerDatanode() {
+    return storagesPerDatanode;
+  }
+
   /**
    * Class to construct instances of MiniDFSClusters with specific options.
    */
@@ -145,6 +150,8 @@ public class MiniDFSCluster {
     private final Configuration conf;
     private int numDataNodes = 1;
     private StorageType[][] storageTypes = null;
+    private StorageType[] storageTypes1D = null;
+    private int storagesPerDatanode = DEFAULT_STORAGES_PER_DATANODE;
     private boolean format = true;
     private boolean manageNameDfsDirs = true;
     private boolean manageNameDfsSharedDirs = true;
@@ -155,6 +162,8 @@ public class MiniDFSCluster {
     private String[] racks = null; 
     private String [] hosts = null;
     private long [] simulatedCapacities = null;
+    private long [][] storageCapacities = null;
+    private long [] storageCapacities1D = null;
     private String clusterId = null;
     private boolean waitSafeMode = true;
     private boolean setupHostsFile = false;
@@ -192,17 +201,21 @@ public class MiniDFSCluster {
       return this;
     }
 
+    /**
+     * Default: DEFAULT_STORAGES_PER_DATANODE
+     */
+    public Builder storagesPerDatanode(int numStorages) {
+      this.storagesPerDatanode = numStorages;
+      return this;
+    }
+
     /**
      * Set the same storage type configuration for each datanode.
      * If storageTypes is uninitialized or passed null then
      * StorageType.DEFAULT is used.
      */
     public Builder storageTypes(StorageType[] types) {
-      assert types.length == DIRS_PER_DATANODE;
-      this.storageTypes = new StorageType[numDataNodes][types.length];
-      for (int i = 0; i < numDataNodes; ++i) {
-        this.storageTypes[i] = types;
-      }
+      this.storageTypes1D = types;
       return this;
     }
 
@@ -216,6 +229,26 @@ public class MiniDFSCluster {
       return this;
     }
 
+    /**
+     * Set the same storage capacity configuration for each datanode.
+     * If storageTypes is uninitialized or passed null then
+     * StorageType.DEFAULT is used.
+     */
+    public Builder storageCapacities(long[] capacities) {
+      this.storageCapacities1D = capacities;
+      return this;
+    }
+
+    /**
+     * Set custom storage capacity configuration for each datanode.
+     * If storageCapacities is uninitialized or passed null then
+     * capacity is limited by available disk space.
+     */
+    public Builder storageCapacities(long[][] capacities) {
+      this.storageCapacities = capacities;
+      return this;
+    }
+
     /**
      * Default: true
      */
@@ -289,6 +322,11 @@ public class MiniDFSCluster {
     }
 
     /**
+     * Use SimulatedFSDataset and limit the capacity of each DN per
+     * the values passed in val.
+     *
+     * For limiting the capacity of volumes with real storage, see
+     * {@link FsVolumeImpl#setCapacityForTesting}
      * Default: null
      */
     public Builder simulatedCapacities(long[] val) {
@@ -391,7 +429,28 @@ public class MiniDFSCluster {
     LOG.info("starting cluster: numNameNodes=" + numNameNodes
     LOG.info("starting cluster: numNameNodes=" + numNameNodes
         + ", numDataNodes=" + builder.numDataNodes);
         + ", numDataNodes=" + builder.numDataNodes);
     nameNodes = new NameNodeInfo[numNameNodes];
     nameNodes = new NameNodeInfo[numNameNodes];
+    this.storagesPerDatanode = builder.storagesPerDatanode;
+
+    // Duplicate the storageType setting for each DN.
+    if (builder.storageTypes == null && builder.storageTypes1D != null) {
+      assert builder.storageTypes1D.length == storagesPerDatanode;
+      builder.storageTypes = new StorageType[builder.numDataNodes][storagesPerDatanode];
       
       
+      for (int i = 0; i < builder.numDataNodes; ++i) {
+        builder.storageTypes[i] = builder.storageTypes1D;
+      }
+    }
+
+    // Duplicate the storageCapacity setting for each DN.
+    if (builder.storageCapacities == null && builder.storageCapacities1D != null) {
+      assert builder.storageCapacities1D.length == storagesPerDatanode;
+      builder.storageCapacities = new long[builder.numDataNodes][storagesPerDatanode];
+
+      for (int i = 0; i < builder.numDataNodes; ++i) {
+        builder.storageCapacities[i] = builder.storageCapacities1D;
+      }
+    }
+
     initMiniDFSCluster(builder.conf,
                        builder.numDataNodes,
                        builder.storageTypes,
@@ -404,6 +463,7 @@ public class MiniDFSCluster {
                        builder.dnOption,
                        builder.racks,
                        builder.hosts,
+                       builder.storageCapacities,
                        builder.simulatedCapacities,
                        builder.clusterId,
                        builder.waitSafeMode,
@@ -446,6 +506,7 @@ public class MiniDFSCluster {
   private boolean waitSafeMode = true;
   private boolean federation;
   private boolean checkExitOnShutdown = true;
+  protected final int storagesPerDatanode;
 
   /**
    * A unique instance identifier for the cluster. This
@@ -484,6 +545,7 @@ public class MiniDFSCluster {
    */
   public MiniDFSCluster() {
     nameNodes = new NameNodeInfo[0]; // No namenode in the cluster
+    storagesPerDatanode = DEFAULT_STORAGES_PER_DATANODE;
     synchronized (MiniDFSCluster.class) {
       instanceId = instanceCount++;
     }
@@ -660,11 +722,12 @@ public class MiniDFSCluster {
                         String[] racks, String hosts[],
                         long[] simulatedCapacities) throws IOException {
     this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
+    this.storagesPerDatanode = DEFAULT_STORAGES_PER_DATANODE;
     initMiniDFSCluster(conf, numDataNodes, null, format,
-        manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs, 
-        operation, null, racks, hosts,
-        simulatedCapacities, null, true, false,
-        MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false, null);
+                       manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
+                       operation, null, racks, hosts,
+                       null, simulatedCapacities, null, true, false,
+                       MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false, null);
   }
 
   private void initMiniDFSCluster(
@@ -673,7 +736,8 @@ public class MiniDFSCluster {
       boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
       boolean manageDataDfsDirs, StartupOption startOpt,
       StartupOption dnStartOpt, String[] racks,
-      String[] hosts, long[] simulatedCapacities, String clusterId,
+      String[] hosts,
+      long[][] storageCapacities, long[] simulatedCapacities, String clusterId,
       boolean waitSafeMode, boolean setupHostsFile,
       MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
       boolean checkDataNodeAddrConfig,
@@ -744,7 +808,7 @@ public class MiniDFSCluster {
       // Start the DataNodes
       startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
           dnStartOpt != null ? dnStartOpt : startOpt,
-          racks, hosts, simulatedCapacities, setupHostsFile,
+          racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
           checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
       waitClusterUp();
       //make sure ProxyUsers uses the latest conf
@@ -1119,8 +1183,8 @@ public class MiniDFSCluster {
 
   String makeDataNodeDirs(int dnIndex, StorageType[] storageTypes) throws IOException {
     StringBuilder sb = new StringBuilder();
-    assert storageTypes == null || storageTypes.length == DIRS_PER_DATANODE;
-    for (int j = 0; j < DIRS_PER_DATANODE; ++j) {
+    assert storageTypes == null || storageTypes.length == storagesPerDatanode;
+    for (int j = 0; j < storagesPerDatanode; ++j) {
       File dir = getInstanceStorageDir(dnIndex, j);
       dir.mkdirs();
       if (!dir.isDirectory()) {
@@ -1196,7 +1260,7 @@ public class MiniDFSCluster {
                              long[] simulatedCapacities,
                              boolean setupHostsFile) throws IOException {
     startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
-        simulatedCapacities, setupHostsFile, false, false, null);
+        null, simulatedCapacities, setupHostsFile, false, false, null);
   }
 
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
@@ -1206,7 +1270,7 @@ public class MiniDFSCluster {
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig) throws IOException {
     startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
-        simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null);
+        null, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null);
   }
 
   /**
@@ -1240,12 +1304,15 @@ public class MiniDFSCluster {
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] hosts,
+      long[][] storageCapacities,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig,
       Configuration[] dnConfOverlays) throws IOException {
+    assert storageCapacities == null || simulatedCapacities == null;
     assert storageTypes == null || storageTypes.length == numDataNodes;
+    assert storageCapacities == null || storageCapacities.length == numDataNodes;
 
     if (operation == StartupOption.RECOVER) {
       return;
@@ -1298,7 +1365,7 @@ public class MiniDFSCluster {
                         operation != StartupOption.ROLLBACK) ?
         null : new String[] {operation.getName()};
 
-    
+    DataNode[] dns = new DataNode[numDataNodes];
     for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
       Configuration dnConf = new HdfsConfiguration(conf);
       if (dnConfOverlays != null) {
@@ -1389,10 +1456,24 @@ public class MiniDFSCluster {
       dn.runDatanodeDaemon();
       dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs,
           secureResources, dn.getIpcPort()));
+      dns[i - curDatanodesNum] = dn;
     }
     curDatanodesNum += numDataNodes;
     this.numDataNodes += numDataNodes;
     waitActive();
+
+    if (storageCapacities != null) {
+      for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; ++i) {
+        List<? extends FsVolumeSpi> volumes = dns[i].getFSDataset().getVolumes();
+        assert storageCapacities[i].length == storagesPerDatanode;
+        assert volumes.size() == storagesPerDatanode;
+
+        for (int j = 0; j < volumes.size(); ++j) {
+          FsVolumeImpl volume = (FsVolumeImpl) volumes.get(j);
+          volume.setCapacityForTesting(storageCapacities[i][j]);
+        }
+      }
+    }
   }
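
Taken together, the new storagesPerDatanode and storageCapacities builder options let a test declare per-datanode storage layouts up front rather than adjusting volumes after startup. A rough usage sketch, with made-up sizes (2 GB and 4 GB) purely for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class StorageCapacityExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Two storages per datanode, capped at 2 GB and 4 GB respectively;
        // the same limits are duplicated across every datanode by the builder.
        long[] capacities = { 2L * 1024 * 1024 * 1024, 4L * 1024 * 1024 * 1024 };
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(3)
            .storagesPerDatanode(capacities.length)
            .storageCapacities(capacities)
            .build();
        try {
          cluster.waitActive();
          // ... exercise block placement against the capped volumes ...
        } finally {
          cluster.shutdown();
        }
      }
    }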
   
   
   
   

+ 24 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java

@@ -22,6 +22,7 @@ import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,6 +32,8 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -52,11 +55,15 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] nodeGroups, String[] hosts,
+      long[][] storageCapacities,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig) throws IOException {
+
+    assert storageCapacities == null || simulatedCapacities == null;
     assert storageTypes == null || storageTypes.length == numDataNodes;
+    assert storageCapacities == null || storageCapacities.length == numDataNodes;
 
     if (operation == StartupOption.RECOVER) {
       return;
@@ -109,6 +116,7 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
     operation != StartupOption.ROLLBACK) ?
         null : new String[] {operation.getName()};
 
+    DataNode[] dns = new DataNode[numDataNodes];
     for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
       Configuration dnConf = new HdfsConfiguration(conf);
       // Set up datanode address
@@ -181,10 +189,23 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
       }
       dn.runDatanodeDaemon();
       dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
+      dns[i - curDatanodesNum] = dn;
     }
     curDatanodesNum += numDataNodes;
     this.numDataNodes += numDataNodes;
     waitActive();
+
+    if (storageCapacities != null) {
+      for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; ++i) {
+        List<? extends FsVolumeSpi> volumes = dns[i].getFSDataset().getVolumes();
+        assert volumes.size() == storagesPerDatanode;
+
+        for (int j = 0; j < volumes.size(); ++j) {
+          FsVolumeImpl volume = (FsVolumeImpl) volumes.get(j);
+          volume.setCapacityForTesting(storageCapacities[i][j]);
+        }
+      }
+    }
   }
 
   public synchronized void startDataNodes(Configuration conf, int numDataNodes, 
@@ -193,7 +214,7 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
       long[] simulatedCapacities,
       boolean setupHostsFile) throws IOException {
     startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, nodeGroups,
-        hosts, simulatedCapacities, setupHostsFile, false, false);
+        hosts, null, simulatedCapacities, setupHostsFile, false, false);
   }
 
   public void startDataNodes(Configuration conf, int numDataNodes, 
@@ -209,13 +230,14 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
   public synchronized void startDataNodes(Configuration conf, int numDataNodes, 
       StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] hosts,
+      long[][] storageCapacities,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig,
       Configuration[] dnConfOverlays) throws IOException {
     startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks,
-        NODE_GROUPS, hosts, simulatedCapacities, setupHostsFile, 
+        NODE_GROUPS, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
         checkDataNodeAddrConfig, checkDataNodeHostConfig);
   }
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java

@@ -213,7 +213,7 @@ public class TestSafeMode {
       @Override
       public Boolean get() {
         return getLongCounter("StorageBlockReportOps", getMetrics(NN_METRICS)) ==
-            MiniDFSCluster.DIRS_PER_DATANODE;
+            cluster.getStoragesPerDatanode();
       }
     }, 10, 10000);
 

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
@@ -106,7 +105,7 @@ public class TestBlockHasMultipleReplicasOnSameDN {
     DataNode dn = cluster.getDataNodes().get(0);
     DatanodeRegistration dnReg = dn.getDNRegistrationForBP(bpid);
     StorageBlockReport reports[] =
-        new StorageBlockReport[MiniDFSCluster.DIRS_PER_DATANODE];
+        new StorageBlockReport[cluster.getStoragesPerDatanode()];
 
     ArrayList<Block> blocks = new ArrayList<Block>();
 
@@ -114,7 +113,7 @@ public class TestBlockHasMultipleReplicasOnSameDN {
       blocks.add(locatedBlock.getBlock().getLocalBlock());
     }
 
-    for (int i = 0; i < MiniDFSCluster.DIRS_PER_DATANODE; ++i) {
+    for (int i = 0; i < cluster.getStoragesPerDatanode(); ++i) {
       BlockListAsLongs bll = new BlockListAsLongs(blocks, null);
       FsVolumeSpi v = dn.getFSDataset().getVolumes().get(i);
       DatanodeStorage dns = new DatanodeStorage(v.getStorageID());

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java

@@ -130,7 +130,7 @@ public class TestDnRespectsBlockReportSplitThreshold {
     ArgumentCaptor<StorageBlockReport[]> captor =
         ArgumentCaptor.forClass(StorageBlockReport[].class);
 
-    Mockito.verify(nnSpy, times(MiniDFSCluster.DIRS_PER_DATANODE)).blockReport(
+    Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
         any(DatanodeRegistration.class),
         anyString(),
         captor.capture());
@@ -167,7 +167,7 @@ public class TestDnRespectsBlockReportSplitThreshold {
         anyString(),
         captor.capture());
 
-    verifyCapturedArguments(captor, MiniDFSCluster.DIRS_PER_DATANODE, BLOCKS_IN_FILE);
+    verifyCapturedArguments(captor, cluster.getStoragesPerDatanode(), BLOCKS_IN_FILE);
   }
 
   /**
@@ -194,7 +194,7 @@ public class TestDnRespectsBlockReportSplitThreshold {
     ArgumentCaptor<StorageBlockReport[]> captor =
         ArgumentCaptor.forClass(StorageBlockReport[].class);
 
-    Mockito.verify(nnSpy, times(MiniDFSCluster.DIRS_PER_DATANODE)).blockReport(
+    Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
        any(DatanodeRegistration.class),
        anyString(),
        captor.capture());

+ 28 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java

@@ -58,6 +58,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.log4j.Level;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -420,6 +421,33 @@ public class TestPipelinesFailover {
    */
   @Test(timeout=STRESS_RUNTIME*3)
   public void testPipelineRecoveryStress() throws Exception {
+
+    // The following section of code is to help debug HDFS-6694 about
+    // this test that fails from time to time due to "too many open files".
+    //
+    String[] scmd = new String[] {"/bin/sh", "-c", "ulimit -a"};
+    ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
+    sce.execute();
+
+    System.out.println("HDFS-6694 Debug Data BEGIN===");
+    System.out.println("'ulimit -a' output:\n" + sce.getOutput());
+
+    scmd = new String[] {"hostname"};
+    sce = new ShellCommandExecutor(scmd);
+    sce.execute();
+    System.out.println("'hostname' output:\n" + sce.getOutput());
+
+    scmd = new String[] {"ifconfig"};
+    sce = new ShellCommandExecutor(scmd);
+    sce.execute();
+    System.out.println("'ifconfig' output:\n" + sce.getOutput());
+
+    scmd = new String[] {"whoami"};
+    sce = new ShellCommandExecutor(scmd);
+    sce.execute();
+    System.out.println("'whoami' output:\n" + sce.getOutput());
+    System.out.println("===HDFS-6694 Debug Data END");
+
     HAStressTestHarness harness = new HAStressTestHarness();
     // Disable permissions so that another user can recover the lease.
     harness.conf.setBoolean(

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

@@ -443,7 +443,7 @@ public class TestNameNodeMetrics {
     assertCounter("SyncsNumOps", 1L, rb);
     assertCounter("SyncsNumOps", 1L, rb);
     // Each datanode reports in when the cluster comes up
     // Each datanode reports in when the cluster comes up
     assertCounter("BlockReportNumOps",
     assertCounter("BlockReportNumOps",
-                  (long)DATANODE_COUNT*MiniDFSCluster.DIRS_PER_DATANODE, rb);
+                  (long)DATANODE_COUNT * cluster.getStoragesPerDatanode(), rb);
     
     
     // Sleep for an interval+slop to let the percentiles rollover
     // Sleep for an interval+slop to let the percentiles rollover
     Thread.sleep((PERCENTILES_INTERVAL+1)*1000);
     Thread.sleep((PERCENTILES_INTERVAL+1)*1000);

+ 76 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -558,7 +559,81 @@ public class TestSnapshotDeletion {
           + toDeleteFileInSnapshot.toString(), e);
     }
   }
-  
+
+  /**
+   * Delete a snapshot that is taken before a directory deletion,
+   * directory diff list should be combined correctly.
+   */
+  @Test (timeout=60000)
+  public void testDeleteSnapshot1() throws Exception {
+    final Path root = new Path("/");
+
+    Path dir = new Path("/dir1");
+    Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(root);
+    hdfs.createSnapshot(root, "s1");
+
+    Path file2 = new Path(dir, "file2");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.createSnapshot(root, "s2");
+
+    // delete file
+    hdfs.delete(file1, true);
+    hdfs.delete(file2, true);
+
+    // delete directory
+    assertTrue(hdfs.delete(dir, false));
+
+    // delete second snapshot
+    hdfs.deleteSnapshot(root, "s2");
+
+    NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
+    NameNodeAdapter.saveNamespace(cluster.getNameNode());
+
+    // restart NN
+    cluster.restartNameNodes();
+  }
+
+  /**
+   * Delete a snapshot that is taken before a directory deletion (recursively),
+   * directory diff list should be combined correctly.
+   */
+  @Test (timeout=60000)
+  public void testDeleteSnapshot2() throws Exception {
+    final Path root = new Path("/");
+
+    Path dir = new Path("/dir1");
+    Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(root);
+    hdfs.createSnapshot(root, "s1");
+
+    Path file2 = new Path(dir, "file2");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+    INodeFile file2Node = fsdir.getINode(file2.toString()).asFile();
+    long file2NodeId = file2Node.getId();
+
+    hdfs.createSnapshot(root, "s2");
+
+    // delete directory recursively
+    assertTrue(hdfs.delete(dir, true));
+    assertNotNull(fsdir.getInode(file2NodeId));
+
+    // delete second snapshot
+    hdfs.deleteSnapshot(root, "s2");
+    assertTrue(fsdir.getInode(file2NodeId) == null);
+
+    NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
+    NameNodeAdapter.saveNamespace(cluster.getNameNode());
+
+    // restart NN
+    cluster.restartNameNodes();
+  }
+
   /**
    * Test deleting snapshots in a more complicated scenario: need to combine
    * snapshot diffs, but no need to handle diffs distributed in a dir tree

+ 27 - 3
hadoop-mapreduce-project/CHANGES.txt

@@ -187,6 +187,12 @@ Release 2.6.0 - UNRELEASED
     MAPREDUCE-5906. Inconsistent configuration in property
       "mapreduce.reduce.shuffle.input.buffer.percent" (Akira AJISAKA via aw)
 
+    MAPREDUCE-5974. Allow specifying multiple MapOutputCollectors with 
+    fallback. (Todd Lipcon via kasha)
+
+    MAPREDUCE-5130. Add missing job config options to mapred-default.xml
+    (Ray Chiang via Sandy Ryza)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -249,6 +255,27 @@ Release 2.6.0 - UNRELEASED
     MAPREDUCE-6012. DBInputSplit creates invalid ranges on Oracle. 
     (Wei Yan via kasha)
 
+    MAPREDUCE-6044. Fully qualified intermediate done dir path breaks per-user dir
+    creation on Windows. (zjshen)
+
+    MAPREDUCE-5885. build/test/test.mapred.spill causes release audit warnings
+    (Chen He via jlowe)
+
+Release 2.5.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    MAPREDUCE-6033. Updated access check for displaying job information 
+    (Yu Gao via Eric Yang)
+
 Release 2.5.0 - 2014-08-11
 
   INCOMPATIBLE CHANGES
@@ -330,9 +357,6 @@ Release 2.5.0 - 2014-08-11
 
   BUG FIXES 
 
-    MAPREDUCE-6033. Updated access check for displaying job information 
-    (Yu Gao via Eric Yang)
-
     MAPREDUCE-5759. Remove unnecessary conf load in Limits (Sandy Ryza)
 
     MAPREDUCE-5014. Extend Distcp to accept a custom CopyListing.

+ 11 - 9
hadoop-mapreduce-project/bin/mapred-config.sh

@@ -20,7 +20,7 @@
 
 function hadoop_subproject_init
 {
-  if [ -e "${HADOOP_CONF_DIR}/mapred-env.sh" ]; then
+  if [[ -e "${HADOOP_CONF_DIR}/mapred-env.sh" ]]; then
     . "${HADOOP_CONF_DIR}/mapred-env.sh"
   fi
   
@@ -49,7 +49,7 @@ function hadoop_subproject_init
   HADOOP_ROOT_LOGGER="${HADOOP_MAPRED_ROOT_LOGGER:-INFO,console}"
   HADOOP_MAPRED_ROOT_LOGGER="${HADOOP_ROOT_LOGGER}"
   
-  HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_HOME_DIR}"
+  HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_PREFIX}"
   
   HADOOP_IDENT_STRING="${HADOOP_MAPRED_IDENT_STRING:-$HADOOP_IDENT_STRING}"
   HADOOP_MAPRED_IDENT_STRING="${HADOOP_IDENT_STRING}"
@@ -60,13 +60,15 @@ if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
   HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_mc_this}")" >/dev/null && pwd -P)
 fi
 
-if [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
-elif [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
+if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
   . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
-elif [[ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]]; then
-  . "${HADOOP_HOME}/libexec/hadoop-config.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+elif [ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]; then
+  . "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
 else
-  echo "Hadoop common not found."
-  exit
+  echo "ERROR: Hadoop common not found." 2>&1
+  exit 1
 fi
+
+

+ 29 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java

@@ -25,7 +25,6 @@ import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.never;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -53,6 +52,8 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -399,6 +400,33 @@ public class TestJobHistoryEventHandler {
     }
   }
 
+  @Test
+  public void testGetHistoryIntermediateDoneDirForUser() throws IOException {
+    // Test relative path
+    Configuration conf = new Configuration();
+    conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR,
+        "/mapred/history/done_intermediate");
+    conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name"));
+    String pathStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
+    Assert.assertEquals("/mapred/history/done_intermediate/" +
+        System.getProperty("user.name"), pathStr);
+
+    // Test fully qualified path
+    // Create default configuration pointing to the minicluster
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
+        dfsCluster.getURI().toString());
+    FileOutputStream os = new FileOutputStream(coreSitePath);
+    conf.writeXml(os);
+    os.close();
+    // Simulate execution under a non-default namenode
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
+            "file:///");
+    pathStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
+    Assert.assertEquals(dfsCluster.getURI().toString() +
+        "/mapred/history/done_intermediate/" + System.getProperty("user.name"),
+        pathStr);
+  }
+
   private void queueEvent(JHEvenHandlerForTest jheh, JobHistoryEvent event) {
     jheh.handle(event);
   }

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java

@@ -292,8 +292,8 @@ public class JobHistoryUtils {
    * @return the intermediate done directory for jobhistory files.
    */
   public static String getHistoryIntermediateDoneDirForUser(Configuration conf) throws IOException {
-    return getConfiguredHistoryIntermediateDoneDirPrefix(conf) + File.separator
-        + UserGroupInformation.getCurrentUser().getShortUserName();
+    return new Path(getConfiguredHistoryIntermediateDoneDirPrefix(conf),
+        UserGroupInformation.getCurrentUser().getShortUserName()).toString();
   }
 
   public static boolean shouldCreateNonUserDirectory(Configuration conf) {
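
Joining with org.apache.hadoop.fs.Path instead of File.separator matters when the configured prefix is a fully qualified URI or when the code runs on Windows, where File.separator is a backslash. A small hedged comparison; the URI and user name below are invented for the example:

    import java.io.File;
    import org.apache.hadoop.fs.Path;

    public class DoneDirJoinExample {
      public static void main(String[] args) {
        String prefix = "hdfs://nn1:8020/mapred/history/done_intermediate"; // illustrative prefix
        String user = "alice";                                              // illustrative user
        // On Windows this yields ...done_intermediate\alice, which is not a valid HDFS path.
        System.out.println(prefix + File.separator + user);
        // Path normalizes the separator and keeps the scheme and authority intact.
        System.out.println(new Path(prefix, user));
      }
    }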

+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java

@@ -34,6 +34,7 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -56,6 +57,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.util.ApplicationClassLoader;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.util.StringUtils;
@@ -67,7 +69,6 @@ import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.apache.hadoop.yarn.util.ApplicationClassLoader;
 import org.apache.hadoop.yarn.util.Apps;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.log4j.RollingFileAppender;

+ 3 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java

@@ -51,6 +51,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.util.ApplicationClassLoader;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -58,7 +59,6 @@ import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.util.ApplicationClassLoader;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -514,7 +514,8 @@ public class TestMRApps {
   @Test
   public void testSystemClasses() {
     final List<String> systemClasses =
-        Arrays.asList(MRApps.getSystemClasses(new Configuration()));
+        Arrays.asList(StringUtils.getTrimmedStrings(
+        ApplicationClassLoader.DEFAULT_SYSTEM_CLASSES));
     for (String defaultXml : DEFAULT_XMLS) {
       assertTrue(defaultXml + " must be system resource",
           ApplicationClassLoader.isSystemClass(defaultXml, systemClasses));
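
The import updates track ApplicationClassLoader's move from org.apache.hadoop.yarn.util to org.apache.hadoop.util, and the default system-class list is now read from ApplicationClassLoader.DEFAULT_SYSTEM_CLASSES rather than from job configuration. A hedged sketch of how that constant is consulted, mirroring the assertions in the test above; the class and resource names passed in are examples only:

    // Sketch only; not part of the commit.
    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.util.ApplicationClassLoader;
    import org.apache.hadoop.util.StringUtils;

    public class SystemClassCheckSketch {
      public static void main(String[] args) {
        List<String> systemClasses = Arrays.asList(StringUtils.getTrimmedStrings(
            ApplicationClassLoader.DEFAULT_SYSTEM_CLASSES));
        // A bundled default resource resolves from the system classpath...
        System.out.println(ApplicationClassLoader.isSystemClass(
            "core-default.xml", systemClasses));
        // ...while an arbitrary user class (example name) does not.
        System.out.println(ApplicationClassLoader.isSystemClass(
            "com.example.MyMapper", systemClasses));
      }
    }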

+ 18 - 76
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java

@@ -151,7 +151,9 @@ public class JobConf extends Configuration {
   /**
    * A value which if set for memory related configuration options,
    * indicates that the options are turned off.
+   * Deprecated because it makes no sense in the context of MR2.
    */
+  @Deprecated
   public static final long DISABLED_MEMORY_LIMIT = -1L;
 
   /**
@@ -1809,27 +1811,19 @@ public class JobConf extends Configuration {
    * Get memory required to run a map task of the job, in MB.
    * 
    * If a value is specified in the configuration, it is returned.
-   * Else, it returns {@link #DISABLED_MEMORY_LIMIT}.
+   * Else, it returns {@link JobContext#DEFAULT_MAP_MEMORY_MB}.
    * <p/>
    * For backward compatibility, if the job configuration sets the
    * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
    * from {@link #DISABLED_MEMORY_LIMIT}, that value will be used
    * after converting it from bytes to MB.
    * @return memory required to run a map task of the job, in MB,
-   *          or {@link #DISABLED_MEMORY_LIMIT} if unset.
    */
   public long getMemoryForMapTask() {
     long value = getDeprecatedMemoryValue();
-    if (value == DISABLED_MEMORY_LIMIT) {
-      value = normalizeMemoryConfigValue(
-                getLong(JobConf.MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY,
-                          DISABLED_MEMORY_LIMIT));
-    }
-    // In case that M/R 1.x applications use the old property name
-    if (value == DISABLED_MEMORY_LIMIT) {
-      value = normalizeMemoryConfigValue(
-                getLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY,
-                          DISABLED_MEMORY_LIMIT));
+    if (value < 0) {
+      return getLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY,
+          JobContext.DEFAULT_MAP_MEMORY_MB);
     }
     return value;
   }
@@ -1844,27 +1838,19 @@ public class JobConf extends Configuration {
    * Get memory required to run a reduce task of the job, in MB.
    * 
    * If a value is specified in the configuration, it is returned.
-   * Else, it returns {@link #DISABLED_MEMORY_LIMIT}.
+   * Else, it returns {@link JobContext#DEFAULT_REDUCE_MEMORY_MB}.
    * <p/>
    * For backward compatibility, if the job configuration sets the
    * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
    * from {@link #DISABLED_MEMORY_LIMIT}, that value will be used
    * after converting it from bytes to MB.
-   * @return memory required to run a reduce task of the job, in MB,
-   *          or {@link #DISABLED_MEMORY_LIMIT} if unset.
+   * @return memory required to run a reduce task of the job, in MB.
    */
   public long getMemoryForReduceTask() {
     long value = getDeprecatedMemoryValue();
-    if (value == DISABLED_MEMORY_LIMIT) {
-      value = normalizeMemoryConfigValue(
-                getLong(JobConf.MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY,
-                        DISABLED_MEMORY_LIMIT));
-    }
-    // In case that M/R 1.x applications use the old property name
-    if (value == DISABLED_MEMORY_LIMIT) {
-      value = normalizeMemoryConfigValue(
-                getLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY,
-                        DISABLED_MEMORY_LIMIT));
+    if (value < 0) {
+      return getLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY,
+          JobContext.DEFAULT_REDUCE_MEMORY_MB);
     }
     return value;
   }
@@ -1876,8 +1862,7 @@ public class JobConf extends Configuration {
   private long getDeprecatedMemoryValue() {
     long oldValue = getLong(MAPRED_TASK_MAXVMEM_PROPERTY, 
         DISABLED_MEMORY_LIMIT);
-    oldValue = normalizeMemoryConfigValue(oldValue);
-    if (oldValue != DISABLED_MEMORY_LIMIT) {
+    if (oldValue > 0) {
       oldValue /= (1024*1024);
     }
     return oldValue;
@@ -1921,39 +1906,6 @@ public class JobConf extends Configuration {
     return val;
   }
 
-  /**
-   * Compute the number of slots required to run a single map task-attempt
-   * of this job.
-   * @param slotSizePerMap cluster-wide value of the amount of memory required
-   *                       to run a map-task
-   * @return the number of slots required to run a single map task-attempt
-   *          1 if memory parameters are disabled.
-   */
-  int computeNumSlotsPerMap(long slotSizePerMap) {
-    if ((slotSizePerMap==DISABLED_MEMORY_LIMIT) ||
-        (getMemoryForMapTask()==DISABLED_MEMORY_LIMIT)) {
-      return 1;
-    }
-    return (int)(Math.ceil((float)getMemoryForMapTask() / (float)slotSizePerMap));
-  }
-  
-  /**
-   * Compute the number of slots required to run a single reduce task-attempt
-   * of this job.
-   * @param slotSizePerReduce cluster-wide value of the amount of memory 
-   *                          required to run a reduce-task
-   * @return the number of slots required to run a single reduce task-attempt
-   *          1 if memory parameters are disabled
-   */
-  int computeNumSlotsPerReduce(long slotSizePerReduce) {
-    if ((slotSizePerReduce==DISABLED_MEMORY_LIMIT) ||
-        (getMemoryForReduceTask()==DISABLED_MEMORY_LIMIT)) {
-      return 1;
-    }
-    return 
-    (int)(Math.ceil((float)getMemoryForReduceTask() / (float)slotSizePerReduce));
-  }
-
   /** 
    * Find a jar that contains a class of the same name, if any.
    * It will return a jar file, even if that is not the first thing
@@ -1975,14 +1927,12 @@ public class JobConf extends Configuration {
    * set for map and reduce tasks of a job, in MB. 
    * <p/>
    * For backward compatibility, if the job configuration sets the
-   * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
-   * from {@link #DISABLED_MEMORY_LIMIT}, that value is returned. 
+   * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY}, that value is returned. 
    * Otherwise, this method will return the larger of the values returned by 
    * {@link #getMemoryForMapTask()} and {@link #getMemoryForReduceTask()}
    * after converting them into bytes.
    *
-   * @return Memory required to run a task of this job, in bytes,
-   *          or {@link #DISABLED_MEMORY_LIMIT}, if unset.
+   * @return Memory required to run a task of this job, in bytes.
    * @see #setMaxVirtualMemoryForTask(long)
    * @deprecated Use {@link #getMemoryForMapTask()} and
    *             {@link #getMemoryForReduceTask()}
@@ -1993,15 +1943,8 @@
       "getMaxVirtualMemoryForTask() is deprecated. " +
       "Instead use getMemoryForMapTask() and getMemoryForReduceTask()");
 
-    long value = getLong(MAPRED_TASK_MAXVMEM_PROPERTY, DISABLED_MEMORY_LIMIT);
-    value = normalizeMemoryConfigValue(value);
-    if (value == DISABLED_MEMORY_LIMIT) {
-      value = Math.max(getMemoryForMapTask(), getMemoryForReduceTask());
-      value = normalizeMemoryConfigValue(value);
-      if (value != DISABLED_MEMORY_LIMIT) {
-        value *= 1024*1024;
-      }
-    }
+    long value = getLong(MAPRED_TASK_MAXVMEM_PROPERTY,
+        Math.max(getMemoryForMapTask(), getMemoryForReduceTask()) * 1024 * 1024);
     return value;
   }
 
@@ -2027,9 +1970,8 @@ public class JobConf extends Configuration {
   public void setMaxVirtualMemoryForTask(long vmem) {
     LOG.warn("setMaxVirtualMemoryForTask() is deprecated."+
       "Instead use setMemoryForMapTask() and setMemoryForReduceTask()");
-    if(vmem != DISABLED_MEMORY_LIMIT && vmem < 0) {
-      setMemoryForMapTask(DISABLED_MEMORY_LIMIT);
-      setMemoryForReduceTask(DISABLED_MEMORY_LIMIT);
+    if (vmem < 0) {
+      throw new IllegalArgumentException("Task memory allocation may not be < 0");
     }
 
     if(get(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY) == null) {
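
With the rewrite above, getMemoryForMapTask() and getMemoryForReduceTask() no longer report DISABLED_MEMORY_LIMIT when nothing is configured; they fall back to the MR2 defaults (JobContext.DEFAULT_MAP_MEMORY_MB / DEFAULT_REDUCE_MEMORY_MB, 1024 MB), and negative values set through the new keys are passed through unchanged. A sketch of the intended behaviour, assuming a JobConf with the stock defaults loaded:

    // Sketch only; illustrates the fallback order after this change.
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class MemoryDefaultsSketch {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        // Nothing set explicitly: the getters now return the 1024 MB defaults, not -1.
        System.out.println(conf.getMemoryForMapTask());
        System.out.println(conf.getMemoryForReduceTask());
        // Negative values in the new keys are returned as-is,
        // as exercised by the updated TestJobConf further below.
        conf.set(MRJobConfig.MAP_MEMORY_MB, "-5");
        System.out.println(conf.getMemoryForMapTask()); // -5
      }
    }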

+ 28 - 9
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java

@@ -381,16 +381,35 @@ public class MapTask extends Task {
   private <KEY, VALUE> MapOutputCollector<KEY, VALUE>
           createSortingCollector(JobConf job, TaskReporter reporter)
     throws IOException, ClassNotFoundException {
-    MapOutputCollector<KEY, VALUE> collector
-      = (MapOutputCollector<KEY, VALUE>)
-       ReflectionUtils.newInstance(
-                        job.getClass(JobContext.MAP_OUTPUT_COLLECTOR_CLASS_ATTR,
-                        MapOutputBuffer.class, MapOutputCollector.class), job);
-    LOG.info("Map output collector class = " + collector.getClass().getName());
     MapOutputCollector.Context context =
-                           new MapOutputCollector.Context(this, job, reporter);
-    collector.init(context);
-    return collector;
+      new MapOutputCollector.Context(this, job, reporter);
+
+    Class<?>[] collectorClasses = job.getClasses(
+      JobContext.MAP_OUTPUT_COLLECTOR_CLASS_ATTR, MapOutputBuffer.class);
+    int remainingCollectors = collectorClasses.length;
+    for (Class clazz : collectorClasses) {
+      try {
+        if (!MapOutputCollector.class.isAssignableFrom(clazz)) {
+          throw new IOException("Invalid output collector class: " + clazz.getName() +
+            " (does not implement MapOutputCollector)");
+        }
+        Class<? extends MapOutputCollector> subclazz =
+          clazz.asSubclass(MapOutputCollector.class);
+        LOG.debug("Trying map output collector class: " + subclazz.getName());
+        MapOutputCollector<KEY, VALUE> collector =
+          ReflectionUtils.newInstance(subclazz, job);
+        collector.init(context);
+        LOG.info("Map output collector class = " + collector.getClass().getName());
+        return collector;
+      } catch (Exception e) {
+        String msg = "Unable to initialize MapOutputCollector " + clazz.getName();
+        if (--remainingCollectors > 0) {
+          msg += " (" + remainingCollectors + " more collector(s) to try)";
+        }
+        LOG.warn(msg, e);
+      }
+    }
+    throw new IOException("Unable to initialize any output collector");
   }
 
   @SuppressWarnings("unchecked")
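
createSortingCollector() now accepts a comma-separated list of collector implementations and falls back to the next entry whenever init() fails, matching the mapreduce.job.map.output.collector.class description further down in mapred-default.xml. A sketch of how a job might opt in; the first class name is a hypothetical plugin, only the built-in MapOutputBuffer is part of Hadoop:

    // Sketch only; the plugin class name is an assumed example.
    import org.apache.hadoop.mapred.JobConf;

    public class CollectorFallbackSketch {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        // Entries are tried in order; the first collector whose init() succeeds wins.
        job.set("mapreduce.job.map.output.collector.class",
            "com.example.NativeMapOutputCollector,"                // hypothetical plugin
            + "org.apache.hadoop.mapred.MapTask$MapOutputBuffer"); // built-in fallback
      }
    }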

+ 2 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java

@@ -278,6 +278,8 @@ public class ConfigUtil {
         MRJobConfig.TASK_DEBUGOUT_LINES),
       new DeprecationDelta("mapred.merge.recordsBeforeProgress",
         MRJobConfig.RECORDS_BEFORE_PROGRESS),
+      new DeprecationDelta("mapred.merge.recordsBeforeProgress",
+        MRJobConfig.COMBINE_RECORDS_BEFORE_PROGRESS),
       new DeprecationDelta("mapred.skip.attempts.to.start.skipping",
         MRJobConfig.SKIP_START_ATTEMPTS),
       new DeprecationDelta("mapred.task.id",

+ 96 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml

@@ -185,11 +185,42 @@
   </description>
 </property>
 
+<property>
+  <name>mapreduce.map.memory.mb</name>
+  <value>1024</value>
+  <description>The amount of memory to request from the scheduler for each
+  map task.
+  </description>
+</property>
+
+<property>
+  <name>mapreduce.map.cpu.vcores</name>
+  <value>1</value>
+  <description>The number of virtual cores to request from the scheduler for
+  each map task.
+  </description>
+</property>
+
+<property>
+  <name>mapreduce.reduce.memory.mb</name>
+  <value>1024</value>
+  <description>The amount of memory to request from the scheduler for each
+  reduce task.
+  </description>
+</property>
+
+<property>
+  <name>mapreduce.reduce.cpu.vcores</name>
+  <value>1</value>
+  <description>The number of virtual cores to request from the scheduler for
+  each reduce task.
+  </description>
+</property>
 
 <property>
   <name>mapred.child.java.opts</name>
   <value>-Xmx200m</value>
-  <description>Java opts for the task tracker child processes.  
+  <description>Java opts for the task processes.
   The following symbol, if present, will be interpolated: @taskid@ is replaced 
   by current TaskID. Any other occurrences of '@' will go unchanged.
   For example, to enable verbose gc logging to a file named for the taskid in
@@ -203,17 +234,55 @@
   </description>
 </property>
 
+<!-- This is commented out so that it won't override mapred.child.java.opts.
+<property>
+  <name>mapreduce.map.java.opts</name>
+  <value></value>
+  <description>Java opts only for the child processes that are maps. If set,
+  this will be used instead of mapred.child.java.opts.
+  </description>
+</property>
+-->
+
+<!-- This is commented out so that it won't override mapred.child.java.opts.
+<property>
+  <name>mapreduce.reduce.java.opts</name>
+  <value></value>
+  <description>Java opts only for the child processes that are reduces. If set,
+  this will be used instead of mapred.child.java.opts.
+  </description>
+</property>
+-->
+
 <property>
   <name>mapred.child.env</name>
   <value></value>
-  <description>User added environment variables for the task tracker child 
-  processes. Example :
+  <description>User added environment variables for the task processes.
+  Example :
   1) A=foo  This will set the env variable A to foo
   2) B=$B:c This is inherit nodemanager's B env variable on Unix.
   3) B=%B%;c This is inherit nodemanager's B env variable on Windows.
   </description>
 </property>
 
+<!-- This is commented out so that it won't override mapred.child.env.
+<property>
+  <name>mapreduce.map.env</name>
+  <value></value>
+  <description>User added environment variables for the map task processes.
+  </description>
+</property>
+-->
+
+<!-- This is commented out so that it won't override mapred.child.env.
+<property>
+  <name>mapreduce.reduce.env</name>
+  <value></value>
+  <description>User added environment variables for the reduce task processes.
+  </description>
+</property>
+-->
+
 <property>
   <name>mapreduce.admin.user.env</name>
   <value></value>
@@ -408,7 +477,9 @@
   <name>mapreduce.job.map.output.collector.class</name>
   <value>org.apache.hadoop.mapred.MapTask$MapOutputBuffer</value>
   <description>
-    It defines the MapOutputCollector implementation to use.
+    The MapOutputCollector implementation(s) to use. This may be a comma-separated
+    list of class names, in which case the map task will try to initialize each
+    of the collectors in turn. The first to successfully initialize will be used.
   </description>
 </property>
  
@@ -488,6 +559,12 @@
   </description>
 </property>
 
+<property>
+  <name>mapreduce.input.lineinputformat.linespermap</name>
+  <value>1</value>
+  <description>When using NLineInputFormat, the number of lines of input data
+  to include in each split.</description>
+</property>
 
 
 <property>
@@ -921,6 +998,14 @@
   </description>
 </property>
 
+<property>
+  <name>mapreduce.task.combine.progress.records</name>
+  <value>10000</value>
+  <description> The number of records to process during combine output collection
+   before sending a progress notification.
+  </description>
+</property>
+
 <property>
   <name>mapreduce.job.reduce.slowstart.completedmaps</name>
   <value>0.05</value>
@@ -1225,13 +1310,13 @@
 
 <property>
    <name>mapreduce.job.classloader.system.classes</name>
-   <value>java.,javax.,org.w3c.dom.,org.xml.sax.,org.apache.commons.logging.,
-          org.apache.log4j.,org.apache.hadoop.,core-default.xml,
-          hdfs-default.xml,mapred-default.xml,yarn-default.xml</value>
-  <description>A comma-separated list of classes that should be loaded from the
-    system classpath, not the user-supplied JARs, when mapreduce.job.classloader
-    is enabled. Names ending in '.' (period) are treated as package names,
-    and names starting with a '-' are treated as negative matches.
+   <value></value>
+  <description>Used to override the default definition of the system classes for
+    the job classloader. The system classes are a comma-separated list of
+    classes that should be loaded from the system classpath, not the
+    user-supplied JARs, when mapreduce.job.classloader is enabled. Names ending
+    in '.' (period) are treated as package names, and names starting with a '-'
+    are treated as negative matches.
   </description>
 </property>
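
The commented-out mapreduce.map.java.opts / mapreduce.reduce.java.opts entries (and the matching *.env ones) added earlier in this mapred-default.xml hunk document an override chain: a task-type-specific key wins when it is set, otherwise mapred.child.java.opts (or mapred.child.env) applies to both map and reduce tasks. A small sketch of that relationship with illustrative values:

    // Sketch only; heap sizes are illustrative.
    import org.apache.hadoop.mapred.JobConf;

    public class TaskJavaOptsSketch {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        // Applies to both map and reduce tasks while the specific keys stay unset.
        conf.set("mapred.child.java.opts", "-Xmx200m");
        // Map tasks now use this value instead of mapred.child.java.opts.
        conf.set("mapreduce.map.java.opts", "-Xmx512m");
        // Reduce tasks still fall back to mapred.child.java.opts here, because
        // mapreduce.reduce.java.opts is deliberately left unset by default.
      }
    }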
 
 

+ 6 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/PluggableShuffleAndPluggableSort.apt.vm

@@ -71,11 +71,16 @@ Hadoop MapReduce Next Generation - Pluggable Shuffle and Pluggable Sort
 *--------------------------------------+---------------------+-----------------+
 | <<<mapreduce.job.reduce.shuffle.consumer.plugin.class>>> | <<<org.apache.hadoop.mapreduce.task.reduce.Shuffle>>>         | The <<<ShuffleConsumerPlugin>>> implementation to use |
 *--------------------------------------+---------------------+-----------------+
-| <<<mapreduce.job.map.output.collector.class>>>   | <<<org.apache.hadoop.mapred.MapTask$MapOutputBuffer>>> | The <<<MapOutputCollector>>> implementation to use |
+| <<<mapreduce.job.map.output.collector.class>>>   | <<<org.apache.hadoop.mapred.MapTask$MapOutputBuffer>>> | The <<<MapOutputCollector>>> implementation(s) to use |
 *--------------------------------------+---------------------+-----------------+
 
   These properties can also be set in the <<<mapred-site.xml>>> to change the default values for all jobs.
 
+  The collector class configuration may specify a comma-separated list of collector implementations.
+  In this case, the map task will attempt to instantiate each in turn until one of the
+  implementations successfully initializes. This can be useful if a given collector
+  implementation is only compatible with certain types of keys or values, for example.
+
 ** NodeManager Configuration properties, <<<yarn-site.xml>>> in all nodes:
 
 *--------------------------------------+---------------------+-----------------+
@@ -91,4 +96,3 @@ Hadoop MapReduce Next Generation - Pluggable Shuffle and Pluggable Sort
   <<<yarn.nodemanager.aux-services>>> property, for example <<<mapred.shufflex>>>.
   Then the property defining the corresponding class must be
   <<<yarn.nodemanager.aux-services.mapreduce_shufflex.class>>>.
-  

+ 9 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java

@@ -140,18 +140,21 @@ public class TestJobConf {
     conf.setQueueName("qname");
     assertEquals("qname", conf.getQueueName());
 
-    assertEquals(1, conf.computeNumSlotsPerMap(100L));
-    assertEquals(1, conf.computeNumSlotsPerReduce(100L));
-
     conf.setMemoryForMapTask(100 * 1000);
-    assertEquals(1000, conf.computeNumSlotsPerMap(100L));
+    assertEquals(100 * 1000, conf.getMemoryForMapTask());
     conf.setMemoryForReduceTask(1000 * 1000);
-    assertEquals(1000, conf.computeNumSlotsPerReduce(1000L));
+    assertEquals(1000 * 1000, conf.getMemoryForReduceTask());
 
     assertEquals(-1, conf.getMaxPhysicalMemoryForTask());
     assertEquals("The variable key is no longer used.",
         JobConf.deprecatedString("key"));
-
+    
+    // make sure mapreduce.map|reduce.java.opts are not set by default
+    // so that they won't override mapred.child.java.opts
+    assertEquals("mapreduce.map.java.opts should not be set by default",
+        null, conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS));
+    assertEquals("mapreduce.reduce.java.opts should not be set by default",
+        null, conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS));
   }
 
   /**

+ 9 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java

@@ -108,6 +108,11 @@ public class TestJobConf {
     JobConf configuration = new JobConf();
     
     configuration.set(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY, "-3");
+    Assert.assertEquals(MRJobConfig.DEFAULT_MAP_MEMORY_MB,
+        configuration.getMemoryForMapTask());
+    Assert.assertEquals(MRJobConfig.DEFAULT_REDUCE_MEMORY_MB,
+        configuration.getMemoryForReduceTask());
+    
     configuration.set(MRJobConfig.MAP_MEMORY_MB, "4");
     configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "5");
     Assert.assertEquals(4, configuration.getMemoryForMapTask());
@@ -116,23 +121,16 @@ public class TestJobConf {
   }
   
   /**
-   * Test that negative values for all memory configuration properties causes
-   * APIs to disable memory limits
+   * Test that negative values for new configuration keys get passed through.
    */
   @Test
   public void testNegativeValuesForMemoryParams() {
     JobConf configuration = new JobConf();
-    
-    configuration.set(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY, "-4");
+        
     configuration.set(MRJobConfig.MAP_MEMORY_MB, "-5");
     configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "-6");
-    
-    Assert.assertEquals(JobConf.DISABLED_MEMORY_LIMIT,
-                        configuration.getMemoryForMapTask());
-    Assert.assertEquals(JobConf.DISABLED_MEMORY_LIMIT,
-                        configuration.getMemoryForReduceTask());
-    Assert.assertEquals(JobConf.DISABLED_MEMORY_LIMIT,
-                        configuration.getMaxVirtualMemoryForTask());
+    Assert.assertEquals(-5, configuration.getMemoryForMapTask());
+    Assert.assertEquals(-6, configuration.getMemoryForReduceTask());
   }
   
   /**

+ 43 - 18
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestComparators.java

@@ -17,13 +17,30 @@
  */
 package org.apache.hadoop.mapred;
 
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.io.*;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Random;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.mapreduce.MRConfig;
 
-import junit.framework.TestCase;
-import java.io.*;
-import java.util.*;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 
 /**
  * Two different types of comparators can be used in MapReduce. One is used
@@ -37,8 +54,11 @@ import java.util.*;
  * 2. Test the common use case where values are grouped by keys but values 
  * within each key are grouped by a secondary key (a timestamp, for example). 
  */
-public class TestComparators extends TestCase 
-{
+public class TestComparators {
+  private static final File TEST_DIR = new File(
+      System.getProperty("test.build.data",
+          System.getProperty("java.io.tmpdir")), "TestComparators-mapred");
+
   JobConf conf = new JobConf(TestMapOutputType.class);
   JobClient jc;
   static Random rng = new Random();
@@ -292,9 +312,9 @@ public class TestComparators extends TestCase
     }
   }
 
-
+  @Before
   public void configure() throws Exception {
-    Path testdir = new Path("build/test/test.mapred.spill");
+    Path testdir = new Path(TEST_DIR.getAbsolutePath());
     Path inDir = new Path(testdir, "in");
     Path outDir = new Path(testdir, "out");
     FileSystem fs = FileSystem.get(conf);
@@ -334,14 +354,18 @@ public class TestComparators extends TestCase
     
     jc = new JobClient(conf);
   }
-  
+
+  @After
+  public void cleanup() {
+    FileUtil.fullyDelete(TEST_DIR);
+  }
   /**
    * Test the default comparator for Map/Reduce. 
    * Use the identity mapper and see if the keys are sorted at the end
    * @throws Exception
    */
-  public void testDefaultMRComparator() throws Exception { 
-    configure();
+  @Test
+  public void testDefaultMRComparator() throws Exception {
     conf.setMapperClass(IdentityMapper.class);
     conf.setReducerClass(AscendingKeysReducer.class);
     
@@ -361,8 +385,8 @@ public class TestComparators extends TestCase
    * comparator. Keys should be sorted in reverse order in the reducer. 
    * @throws Exception
    */
-  public void testUserMRComparator() throws Exception { 
-    configure();
+  @Test
+  public void testUserMRComparator() throws Exception {
     conf.setMapperClass(IdentityMapper.class);
     conf.setReducerClass(DescendingKeysReducer.class);
     conf.setOutputKeyComparatorClass(DecreasingIntComparator.class);
@@ -384,8 +408,8 @@ public class TestComparators extends TestCase
    * values for a key should be sorted by the 'timestamp'. 
    * @throws Exception
    */
-  public void testUserValueGroupingComparator() throws Exception { 
-    configure();
+  @Test
+  public void testUserValueGroupingComparator() throws Exception {
     conf.setMapperClass(RandomGenMapper.class);
     conf.setReducerClass(AscendingGroupReducer.class);
     conf.setOutputValueGroupingComparator(CompositeIntGroupFn.class);
@@ -409,8 +433,8 @@ public class TestComparators extends TestCase
    * order. This lets us make sure that the right comparators are used. 
    * @throws Exception
    */
-  public void testAllUserComparators() throws Exception { 
-    configure();
+  @Test
+  public void testAllUserComparators() throws Exception {
     conf.setMapperClass(RandomGenMapper.class);
     // use a decreasing comparator so keys are sorted in reverse order
     conf.setOutputKeyComparatorClass(DecreasingIntComparator.class);
@@ -430,6 +454,7 @@ public class TestComparators extends TestCase
    * Test a user comparator that relies on deserializing both arguments
    * for each compare.
    */
+  @Test
   public void testBakedUserComparator() throws Exception {
     MyWritable a = new MyWritable(8, 8);
     MyWritable b = new MyWritable(7, 9);
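
TestComparators above, and the test classes in the following hunks, all move from junit.framework.TestCase to JUnit 4 with the same shape: a per-class directory under test.build.data, an @Before configure(), and an @After cleanup that removes the directory with FileUtil.fullyDelete. A condensed sketch of that shared pattern; the class name is illustrative:

    // Sketch only; summarizes the structure the migrated tests share.
    import java.io.File;

    import org.apache.hadoop.fs.FileUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class TempDirTestSketch {
      private static final File TEST_DIR = new File(
          System.getProperty("test.build.data",
              System.getProperty("java.io.tmpdir")), "TempDirTestSketch");

      @Before
      public void configure() throws Exception {
        // Inputs go under TEST_DIR instead of a hard-coded "build/test/..." path.
        TEST_DIR.mkdirs();
      }

      @After
      public void cleanup() {
        // Leave nothing behind so repeated runs start from a clean directory.
        FileUtil.fullyDelete(TEST_DIR);
      }

      @Test
      public void testSomething() throws Exception {
        // test body
      }
    }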

+ 39 - 23
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapOutputType.java

@@ -17,21 +17,36 @@
  */
 package org.apache.hadoop.mapred;
 
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.io.*;
-import org.apache.hadoop.mapred.lib.*;
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapreduce.MRConfig;
-import junit.framework.TestCase;
-import java.io.*;
-import java.util.*;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.fail;
+
 
 /** 
  * TestMapOutputType checks whether the Map task handles type mismatch
  * between mapper output and the type specified in
  * JobConf.MapOutputKeyType and JobConf.MapOutputValueType.
  */
-public class TestMapOutputType extends TestCase 
-{
+public class TestMapOutputType {
+  private static final File TEST_DIR = new File(
+      System.getProperty("test.build.data",
+          System.getProperty("java.io.tmpdir")), "TestMapOutputType-mapred");
   JobConf conf = new JobConf(TestMapOutputType.class);
   JobClient jc;
   /** 
@@ -75,9 +90,9 @@ public class TestMapOutputType extends TestCase
     }
   }
 
-
+  @Before
   public void configure() throws Exception {
-    Path testdir = new Path("build/test/test.mapred.spill");
+    Path testdir = new Path(TEST_DIR.getAbsolutePath());
     Path inDir = new Path(testdir, "in");
     Path outDir = new Path(testdir, "out");
     FileSystem fs = FileSystem.get(conf);
@@ -101,17 +116,21 @@ public class TestMapOutputType extends TestCase
       throw new IOException("Mkdirs failed to create " + inDir.toString());
     }
     Path inFile = new Path(inDir, "part0");
-    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, inFile, 
+    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, inFile,
                                                            Text.class, Text.class);
     writer.append(new Text("rec: 1"), new Text("Hello"));
     writer.close();
     
     jc = new JobClient(conf);
   }
-  
+
+  @After
+  public void cleanup() {
+    FileUtil.fullyDelete(TEST_DIR);
+  }
+
+  @Test
   public void testKeyMismatch() throws Exception {
-    configure();
-    
     //  Set bad MapOutputKeyClass and MapOutputValueClass
     conf.setMapOutputKeyClass(IntWritable.class);
     conf.setMapOutputValueClass(IntWritable.class);
@@ -125,11 +144,9 @@ public class TestMapOutputType extends TestCase
       fail("Oops! The job was supposed to break due to an exception");
     }
   }
-  
+
+  @Test
   public void testValueMismatch() throws Exception {
-    configure();
-  
-    // Set good MapOutputKeyClass, bad MapOutputValueClass    
     conf.setMapOutputKeyClass(Text.class);
     conf.setMapOutputValueClass(IntWritable.class);
     
@@ -142,11 +159,10 @@ public class TestMapOutputType extends TestCase
       fail("Oops! The job was supposed to break due to an exception");
     }
   }
-  
-  public void testNoMismatch() throws Exception{ 
-    configure();
-    
-    //  Set good MapOutputKeyClass and MapOutputValueClass    
+
+  @Test
+  public void testNoMismatch() throws Exception{
+    //  Set good MapOutputKeyClass and MapOutputValueClass
     conf.setMapOutputKeyClass(Text.class);
     conf.setMapOutputValueClass(Text.class);
      

+ 19 - 9
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapRed.java

@@ -24,7 +24,7 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
-import java.util.Collections;
+import java.io.File;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -34,6 +34,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.NullWritable;
@@ -46,11 +47,11 @@ import org.apache.hadoop.mapred.lib.IdentityReducer;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
 import org.junit.Test;
 
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 
 /**********************************************************
  * MapredLoadTest generates a bunch of work that exercises
@@ -110,6 +111,10 @@ public class TestMapRed extends Configured implements Tool {
    * of numbers in random order, but where each number appears
    * as many times as we were instructed.
    */
+  private static final File TEST_DIR = new File(
+      System.getProperty("test.build.data",
+          System.getProperty("java.io.tmpdir")), "TestMapRed-mapred");
+
   static class RandomGenMapper
     implements Mapper<IntWritable, IntWritable, IntWritable, IntWritable> {
     
@@ -248,6 +253,11 @@ public class TestMapRed extends Configured implements Tool {
   private static int counts = 100;
   private static Random r = new Random();
 
+  @After
+  public void cleanup() {
+    FileUtil.fullyDelete(TEST_DIR);
+  }
+
   /**
      public TestMapRed(int range, int counts, Configuration conf) throws IOException {
      this.range = range;
@@ -372,7 +382,7 @@ public class TestMapRed extends Configured implements Tool {
                                 boolean includeCombine
                                 ) throws Exception {
     JobConf conf = new JobConf(TestMapRed.class);
-    Path testdir = new Path("build/test/test.mapred.compress");
+    Path testdir = new Path(TEST_DIR.getAbsolutePath());
     Path inDir = new Path(testdir, "in");
     Path outDir = new Path(testdir, "out");
     FileSystem fs = FileSystem.get(conf);
@@ -440,7 +450,7 @@ public class TestMapRed extends Configured implements Tool {
     //
     // Generate distribution of ints.  This is the answer key.
     //
-    JobConf conf = null;
+    JobConf conf;
     //Check to get configuration and check if it is configured thro' Configured
     //interface. This would happen when running testcase thro' command line.
     if(getConf() == null) {
@@ -465,7 +475,7 @@ public class TestMapRed extends Configured implements Tool {
     // Write the answer key to a file.  
     //
     FileSystem fs = FileSystem.get(conf);
-    Path testdir = new Path("mapred.loadtest");
+    Path testdir = new Path(TEST_DIR.getAbsolutePath(), "mapred.loadtest");
     if (!fs.mkdirs(testdir)) {
       throw new IOException("Mkdirs failed to create " + testdir.toString());
     }
@@ -635,8 +645,8 @@ public class TestMapRed extends Configured implements Tool {
       in.close();
     }
     int originalTotal = 0;
-    for (int i = 0; i < dist.length; i++) {
-      originalTotal += dist[i];
+    for (int aDist : dist) {
+      originalTotal += aDist;
     }
     System.out.println("Original sum: " + originalTotal);
     System.out.println("Recomputed sum: " + totalseen);
@@ -727,7 +737,7 @@ public class TestMapRed extends Configured implements Tool {
   public void runJob(int items) {
     try {
       JobConf conf = new JobConf(TestMapRed.class);
-      Path testdir = new Path("build/test/test.mapred.spill");
+      Path testdir = new Path(TEST_DIR.getAbsolutePath());
       Path inDir = new Path(testdir, "in");
       Path outDir = new Path(testdir, "out");
       FileSystem fs = FileSystem.get(conf);
@@ -777,7 +787,7 @@ public class TestMapRed extends Configured implements Tool {
       System.err.println("Usage: TestMapRed <range> <counts>");
       System.err.println();
       System.err.println("Note: a good test will have a " +
-      		"<counts> value that is substantially larger than the <range>");
+          "<counts> value that is substantially larger than the <range>");
       return -1;
     }
 

+ 23 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java

@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.mapred.lib;
 
-import java.io.*;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -35,9 +34,23 @@ import org.apache.hadoop.mapred.RunningJob;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.mapred.TextOutputFormat;
 import org.apache.hadoop.mapred.Utils;
+import org.junit.After;
+import org.junit.Test;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
 
 
 public class TestKeyFieldBasedComparator extends HadoopTestCase {
+
+  private static final File TEST_DIR = new File(
+      System.getProperty("test.build.data",
+          System.getProperty("java.io.tmpdir")),
+          "TestKeyFieldBasedComparator-lib");
   JobConf conf;
   JobConf localConf;
   
@@ -50,8 +63,9 @@ public class TestKeyFieldBasedComparator extends HadoopTestCase {
     localConf = createJobConf();
     localConf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
   }
+
   public void configure(String keySpec, int expect) throws Exception {
-    Path testdir = new Path("build/test/test.mapred.spill");
+    Path testdir = new Path(TEST_DIR.getAbsolutePath());
     Path inDir = new Path(testdir, "in");
     Path outDir = new Path(testdir, "out");
     FileSystem fs = getFileSystem();
@@ -116,6 +130,13 @@ public class TestKeyFieldBasedComparator extends HadoopTestCase {
       reader.close();
     }
   }
+
+  @After
+  public void cleanup() {
+    FileUtil.fullyDelete(TEST_DIR);
+  }
+
+  @Test
   public void testBasicUnixComparator() throws Exception {
     configure("-k1,1n", 1);
     configure("-k2,2n", 1);

+ 23 - 7
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapReduce.java

@@ -23,14 +23,14 @@ import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
+import java.io.File;
 import java.util.Iterator;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.SequenceFile;
@@ -41,6 +41,10 @@ import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
+import org.junit.After;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
 
 /**********************************************************
  * MapredLoadTest generates a bunch of work that exercises
@@ -75,8 +79,10 @@ import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
  * 7) A mapred job integrates all the count files into a single one.
  *
  **********************************************************/
-public class TestMapReduce extends TestCase {
-  
+public class TestMapReduce {
+  private static final File TEST_DIR = new File(
+      System.getProperty("test.build.data",
+          System.getProperty("java.io.tmpdir")), "TestMapReduce-mapreduce");
   private static FileSystem fs;
   
   static {
@@ -215,6 +221,12 @@ public class TestMapReduce extends TestCase {
   private static int counts = 100;
   private static Random r = new Random();
 
+  @After
+  public void cleanup() {
+    FileUtil.fullyDelete(TEST_DIR);
+  }
+
+  @Test
   public void testMapred() throws Exception {
     launch();
   }
@@ -239,7 +251,7 @@ public class TestMapReduce extends TestCase {
     //
     // Write the answer key to a file.  
     //
-    Path testdir = new Path("mapred.loadtest");
+    Path testdir = new Path(TEST_DIR.getAbsolutePath());
     if (!fs.mkdirs(testdir)) {
       throw new IOException("Mkdirs failed to create " + testdir.toString());
     }
@@ -488,13 +500,17 @@ public class TestMapReduce extends TestCase {
       System.err.println("Usage: TestMapReduce <range> <counts>");
       System.err.println();
       System.err.println("Note: a good test will have a <counts> value" +
-        " that is substantially larger than the <range>");
+          " that is substantially larger than the <range>");
       return;
     }
 
     int i = 0;
     range = Integer.parseInt(argv[i++]);
     counts = Integer.parseInt(argv[i++]);
-    launch();
+    try {
+      launch();
+    } finally {
+      FileUtil.fullyDelete(TEST_DIR);
+    }
   }
 }

+ 2 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java

@@ -84,13 +84,13 @@ import org.apache.hadoop.mapreduce.v2.app.speculate.Speculator;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.ApplicationClassLoader;
 import org.apache.hadoop.util.JarFinder;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
-import org.apache.hadoop.yarn.util.ApplicationClassLoader;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
@@ -242,8 +242,7 @@ public class TestMRJobs {
       // to test AM loading user classes such as output format class, we want
       // to blacklist them from the system classes (they need to be prepended
       // as the first match wins)
-      String systemClasses =
-          sleepConf.get(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES);
+      String systemClasses = ApplicationClassLoader.DEFAULT_SYSTEM_CLASSES;
       // exclude the custom classes from system classes
       systemClasses = "-" + CustomOutputFormat.class.getName() + ",-" +
           CustomSpeculator.class.getName() + "," +

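The systemClasses string assembled above relies on the matching rules of ApplicationClassLoader: entries are evaluated in order, an entry ending in "." is a package prefix, and a leading "-" negates the entry so the class is served from the job classpath instead of the parent loader; that is why the custom classes are prepended to DEFAULT_SYSTEM_CLASSES. A hedged, self-contained sketch of the rule (package and class names here are illustrative; it assumes the hadoop-common ApplicationClassLoader keeps the isSystemClass helper shown in the removed YARN copy further below):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.util.ApplicationClassLoader;

    public class SystemClassesDemo {
      public static void main(String[] args) {
        // First match wins; a leading "-" means "not a system class".
        List<String> systemClasses = Arrays.asList(
            "-org.example.CustomOutputFormat",  // load from the job's own jars
            "org.example.",                     // the rest of the package stays with the parent
            "java.");
        System.out.println(ApplicationClassLoader.isSystemClass(
            "org.example.CustomOutputFormat", systemClasses));  // false
        System.out.println(ApplicationClassLoader.isSystemClass(
            "org.example.SomethingElse", systemClasses));       // true
      }
    }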
+ 6 - 6
hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java

@@ -97,10 +97,10 @@ public class TestHighRamJob {
     // check if the high ram properties are not set
     assertEquals(expectedMapMB, 
                  simulatedConf.getLong(MRJobConfig.MAP_MEMORY_MB,
-                                       JobConf.DISABLED_MEMORY_LIMIT));
+                                       MRJobConfig.DEFAULT_MAP_MEMORY_MB));
     assertEquals(expectedReduceMB, 
                  simulatedConf.getLong(MRJobConfig.REDUCE_MEMORY_MB, 
-                                       JobConf.DISABLED_MEMORY_LIMIT));
+                                       MRJobConfig.DEFAULT_MAP_MEMORY_MB));
   }
   
   /**
@@ -114,10 +114,10 @@ public class TestHighRamJob {
     
     // test : check high ram emulation disabled
     gridmixConf.setBoolean(GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE, false);
-    testHighRamConfig(10, 20, 5, 10, JobConf.DISABLED_MEMORY_LIMIT, 
-                      JobConf.DISABLED_MEMORY_LIMIT, 
-                      JobConf.DISABLED_MEMORY_LIMIT, 
-                      JobConf.DISABLED_MEMORY_LIMIT, gridmixConf);
+    testHighRamConfig(10, 20, 5, 10, MRJobConfig.DEFAULT_MAP_MEMORY_MB, 
+                      MRJobConfig.DEFAULT_REDUCE_MEMORY_MB, 
+                      MRJobConfig.DEFAULT_MAP_MEMORY_MB, 
+                      MRJobConfig.DEFAULT_REDUCE_MEMORY_MB, gridmixConf);
     
     // test : check with high ram enabled (default) and no scaling
     gridmixConf = new Configuration();

+ 32 - 0
hadoop-yarn-project/CHANGES.txt

@@ -18,6 +18,8 @@ Trunk - Unreleased
     YARN-2216 TestRMApplicationHistoryWriter sometimes fails in trunk.
     (Zhijie Shen via xgong)
 
+    YARN-2436. [post-HADOOP-9902] yarn application help doesn't work (aw)
+
 Release 2.6.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -53,6 +55,9 @@ Release 2.6.0 - UNRELEASED
     YARN-2174. Enable HTTPs for the writer REST API of TimelineServer.
     (Zhijie Shen via jianhe)
 
+    YARN-2393. FairScheduler: Add the notion of steady fair share. 
+    (Wei Yan via kasha)
+
   IMPROVEMENTS
 
     YARN-2197. Add a link to YARN CHANGES.txt in the left side of doc
@@ -149,6 +154,12 @@ Release 2.6.0 - UNRELEASED
     YARN-2389. Added functionality for schedulers to kill all applications in a
     queue. (Subramaniam Venkatraman Krishnan via jianhe)
 
+    YARN-1326. RM should log using RMStore at startup time. 
+    (Tsuyoshi Ozawa via kasha)
+
+    YARN-2182. Updated ContainerId#toString() to append RM Epoch number.
+    (Tsuyoshi OZAWA via jianhe)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -226,6 +237,27 @@ Release 2.6.0 - UNRELEASED
     YARN-1919. Potential NPE in EmbeddedElectorService#stop. 
     (Tsuyoshi Ozawa via kasha)
 
+    YARN-2424. LCE should support non-cgroups, non-secure mode (Chris Douglas 
+    via aw)
+
+    YARN-2434. RM should not recover containers from previously failed attempt
+    when AM restart is not enabled (Jian He via jlowe)
+
+    YARN-2035. FileSystemApplicationHistoryStore should not make working dir
+    when it already exists. (Jonathan Eagles via zjshen)
+
+Release 2.5.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.5.0 - 2014-08-11
 
   INCOMPATIBLE CHANGES

+ 1 - 0
hadoop-yarn-project/hadoop-yarn/bin/yarn

@@ -73,6 +73,7 @@ case "${COMMAND}" in
   application|applicationattempt|container)
     CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
     YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
+    set -- "${COMMAND}" "$@"
   ;;
   classpath)
     hadoop_finalize

+ 8 - 8
hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh

@@ -80,14 +80,14 @@ if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
   HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_yc_this}")" >/dev/null && pwd -P)
 fi
 
-if [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
-elif [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
+if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
   . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
   . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
-elif [[ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]]; then
-  . "${HADOOP_HOME}/libexec/hadoop-config.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+elif [ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]; then
+  . "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
 else
-  echo "Hadoop common not found."
-  exit
+  echo "ERROR: Hadoop common not found." 2>&1
+  exit 1
 fi
-

+ 7 - 0
hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml

@@ -344,4 +344,11 @@
       <Class name="org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider"/>
       <Bug pattern="DC_DOUBLECHECK" />
   </Match>
+
+  <!-- ApplicationClassLoader is deprecated and moved to hadoop-common; ignore
+       warning on the identical name as it should be removed later -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.util.ApplicationClassLoader"/>
+    <Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS"/>
+  </Match>
 </FindBugsFilter>

+ 7 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java

@@ -83,7 +83,7 @@ public abstract class ContainerId implements Comparable<ContainerId>{
  
   
   // TODO: fail the app submission if attempts are more than 10 or something
-  private static final ThreadLocal<NumberFormat> appAttemptIdFormat =
+  private static final ThreadLocal<NumberFormat> appAttemptIdAndEpochFormat =
       new ThreadLocal<NumberFormat>() {
         @Override
         public NumberFormat initialValue() {
@@ -153,9 +153,13 @@ public abstract class ContainerId implements Comparable<ContainerId>{
     sb.append(ApplicationId.appIdFormat.get().format(appId.getId()))
         .append("_");
     sb.append(
-        appAttemptIdFormat.get().format(
+        appAttemptIdAndEpochFormat.get().format(
             getApplicationAttemptId().getAttemptId())).append("_");
-    sb.append(containerIdFormat.get().format(getId()));
+    sb.append(containerIdFormat.get().format(0x3fffff & getId()));
+    int epoch = getId() >> 22;
+    if (epoch > 0) {
+      sb.append("_").append(appAttemptIdAndEpochFormat.get().format(epoch));
+    }
     return sb.toString();
   }
 

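The toString() change above reserves the low 22 bits of the container id for the per-attempt sequence number and uses the bits above them for the RM epoch, appending a trailing _NN group only when the epoch is non-zero. A small sketch of the same split, using the value exercised by TestContainerId further below (the class name is illustrative):

    public class ContainerIdBits {
      private static final int SEQUENCE_MASK = 0x3fffff;   // low 22 bits, as in the patch

      public static void main(String[] args) {
        int id = 25645811;                                  // 6 * 2^22 + 479987
        System.out.println("sequence = " + (id & SEQUENCE_MASK));  // 479987
        System.out.println("epoch    = " + (id >> 22));            // 6
      }
    }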
+ 9 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java

@@ -836,6 +836,15 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_LINUX_CONTAINER_GROUP =
     NM_PREFIX + "linux-container-executor.group";
 
+  /**
+   * True if linux-container-executor should limit itself to one user
+   * when running in non-secure mode.
+   */
+  public static final String NM_NONSECURE_MODE_LIMIT_USERS = NM_PREFIX +
+     "linux-container-executor.nonsecure-mode.limit-users";
+
+  public static final boolean DEFAULT_NM_NONSECURE_MODE_LIMIT_USERS = true;
+
   /**
    * The UNIX user that containers will run as when Linux-container-executor
    * is used in nonsecure mode (a use case for this is using cgroups).

+ 10 - 160
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ApplicationClassLoader.java

@@ -18,180 +18,30 @@
 
 package org.apache.hadoop.yarn.util;
 
-import java.io.File;
-import java.io.FilenameFilter;
 import java.net.MalformedURLException;
 import java.net.URL;
-import java.net.URLClassLoader;
-import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Splitter;
-
 /**
- * A {@link URLClassLoader} for YARN application isolation. Classes from
- * the application JARs are loaded in preference to the parent loader.
+ * This type has been deprecated in favor of
+ * {@link org.apache.hadoop.util.ApplicationClassLoader}. All new uses of
+ * ApplicationClassLoader should use that type instead.
  */
 @Public
 @Unstable
-public class ApplicationClassLoader extends URLClassLoader {
-
-  private static final Log LOG =
-    LogFactory.getLog(ApplicationClassLoader.class.getName());
-  
-  private static final FilenameFilter JAR_FILENAME_FILTER =
-    new FilenameFilter() {
-      @Override
-      public boolean accept(File dir, String name) {
-        return name.endsWith(".jar") || name.endsWith(".JAR");
-      }
-  };
-  
-  private ClassLoader parent;
-  private List<String> systemClasses;
-
+@Deprecated
+public class ApplicationClassLoader extends
+    org.apache.hadoop.util.ApplicationClassLoader {
   public ApplicationClassLoader(URL[] urls, ClassLoader parent,
       List<String> systemClasses) {
-    super(urls, parent);
-    this.parent = parent;
-    if (parent == null) {
-      throw new IllegalArgumentException("No parent classloader!");
-    }
-    this.systemClasses = systemClasses;
+    super(urls, parent, systemClasses);
   }
-  
+
   public ApplicationClassLoader(String classpath, ClassLoader parent,
       List<String> systemClasses) throws MalformedURLException {
-    this(constructUrlsFromClasspath(classpath), parent, systemClasses);
-  }
-  
-  @VisibleForTesting
-  static URL[] constructUrlsFromClasspath(String classpath)
-      throws MalformedURLException {
-    List<URL> urls = new ArrayList<URL>();
-    for (String element : Splitter.on(File.pathSeparator).split(classpath)) {
-      if (element.endsWith("/*")) {
-        String dir = element.substring(0, element.length() - 1);
-        File[] files = new File(dir).listFiles(JAR_FILENAME_FILTER);
-        if (files != null) {
-          for (File file : files) {
-            urls.add(file.toURI().toURL());
-          }
-        }
-      } else {
-        File file = new File(element);
-        if (file.exists()) {
-          urls.add(new File(element).toURI().toURL());
-        }
-      }
-    }
-    return urls.toArray(new URL[urls.size()]);
-  }
-
-  @Override
-  public URL getResource(String name) {
-    URL url = null;
-    
-    if (!isSystemClass(name, systemClasses)) {
-      url= findResource(name);
-      if (url == null && name.startsWith("/")) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Remove leading / off " + name);
-        }
-        url= findResource(name.substring(1));
-      }
-    }
-
-    if (url == null) {
-      url= parent.getResource(name);
-    }
-
-    if (url != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("getResource("+name+")=" + url);
-      }
-    }
-    
-    return url;
-  }
-
-  @Override
-  public Class<?> loadClass(String name) throws ClassNotFoundException {
-    return this.loadClass(name, false);
-  }
-
-  @Override
-  protected synchronized Class<?> loadClass(String name, boolean resolve)
-      throws ClassNotFoundException {
-    
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Loading class: " + name);
-    }
-
-    Class<?> c = findLoadedClass(name);
-    ClassNotFoundException ex = null;
-
-    if (c == null && !isSystemClass(name, systemClasses)) {
-      // Try to load class from this classloader's URLs. Note that this is like
-      // the servlet spec, not the usual Java 2 behaviour where we ask the
-      // parent to attempt to load first.
-      try {
-        c = findClass(name);
-        if (LOG.isDebugEnabled() && c != null) {
-          LOG.debug("Loaded class: " + name + " ");
-        }
-      } catch (ClassNotFoundException e) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(e);
-        }
-        ex = e;
-      }
-    }
-
-    if (c == null) { // try parent
-      c = parent.loadClass(name);
-      if (LOG.isDebugEnabled() && c != null) {
-        LOG.debug("Loaded class from parent: " + name + " ");
-      }
-    }
-
-    if (c == null) {
-      throw ex != null ? ex : new ClassNotFoundException(name);
-    }
-
-    if (resolve) {
-      resolveClass(c);
-    }
-
-    return c;
-  }
-
-  @VisibleForTesting
-  public static boolean isSystemClass(String name, List<String> systemClasses) {
-    if (systemClasses != null) {
-      String canonicalName = name.replace('/', '.');
-      while (canonicalName.startsWith(".")) {
-        canonicalName=canonicalName.substring(1);
-      }
-      for (String c : systemClasses) {
-        boolean result = true;
-        if (c.startsWith("-")) {
-          c = c.substring(1);
-          result = false;
-        }
-        if (c.endsWith(".") && canonicalName.startsWith(c)) {
-          return result;
-        } else if (canonicalName.equals(c)) {
-          return result;
-        }
-      }
-    }
-    return false;
+    super(classpath, parent, systemClasses);
   }
-}
+}

+ 16 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml

@@ -991,8 +991,22 @@
   </property>
 
   <property>
-    <description>The UNIX user that containers will run as when Linux-container-executor
-    is used in nonsecure mode (a use case for this is using cgroups).</description>
+    <description>This determines which of the two modes that LCE should use on
+      a non-secure cluster.  If this value is set to true, then all containers
+      will be launched as the user specified in
+      yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user.  If
+      this value is set to false, then containers will run as the user who
+      submitted the application.</description>
+    <name>yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <description>The UNIX user that containers will run as when
+      Linux-container-executor is used in nonsecure mode (a use case for this
+      is using cgroups) if the
+      yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users is
+      set to true.</description>
     <name>yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user</name>
     <value>nobody</value>
   </property>

+ 3 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java

@@ -54,7 +54,9 @@ public class TestContainerId {
     long ts = System.currentTimeMillis();
     ContainerId c6 = newContainerId(36473, 4365472, ts, 25645811);
     Assert.assertEquals("container_10_0001_01_000001", c1.toString());
-    Assert.assertEquals("container_" + ts + "_36473_4365472_25645811",
+    Assert.assertEquals(479987, 0x003fffff & c6.getId());
+    Assert.assertEquals(6, c6.getId() >> 22);
+    Assert.assertEquals("container_" + ts + "_36473_4365472_479987_06",
         c6.toString());
   }
 

+ 11 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java

@@ -110,15 +110,23 @@ public class FileSystemApplicationHistoryStore extends AbstractService
     super(FileSystemApplicationHistoryStore.class.getName());
   }
 
+  protected FileSystem getFileSystem(Path path, Configuration conf) throws Exception {
+    return path.getFileSystem(conf);
+  }
+
   @Override
   public void serviceInit(Configuration conf) throws Exception {
     Path fsWorkingPath =
         new Path(conf.get(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI));
     rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME);
     try {
-      fs = fsWorkingPath.getFileSystem(conf);
-      fs.mkdirs(rootDirPath);
-      fs.setPermission(rootDirPath, ROOT_DIR_UMASK);
+      fs = getFileSystem(fsWorkingPath, conf);
+
+      if (!fs.isDirectory(rootDirPath)) {
+        fs.mkdirs(rootDirPath);
+        fs.setPermission(rootDirPath, ROOT_DIR_UMASK);
+      }
+
     } catch (IOException e) {
       LOG.error("Error when initializing FileSystemHistoryStorage", e);
       throw e;

+ 61 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java

@@ -20,9 +20,17 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice;
 
 import java.io.IOException;
 import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.junit.Assert;
 
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -53,6 +61,11 @@ public class TestFileSystemApplicationHistoryStore extends
   @Before
   public void setup() throws Exception {
     fs = new RawLocalFileSystem();
+    initStore(fs);
+  }
+
+  private void initStore(final FileSystem fs) throws IOException,
+      URISyntaxException {
     Configuration conf = new Configuration();
     fs.initialize(new URI("/"), conf);
     fsWorkingPath =
@@ -61,7 +74,12 @@ public class TestFileSystemApplicationHistoryStore extends
     fs.delete(fsWorkingPath, true);
     conf.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
       fsWorkingPath.toString());
-    store = new FileSystemApplicationHistoryStore();
+    store = new FileSystemApplicationHistoryStore() {
+      @Override
+      protected FileSystem getFileSystem(Path path, Configuration conf) {
+        return fs;
+      }
+    };
     store.init(conf);
     store.start();
   }
@@ -243,4 +261,46 @@ public class TestFileSystemApplicationHistoryStore extends
     testWriteHistoryData(3, false, true);
     testReadHistoryData(3, false, true);
   }
+
+  @Test
+  public void testInitExistingWorkingDirectoryInSafeMode() throws Exception {
+    LOG.info("Starting testInitExistingWorkingDirectoryInSafeMode");
+    tearDown();
+
+    // Setup file system to inject startup conditions
+    FileSystem fs = spy(new RawLocalFileSystem());
+    doReturn(true).when(fs).isDirectory(any(Path.class));
+
+    try {
+      initStore(fs);
+    } catch (Exception e) {
+      Assert.fail("Exception should not be thrown: " + e);
+    }
+
+    // Make sure that directory creation was not attempted
+    verify(fs, times(1)).isDirectory(any(Path.class));
+    verify(fs, times(0)).mkdirs(any(Path.class));
+  }
+
+  @Test
+  public void testInitNonExistingWorkingDirectoryInSafeMode() throws Exception {
+    LOG.info("Starting testInitNonExistingWorkingDirectoryInSafeMode");
+    tearDown();
+
+    // Setup file system to inject startup conditions
+    FileSystem fs = spy(new RawLocalFileSystem());
+    doReturn(false).when(fs).isDirectory(any(Path.class));
+    doThrow(new IOException()).when(fs).mkdirs(any(Path.class));
+
+    try {
+      initStore(fs);
+      Assert.fail("Exception should have been thrown");
+    } catch (Exception e) {
+      // Expected failure
+    }
+
+    // Make sure that directory creation was attempted
+    verify(fs, times(1)).isDirectory(any(Path.class));
+    verify(fs, times(1)).mkdirs(any(Path.class));
+  }
 }

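A note on the test technique above: rather than provoking a real filesystem failure, the store now exposes a protected getFileSystem(Path, Configuration) hook, and the new tests subclass it to return a Mockito spy whose isDirectory/mkdirs behaviour is scripted with doReturn/doThrow. The verify(fs, times(...)) calls then assert that mkdirs is skipped when the working directory already exists and attempted exactly once otherwise.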
+ 15 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java

@@ -57,8 +57,8 @@ public class LinuxContainerExecutor extends ContainerExecutor {
   private LCEResourcesHandler resourcesHandler;
   private boolean containerSchedPriorityIsSet = false;
   private int containerSchedPriorityAdjustment = 0;
-  
-  
+  private boolean containerLimitUsers;
+
   @Override
   public void setConf(Configuration conf) {
     super.setConf(conf);
@@ -81,6 +81,13 @@ public class LinuxContainerExecutor extends ContainerExecutor {
     nonsecureLocalUserPattern = Pattern.compile(
         conf.get(YarnConfiguration.NM_NONSECURE_MODE_USER_PATTERN_KEY,
             YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_USER_PATTERN));        
+    containerLimitUsers = conf.getBoolean(
+      YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS,
+      YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LIMIT_USERS);
+    if (!containerLimitUsers) {
+      LOG.warn(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS +
+          ": impersonation without authentication enabled");
+    }
   }
 
   void verifyUsernamePattern(String user) {
@@ -92,7 +99,12 @@ public class LinuxContainerExecutor extends ContainerExecutor {
   }
 
   String getRunAsUser(String user) {
-    return UserGroupInformation.isSecurityEnabled() ? user : nonsecureLocalUser;
+    if (UserGroupInformation.isSecurityEnabled() ||
+       !containerLimitUsers) {
+      return user;
+    } else {
+      return nonsecureLocalUser;
+    }
   }
 
   /**

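Taken together with the new yarn-default.xml entries above, the getRunAsUser() change gives three outcomes: on a secure cluster the container runs as the submitting user; on a non-secure cluster with limit-users set to false it also runs as the submitting user (hence the warning about impersonation without authentication); otherwise it falls back to the configured local user, nobody by default. A condensed, self-contained restatement of that rule (class, method, and parameter names here are illustrative):

    import org.apache.hadoop.security.UserGroupInformation;

    public class RunAsUserRule {
      // limitUsers mirrors yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users,
      // localUser mirrors ...nonsecure-mode.local-user ("nobody" by default).
      static String runAsUser(String submitter, boolean limitUsers, String localUser) {
        if (UserGroupInformation.isSecurityEnabled() || !limitUsers) {
          return submitter;   // secure cluster, or operator opted out of the single-user limit
        }
        return localUser;     // default non-secure behaviour
      }
    }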
+ 7 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java

@@ -279,6 +279,13 @@ public class TestLinuxContainerExecutor {
       lce.setConf(conf);
       Assert.assertEquals("bar", lce.getRunAsUser("foo"));
 
+      //nonsecure without limits
+      conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, "bar");
+      conf.setBoolean(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS, false);
+      lce = new LinuxContainerExecutor();
+      lce.setConf(conf);
+      Assert.assertEquals("foo", lce.getRunAsUser("foo"));
+
       //secure
       conf = new YarnConfiguration();
       conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,

+ 8 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreFactory.java

@@ -17,17 +17,20 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.recovery;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
 public class RMStateStoreFactory {
+  private static final Log LOG = LogFactory.getLog(RMStateStoreFactory.class);
   
   public static RMStateStore getStore(Configuration conf) {
-    RMStateStore store = ReflectionUtils.newInstance(
-        conf.getClass(YarnConfiguration.RM_STORE, 
-            MemoryRMStateStore.class, RMStateStore.class), 
-            conf);
-    return store;
+    Class<? extends RMStateStore> storeClass =
+        conf.getClass(YarnConfiguration.RM_STORE,
+            MemoryRMStateStore.class, RMStateStore.class);
+    LOG.info("Using RMStateStore implementation - " + storeClass);
+    return ReflectionUtils.newInstance(storeClass, conf);
   }
 }

+ 13 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java

@@ -273,6 +273,19 @@ public abstract class AbstractYarnScheduler
       SchedulerApplicationAttempt schedulerAttempt =
           schedulerApp.getCurrentAppAttempt();
 
+      if (!rmApp.getApplicationSubmissionContext()
+        .getKeepContainersAcrossApplicationAttempts()) {
+        // Do not recover containers for stopped attempt or previous attempt.
+        if (schedulerAttempt.isStopped()
+            || !schedulerAttempt.getApplicationAttemptId().equals(
+              container.getContainerId().getApplicationAttemptId())) {
+          LOG.info("Skip recovering container " + container
+              + " for already stopped attempt.");
+          killOrphanContainerOnNode(nm, container);
+          continue;
+        }
+      }
+
       // create container
       RMContainer rmContainer = recoverAndCreateContainer(container, nm);
 

+ 0 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java

@@ -717,12 +717,6 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
     this.fairShare = fairShare;
   }
 
-  @Override
-  public boolean isActive() {
-    return true;
-  }
-
-
   @Override
   public void updateDemand() {
     demand = Resources.createResource(0);

+ 10 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 
@@ -68,6 +67,16 @@ public class FSParentQueue extends FSQueue {
     }
   }
 
+  public void recomputeSteadyShares() {
+    policy.computeSteadyShares(childQueues, getSteadyFairShare());
+    for (FSQueue childQueue : childQueues) {
+      childQueue.getMetrics().setSteadyFairShare(childQueue.getSteadyFairShare());
+      if (childQueue instanceof FSParentQueue) {
+        ((FSParentQueue) childQueue).recomputeSteadyShares();
+      }
+    }
+  }
+
   @Override
   public Resource getDemand() {
     return demand;

+ 16 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 @Unstable
 public abstract class FSQueue implements Queue, Schedulable {
   private Resource fairShare = Resources.createResource(0, 0);
+  private Resource steadyFairShare = Resources.createResource(0, 0);
   private final String name;
   protected final FairScheduler scheduler;
   private final FSQueueMetrics metrics;
@@ -151,7 +152,17 @@ public abstract class FSQueue implements Queue, Schedulable {
     this.fairShare = fairShare;
     metrics.setFairShare(fairShare);
   }
-  
+
+  /** Get the steady fair share assigned to this Schedulable. */
+  public Resource getSteadyFairShare() {
+    return steadyFairShare;
+  }
+
+  public void setSteadyFairShare(Resource steadyFairShare) {
+    this.steadyFairShare = steadyFairShare;
+    metrics.setSteadyFairShare(steadyFairShare);
+  }
+
   public boolean hasAccess(QueueACL acl, UserGroupInformation user) {
     return scheduler.getAllocationConfiguration().hasAccess(name, acl, user);
   }
@@ -161,7 +172,7 @@ public abstract class FSQueue implements Queue, Schedulable {
    * queue's current share
    */
   public abstract void recomputeShares();
-  
+
   /**
    * Gets the children of this queue, if any.
    */
@@ -194,7 +205,9 @@ public abstract class FSQueue implements Queue, Schedulable {
     return true;
   }
 
-  @Override
+  /**
+   * Returns true if queue has at least one app running.
+   */
   public boolean isActive() {
     return getNumRunnableApps() > 0;
   }

+ 16 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java

@@ -33,6 +33,8 @@ public class FSQueueMetrics extends QueueMetrics {
 
   @Metric("Fair share of memory in MB") MutableGaugeInt fairShareMB;
   @Metric("Fair share of CPU in vcores") MutableGaugeInt fairShareVCores;
+  @Metric("Steady fair share of memory in MB") MutableGaugeInt steadyFairShareMB;
+  @Metric("Steady fair share of CPU in vcores") MutableGaugeInt steadyFairShareVCores;
   @Metric("Minimum share of memory in MB") MutableGaugeInt minShareMB;
   @Metric("Minimum share of memory in MB") MutableGaugeInt minShareMB;
   @Metric("Minimum share of CPU in vcores") MutableGaugeInt minShareVCores;
   @Metric("Minimum share of CPU in vcores") MutableGaugeInt minShareVCores;
   @Metric("Maximum share of memory in MB") MutableGaugeInt maxShareMB;
   @Metric("Maximum share of memory in MB") MutableGaugeInt maxShareMB;
@@ -55,7 +57,20 @@ public class FSQueueMetrics extends QueueMetrics {
   public int getFairShareVirtualCores() {
     return fairShareVCores.value();
   }
-  
+
+  public void setSteadyFairShare(Resource resource) {
+    steadyFairShareMB.set(resource.getMemory());
+    steadyFairShareVCores.set(resource.getVirtualCores());
+  }
+
+  public int getSteadyFairShareMB() {
+    return steadyFairShareMB.value();
+  }
+
+  public int getSteadyFairShareVCores() {
+    return steadyFairShareVCores.value();
+  }
+
   public void setMinShare(Resource resource) {
     minShareMB.set(resource.getMemory());
     minShareVCores.set(resource.getVirtualCores());

+ 4 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java

@@ -851,6 +851,8 @@ public class FairScheduler extends
     Resources.addTo(clusterResource, node.getTotalCapability());
     updateRootQueueMetrics();
 
+    queueMgr.getRootQueue().setSteadyFairShare(clusterResource);
+    queueMgr.getRootQueue().recomputeSteadyShares();
     LOG.info("Added node " + node.getNodeAddress() +
     LOG.info("Added node " + node.getNodeAddress() +
         " cluster capacity: " + clusterResource);
         " cluster capacity: " + clusterResource);
   }
   }
@@ -885,6 +887,8 @@ public class FairScheduler extends
     }
 
     nodes.remove(rmNode.getNodeID());
+    queueMgr.getRootQueue().setSteadyFairShare(clusterResource);
+    queueMgr.getRootQueue().recomputeSteadyShares();
     LOG.info("Removed node " + rmNode.getNodeAddress() +
     LOG.info("Removed node " + rmNode.getNodeAddress() +
         " cluster capacity: " + clusterResource);
         " cluster capacity: " + clusterResource);
   }
   }

+ 9 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java

@@ -118,6 +118,11 @@ public class QueueManager {
       if (queue == null && create) {
         // if the queue doesn't exist,create it and return
         queue = createQueue(name, queueType);
+
+        // Update steady fair share for all queues
+        if (queue != null) {
+          rootQueue.recomputeSteadyShares();
+        }
       }
       return queue;
     }
@@ -190,7 +195,7 @@ public class QueueManager {
         parent = newParent;
       }
     }
-    
+
     return parent;
   }
 
@@ -376,5 +381,8 @@ public class QueueManager {
             + queue.getName(), ex);
       }
     }
+
+    // Update steady fair shares for all queues
+    rootQueue.recomputeSteadyShares();
   }
 }

+ 0 - 7
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java

@@ -24,7 +24,6 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
-import org.apache.hadoop.yarn.util.resource.Resources;
 
 /**
  * A Schedulable represents an entity that can be scheduled such as an
@@ -102,10 +101,4 @@ public interface Schedulable {
 
   /** Assign a fair share to this Schedulable. */
   public void setFairShare(Resource fairShare);
-
-  /**
-   * Returns true if queue has atleast one app running. Always returns true for
-   * AppSchedulables.
-   */
-  public boolean isActive();
 }

+ 21 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java

@@ -17,10 +17,6 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.concurrent.ConcurrentHashMap;
-
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -29,6 +25,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.Dom
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FairSharePolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy;
 
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.concurrent.ConcurrentHashMap;
+
 @Public
 @Evolving
 public abstract class SchedulingPolicy {
@@ -131,8 +131,10 @@ public abstract class SchedulingPolicy {
   public abstract Comparator<Schedulable> getComparator();
 
   /**
-   * Computes and updates the shares of {@link Schedulable}s as per the
-   * {@link SchedulingPolicy}, to be used later at schedule time.
+   * Computes and updates the shares of {@link Schedulable}s as per
+   * the {@link SchedulingPolicy}, to be used later for scheduling decisions.
+   * The shares computed are instantaneous and only consider queues with
+   * running applications.
    * 
    * @param schedulables {@link Schedulable}s whose shares are to be updated
    * @param totalResources Total {@link Resource}s in the cluster
@@ -140,6 +142,19 @@ public abstract class SchedulingPolicy {
   public abstract void computeShares(
       Collection<? extends Schedulable> schedulables, Resource totalResources);
 
+  /**
+   * Computes and updates the steady shares of {@link FSQueue}s as per the
+   * {@link SchedulingPolicy}. The steady share does not differentiate
+   * between queues with and without running applications under them. The
+   * steady share is not used for scheduling, it is displayed on the Web UI
+   * for better visibility.
+   *
+   * @param queues {@link FSQueue}s whose shares are to be updated
+   * @param totalResources Total {@link Resource}s in the cluster
+   */
+  public abstract void computeSteadyShares(
+      Collection<? extends FSQueue> queues, Resource totalResources);
+
   /**
    * Check if the resource usage is over the fair share under this policy
    *

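A worked example of the distinction drawn in the new javadoc: with three equally weighted sibling queues, no minimum or maximum shares configured, and 12288 MB of cluster memory, computeSteadyShares gives each queue 4096 MB whether or not it has running applications, while computeShares divides the full 12288 MB only among the active queues, so an idle queue reports an instantaneous fair share of 0 and two busy siblings get 6144 MB each. The steady figure is what the new Web UI metric displays; scheduling decisions continue to use the instantaneous share.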
+ 28 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java

@@ -22,6 +22,7 @@ import java.util.Collection;
 
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
 
 /**
@@ -49,14 +50,29 @@ public class ComputeFairShares {
       ResourceType type) {
     Collection<Schedulable> activeSchedulables = new ArrayList<Schedulable>();
     for (Schedulable sched : schedulables) {
-      if (sched.isActive()) {
-        activeSchedulables.add(sched);
-      } else {
+      if ((sched instanceof FSQueue) && !((FSQueue) sched).isActive()) {
         setResourceValue(0, sched.getFairShare(), type);
+      } else {
+        activeSchedulables.add(sched);
       }
     }
 
-    computeSharesInternal(activeSchedulables, totalResources, type);
+    computeSharesInternal(activeSchedulables, totalResources, type, false);
+  }
+
+  /**
+   * Compute the steady fair share of the given queues. The steady fair
+   * share is an allocation of shares considering all queues, i.e.,
+   * active and inactive.
+   *
+   * @param queues
+   * @param totalResources
+   * @param type
+   */
+  public static void computeSteadyShares(
+      Collection<? extends FSQueue> queues, Resource totalResources,
+      ResourceType type) {
+    computeSharesInternal(queues, totalResources, type, true);
   }
   }
 
   /**
    */
    */
   private static void computeSharesInternal(
       Collection<? extends Schedulable> schedulables, Resource totalResources,
+      ResourceType type, boolean isSteadyShare) {
     if (schedulables.isEmpty()) {
     if (schedulables.isEmpty()) {
       return;
     }
     }
     }
     // Set the fair shares based on the value of R we've converged to
     for (Schedulable sched : schedulables) {
+      if (isSteadyShare) {
+        setResourceValue(computeShare(sched, right, type),
+            ((FSQueue) sched).getSteadyFairShare(), type);
+      } else {
+        setResourceValue(
+            computeShare(sched, right, type), sched.getFairShare(), type);
+      }
     }
   }
 

+ 9 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SchedulingPolicy;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -68,6 +69,14 @@ public class DominantResourceFairnessPolicy extends SchedulingPolicy {
       ComputeFairShares.computeShares(schedulables, totalResources, type);
     }
   }
+
+  @Override
+  public void computeSteadyShares(Collection<? extends FSQueue> queues,
+      Resource totalResources) {
+    for (ResourceType type : ResourceType.values()) {
+      ComputeFairShares.computeSteadyShares(queues, totalResources, type);
+    }
+  }
   
   @Override
   public boolean checkIfUsageOverFairShare(Resource usage, Resource fairShare) {

+ 8 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SchedulingPolicy;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
@@ -119,6 +120,13 @@ public class FairSharePolicy extends SchedulingPolicy {
     ComputeFairShares.computeShares(schedulables, totalResources, ResourceType.MEMORY);
   }
 
+  @Override
+  public void computeSteadyShares(Collection<? extends FSQueue> queues,
+      Resource totalResources) {
+    ComputeFairShares.computeSteadyShares(queues, totalResources,
+        ResourceType.MEMORY);
+  }
+
   @Override
   public boolean checkIfUsageOverFairShare(Resource usage, Resource fairShare) {
     return Resources.greaterThan(RESOURCE_CALCULATOR, null, usage, fairShare);

+ 8 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java

@@ -24,6 +24,7 @@ import java.util.Comparator;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SchedulingPolicy;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -87,6 +88,13 @@ public class FifoPolicy extends SchedulingPolicy {
     earliest.setFairShare(Resources.clone(totalResources));
   }
 
+  @Override
+  public void computeSteadyShares(Collection<? extends FSQueue> queues,
+      Resource totalResources) {
+    // Nothing needs to do, as leaf queue doesn't have to calculate steady
+    // fair shares for applications.
+  }
+
   @Override
   public boolean checkIfUsageOverFairShare(Resource usage, Resource fairShare) {
     throw new UnsupportedOperationException(

+ 1 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java

@@ -44,6 +44,7 @@ public class AboutBlock extends HtmlBlock {
       _("Cluster ID:", cinfo.getClusterId()).
       _("Cluster ID:", cinfo.getClusterId()).
       _("ResourceManager state:", cinfo.getState()).
       _("ResourceManager state:", cinfo.getState()).
       _("ResourceManager HA state:", cinfo.getHAState()).
       _("ResourceManager HA state:", cinfo.getHAState()).
+      _("ResourceManager RMStateStore:", cinfo.getRMStateStore()).
       _("ResourceManager started on:", Times.format(cinfo.getStartedOn())).
       _("ResourceManager started on:", Times.format(cinfo.getStartedOn())).
       _("ResourceManager version:", cinfo.getRMBuildVersion() +
       _("ResourceManager version:", cinfo.getRMBuildVersion() +
           " on " + cinfo.getRMVersionBuiltOn()).
           " on " + cinfo.getRMVersionBuiltOn()).

+ 8 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.hadoop.yarn.util.YarnVersionInfo;
 
 
 @XmlRootElement
 @XmlRootElement
@@ -35,6 +36,7 @@ public class ClusterInfo {
   protected long startedOn;
   protected STATE state;
   protected HAServiceProtocol.HAServiceState haState;
+  protected String rmStateStoreName;
   protected String resourceManagerVersion;
   protected String resourceManagerBuildVersion;
   protected String resourceManagerVersionBuiltOn;
@@ -51,6 +53,8 @@ public class ClusterInfo {
     this.id = ts;
     this.state = rm.getServiceState();
     this.haState = rm.getRMContext().getHAServiceState();
+    this.rmStateStoreName = rm.getRMContext().getStateStore().getClass()
+        .getName();
     this.startedOn = ts;
     this.resourceManagerVersion = YarnVersionInfo.getVersion();
     this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion();
@@ -68,6 +72,10 @@ public class ClusterInfo {
     return this.haState.toString();
   }
 
+  public String getRMStateStore() {
+    return this.rmStateStoreName;
+  }
+
   public String getRMVersion() {
     return this.resourceManagerVersion;
   }
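With the new field wired through ClusterInfo and AboutBlock, the state-store class name becomes visible both on the RM's About page and in the cluster-info web service response. A rough way to check it from Java is sketched below; the host and port assume a default single-node ResourceManager webapp on 8088, and the JSON key is assumed to follow the Java field name rmStateStoreName:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    // Sketch: fetch /ws/v1/cluster/info from the RM web service and print the
    // JSON, which should now carry the configured RMStateStore class name.
    public class ClusterInfoProbe {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8088/ws/v1/cluster/info");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        BufferedReader in =
            new BufferedReader(new InputStreamReader(conn.getInputStream()));
        try {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line); // look for the rmStateStoreName entry
          }
        } finally {
          in.close();
          conn.disconnect();
        }
      }
    }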

+ 13 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java

@@ -513,6 +513,19 @@ public class TestWorkPreservingRMRestart {
     // just-recovered containers.
     assertNull(scheduler.getRMContainer(runningContainer.getContainerId()));
     assertNull(scheduler.getRMContainer(completedContainer.getContainerId()));
+
+    rm2.waitForNewAMToLaunchAndRegister(app1.getApplicationId(), 2, nm1);
+
+    MockNM nm2 =
+        new MockNM("127.1.1.1:4321", 8192, rm2.getResourceTrackerService());
+    NMContainerStatus previousAttemptContainer =
+        TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(), 4,
+          ContainerState.RUNNING);
+    nm2.registerNode(Arrays.asList(previousAttemptContainer), null);
+    // Wait for RM to settle down on recovering containers;
+    Thread.sleep(3000);
+    // check containers from previous failed attempt should not be recovered.
+    assertNull(scheduler.getRMContainer(previousAttemptContainer.getContainerId()));
   }
 
   // Apps already completed before RM restart. Restarted RM scheduler should not
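The added assertions rely on a fixed Thread.sleep(3000) to let the RM absorb nm2's registration before verifying that nothing from the failed attempt was recovered. A condition-based wait is one possible alternative; the fragment below is hypothetical, assumes org.apache.hadoop.test.GenericTestUtils and a Guava Supplier are on the test classpath, and treats container recovery as finished once the scheduler counts both nodes:

    // Hypothetical replacement for the fixed sleep: poll until the scheduler has
    // processed nm2's registration, then make the same assertion as the patch.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return scheduler.getNumClusterNodes() == 2;
      }
    }, 100, 5000);
    assertNull(scheduler.getRMContainer(previousAttemptContainer.getContainerId()));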

+ 0 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java

@@ -100,11 +100,6 @@ public class FakeSchedulable implements Schedulable {
     this.fairShare = fairShare;
   }
 
-  @Override
-  public boolean isActive() {
-    return true;
-  }
-
   @Override
   public Resource getDemand() {
     return null;

+ 136 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java

@@ -292,14 +292,19 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     createSchedulingRequest(10 * 1024, "root.default", "user1");
 
     scheduler.update();
+    scheduler.getQueueManager().getRootQueue()
+        .setSteadyFairShare(scheduler.getClusterResource());
+    scheduler.getQueueManager().getRootQueue().recomputeSteadyShares();
 
     Collection<FSLeafQueue> queues = scheduler.getQueueManager().getLeafQueues();
     assertEquals(3, queues.size());
 
-    // Divided three ways - betwen the two queues and the default queue
+    // Divided three ways - between the two queues and the default queue
     for (FSLeafQueue p : queues) {
       assertEquals(3414, p.getFairShare().getMemory());
       assertEquals(3414, p.getMetrics().getFairShareMB());
+      assertEquals(3414, p.getSteadyFairShare().getMemory());
+      assertEquals(3414, p.getMetrics().getSteadyFairShareMB());
     }
   }
 
@@ -323,6 +328,9 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     createSchedulingRequest(10 * 1024, "root.default", "user1");
 
     scheduler.update();
+    scheduler.getQueueManager().getRootQueue()
+        .setSteadyFairShare(scheduler.getClusterResource());
+    scheduler.getQueueManager().getRootQueue().recomputeSteadyShares();
 
     QueueManager queueManager = scheduler.getQueueManager();
     Collection<FSLeafQueue> queues = queueManager.getLeafQueues();
@@ -333,10 +341,16 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     FSLeafQueue queue3 = queueManager.getLeafQueue("parent.queue3", true);
     assertEquals(capacity / 2, queue1.getFairShare().getMemory());
     assertEquals(capacity / 2, queue1.getMetrics().getFairShareMB());
+    assertEquals(capacity / 2, queue1.getSteadyFairShare().getMemory());
+    assertEquals(capacity / 2, queue1.getMetrics().getSteadyFairShareMB());
     assertEquals(capacity / 4, queue2.getFairShare().getMemory());
     assertEquals(capacity / 4, queue2.getMetrics().getFairShareMB());
+    assertEquals(capacity / 4, queue2.getSteadyFairShare().getMemory());
+    assertEquals(capacity / 4, queue2.getMetrics().getSteadyFairShareMB());
     assertEquals(capacity / 4, queue3.getFairShare().getMemory());
     assertEquals(capacity / 4, queue3.getMetrics().getFairShareMB());
+    assertEquals(capacity / 4, queue3.getSteadyFairShare().getMemory());
+    assertEquals(capacity / 4, queue3.getMetrics().getSteadyFairShareMB());
   }
 
   @Test
@@ -771,6 +785,9 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     createSchedulingRequest(10 * 1024, "root.default", "user3");
 
     scheduler.update();
+    scheduler.getQueueManager().getRootQueue()
+        .setSteadyFairShare(scheduler.getClusterResource());
+    scheduler.getQueueManager().getRootQueue().recomputeSteadyShares();
 
     Collection<FSLeafQueue> leafQueues = scheduler.getQueueManager()
         .getLeafQueues();
@@ -780,12 +797,128 @@ public class TestFairScheduler extends FairSchedulerTestBase {
           || leaf.getName().equals("root.parentq.user2")) {
         // assert that the fair share is 1/4th node1's capacity
         assertEquals(capacity / 4, leaf.getFairShare().getMemory());
+        // assert that the steady fair share is 1/4th node1's capacity
+        assertEquals(capacity / 4, leaf.getSteadyFairShare().getMemory());
         // assert weights are equal for both the user queues
         assertEquals(1.0, leaf.getWeights().getWeight(ResourceType.MEMORY), 0);
       }
     }
   }
-  
+
+  @Test
+  public void testSteadyFairShareWithReloadAndNodeAddRemove() throws Exception {
+    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+
+    PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
+    out.println("<?xml version=\"1.0\"?>");
+    out.println("<allocations>");
+    out.println("<defaultQueueSchedulingPolicy>fair</defaultQueueSchedulingPolicy>");
+    out.println("<queue name=\"root\">");
+    out.println("  <schedulingPolicy>drf</schedulingPolicy>");
+    out.println("  <queue name=\"child1\">");
+    out.println("    <weight>1</weight>");
+    out.println("  </queue>");
+    out.println("  <queue name=\"child2\">");
+    out.println("    <weight>1</weight>");
+    out.println("  </queue>");
+    out.println("</queue>");
+    out.println("</allocations>");
+    out.close();
+
+    scheduler.init(conf);
+    scheduler.start();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    // The steady fair share for all queues should be 0
+    QueueManager queueManager = scheduler.getQueueManager();
+    assertEquals(0, queueManager.getLeafQueue("child1", false)
+        .getSteadyFairShare().getMemory());
+    assertEquals(0, queueManager.getLeafQueue("child2", false)
+        .getSteadyFairShare().getMemory());
+
+    // Add one node
+    RMNode node1 =
+        MockNodes
+            .newNodeInfo(1, Resources.createResource(6144), 1, "127.0.0.1");
+    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
+    scheduler.handle(nodeEvent1);
+    assertEquals(6144, scheduler.getClusterResource().getMemory());
+
+    // The steady fair shares for all queues should be updated
+    assertEquals(2048, queueManager.getLeafQueue("child1", false)
+        .getSteadyFairShare().getMemory());
+    assertEquals(2048, queueManager.getLeafQueue("child2", false)
+        .getSteadyFairShare().getMemory());
+
+    // Reload the allocation configuration file
+    out = new PrintWriter(new FileWriter(ALLOC_FILE));
+    out.println("<?xml version=\"1.0\"?>");
+    out.println("<allocations>");
+    out.println("<defaultQueueSchedulingPolicy>fair</defaultQueueSchedulingPolicy>");
+    out.println("<queue name=\"root\">");
+    out.println("  <schedulingPolicy>drf</schedulingPolicy>");
+    out.println("  <queue name=\"child1\">");
+    out.println("    <weight>1</weight>");
+    out.println("  </queue>");
+    out.println("  <queue name=\"child2\">");
+    out.println("    <weight>2</weight>");
+    out.println("  </queue>");
+    out.println("  <queue name=\"child3\">");
+    out.println("    <weight>2</weight>");
+    out.println("  </queue>");
+    out.println("</queue>");
+    out.println("</allocations>");
+    out.close();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    // The steady fair shares for all queues should be updated
+    assertEquals(1024, queueManager.getLeafQueue("child1", false)
+        .getSteadyFairShare().getMemory());
+    assertEquals(2048, queueManager.getLeafQueue("child2", false)
+        .getSteadyFairShare().getMemory());
+    assertEquals(2048, queueManager.getLeafQueue("child3", false)
+        .getSteadyFairShare().getMemory());
+
+    // Remove the node, steady fair shares should back to 0
+    NodeRemovedSchedulerEvent nodeEvent2 = new NodeRemovedSchedulerEvent(node1);
+    scheduler.handle(nodeEvent2);
+    assertEquals(0, scheduler.getClusterResource().getMemory());
+    assertEquals(0, queueManager.getLeafQueue("child1", false)
+        .getSteadyFairShare().getMemory());
+    assertEquals(0, queueManager.getLeafQueue("child2", false)
+        .getSteadyFairShare().getMemory());
+  }
+
+  @Test
+  public void testSteadyFairShareWithQueueCreatedRuntime() throws Exception {
+    conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
+        SimpleGroupsMapping.class, GroupMappingServiceProvider.class);
+    conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "true");
+    scheduler.init(conf);
+    scheduler.start();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    // Add one node
+    RMNode node1 =
+        MockNodes
+            .newNodeInfo(1, Resources.createResource(6144), 1, "127.0.0.1");
+    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
+    scheduler.handle(nodeEvent1);
+    assertEquals(6144, scheduler.getClusterResource().getMemory());
+    assertEquals(6144, scheduler.getQueueManager().getRootQueue()
+        .getSteadyFairShare().getMemory());
+    assertEquals(6144, scheduler.getQueueManager()
+        .getLeafQueue("default", false).getSteadyFairShare().getMemory());
+
+    // Submit one application
+    ApplicationAttemptId appAttemptId1 = createAppAttemptId(1, 1);
+    createApplicationWithAMResource(appAttemptId1, "default", "user1", null);
+    assertEquals(3072, scheduler.getQueueManager()
+        .getLeafQueue("default", false).getSteadyFairShare().getMemory());
+    assertEquals(3072, scheduler.getQueueManager()
+        .getLeafQueue("user1", false).getSteadyFairShare().getMemory());
+  }
+
   /**
    * Make allocation requests and ensure they are reflected in queue demand.
    */
@@ -873,7 +1006,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
   }
 
   @Test
-  public void testHierarchicalQueueAllocationFileParsing() throws IOException, SAXException, 
+  public void testHierarchicalQueueAllocationFileParsing() throws IOException, SAXException,
       AllocationConfigurationException, ParserConfigurationException {
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
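The expected steady-share numbers in the two new tests above follow directly from weight-proportional division of the 6144 MB node, provided the implicit root.default queue (weight 1) is counted alongside the queues declared in the allocation file. With child1 and child2 both at weight 1, the split is 6144 / 3 = 2048 MB each; after the reload adds child3 and raises child2 and child3 to weight 2, the total weight becomes 1 + 2 + 2 + 1 = 6, giving 6144 / 6 = 1024 MB for child1 and 2048 MB for child2 and child3. In the runtime-created-queue test, the default queue initially holds the whole 6144 MB and drops to 3072 MB once the user1 queue appears and the two queues share the node equally.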
 
 

+ 60 - 8
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java

@@ -109,13 +109,15 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase {
 
     for (FSLeafQueue leaf : leafQueues) {
       if (leaf.getName().startsWith("root.parentA")) {
-        assertEquals(0, (double) leaf.getFairShare().getMemory() / nodeCapacity
-            * 100, 0);
+        assertEquals(0, (double) leaf.getFairShare().getMemory() / nodeCapacity,
+            0);
       } else if (leaf.getName().startsWith("root.parentB")) {
-        assertEquals(0, (double) leaf.getFairShare().getMemory() / nodeCapacity
-            * 100, 0.1);
+        assertEquals(0, (double) leaf.getFairShare().getMemory() / nodeCapacity,
+            0);
       }
     }
+
+    verifySteadyFairShareMemory(leafQueues, nodeCapacity);
   }
 
   @Test
@@ -135,14 +137,15 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase {
         100,
         (double) scheduler.getQueueManager()
             .getLeafQueue("root.parentA.childA1", false).getFairShare()
-            .getMemory()
-            / nodeCapacity * 100, 0.1);
+            .getMemory() / nodeCapacity * 100, 0.1);
     assertEquals(
         0,
         (double) scheduler.getQueueManager()
             .getLeafQueue("root.parentA.childA2", false).getFairShare()
-            .getMemory()
-            / nodeCapacity * 100, 0.1);
+            .getMemory() / nodeCapacity, 0.1);
+
+    verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(),
+        nodeCapacity);
   }
 
   @Test
@@ -167,6 +170,9 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase {
               .getMemory()
               / nodeCapacity * 100, .9);
     }
+
+    verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(),
+        nodeCapacity);
   }
 
   @Test
@@ -206,6 +212,9 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase {
             .getLeafQueue("root.parentB.childB1", false).getFairShare()
             .getLeafQueue("root.parentB.childB1", false).getFairShare()
             .getMemory()
             .getMemory()
             / nodeCapacity * 100, .9);
             / nodeCapacity * 100, .9);
+
+    verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(),
+        nodeCapacity);
   }
   }
 
 
   @Test
   @Test
@@ -253,6 +262,9 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase {
             .getLeafQueue("root.parentA.childA2", false).getFairShare()
             .getLeafQueue("root.parentA.childA2", false).getFairShare()
             .getMemory()
             .getMemory()
             / nodeCapacity * 100, 0.1);
             / nodeCapacity * 100, 0.1);
+
+    verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(),
+        nodeCapacity);
   }
   }
 
 
   @Test
   @Test
@@ -304,5 +316,45 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase {
             .getLeafQueue("root.parentB.childB1", false).getFairShare()
             .getLeafQueue("root.parentB.childB1", false).getFairShare()
             .getVirtualCores()
             .getVirtualCores()
             / nodeVCores * 100, .9);
             / nodeVCores * 100, .9);
+    Collection<FSLeafQueue> leafQueues = scheduler.getQueueManager()
+        .getLeafQueues();
+
+    for (FSLeafQueue leaf : leafQueues) {
+      if (leaf.getName().startsWith("root.parentA")) {
+        assertEquals(0.2,
+            (double) leaf.getSteadyFairShare().getMemory() / nodeMem, 0.001);
+        assertEquals(0.2,
+            (double) leaf.getSteadyFairShare().getVirtualCores() / nodeVCores,
+            0.001);
+      } else if (leaf.getName().startsWith("root.parentB")) {
+        assertEquals(0.05,
+            (double) leaf.getSteadyFairShare().getMemory() / nodeMem, 0.001);
+        assertEquals(0.1,
+            (double) leaf.getSteadyFairShare().getVirtualCores() / nodeVCores,
+            0.001);
+      }
+    }
+  }
+
+  /**
+   * Verify whether steady fair shares for all leaf queues still follow
+   * their weight, not related to active/inactive status.
+   *
+   * @param leafQueues
+   * @param nodeCapacity
+   */
+  private void verifySteadyFairShareMemory(Collection<FSLeafQueue> leafQueues,
+      int nodeCapacity) {
+    for (FSLeafQueue leaf : leafQueues) {
+      if (leaf.getName().startsWith("root.parentA")) {
+        assertEquals(0.2,
+            (double) leaf.getSteadyFairShare().getMemory() / nodeCapacity,
+            0.001);
+      } else if (leaf.getName().startsWith("root.parentB")) {
+        assertEquals(0.05,
+            (double) leaf.getSteadyFairShare().getMemory() / nodeCapacity,
+            0.001);
+      }
+    }
   }
   }
 }
+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java

@@ -284,7 +284,7 @@ public class TestRMWebServices extends JerseyTest {
       Exception {
       Exception {
     assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("clusterInfo");
-    assertEquals("incorrect number of elements", 10, info.length());
+    assertEquals("incorrect number of elements", 11, info.length());
     verifyClusterGeneric(info.getLong("id"), info.getLong("startedOn"),
         info.getString("state"), info.getString("haState"),
         info.getString("hadoopVersionBuiltOn"),