
Preparing for hadoop-2.1.0-alpha release.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2.1.0-alpha@1362186 13f79535-47bb-0310-9956-ffa450edef68
Arun Murthy 13 years ago
Parent
Current commit
22ccc73f01
100 changed files, with 2118 additions and 1110 deletions
  1. 33 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 9 0
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  3. 1 1
      hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
  4. 1 1
      hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/HttpAuthentication.xml
  5. 203 70
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  6. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  7. 3 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
  8. 8 12
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  9. 4 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
  10. 5 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
  11. 2 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  12. 2 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  13. 22 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
  14. 47 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
  15. 2 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
  16. 4 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  17. 5 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
  18. 3 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
  19. 9 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  20. 5 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
  21. 2 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordBuilderImpl.java
  22. 2 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
  23. 5 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
  24. 5 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
  25. 18 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java
  26. 165 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
  27. 60 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Quantile.java
  28. 310 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java
  29. 7 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
  30. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
  31. 3 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
  32. 124 24
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  33. 8 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
  34. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java
  35. 14 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
  36. 95 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java
  37. 10 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
  38. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java
  39. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
  40. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
  41. 7 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
  42. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
  43. 52 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
  44. 1 1
      hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
  45. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
  46. 73 7
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
  47. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java
  48. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
  49. 14 13
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
  50. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java
  51. 6 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java
  52. 9 8
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
  53. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHealthMonitor.java
  54. 4 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
  55. 7 6
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverControllerStress.java
  56. 23 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
  57. 55 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java
  58. 46 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
  59. 4 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java
  60. 5 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/Timer.java
  61. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
  62. 5 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
  63. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java
  64. 135 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
  65. 125 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestSampleQuantiles.java
  66. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
  67. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
  68. 2 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
  69. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
  70. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java
  71. 9 8
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestMultithreadedTestUtil.java
  72. 49 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
  73. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
  74. 11 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
  75. 5 4
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.java
  76. 3 2
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/scheduler/SchedulerService.java
  77. 1 4
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java
  78. 1 1
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
  79. 1 1
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm
  80. 1 1
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm
  81. 17 16
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java
  82. 10 8
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java
  83. 11 10
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java
  84. 12 10
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java
  85. 65 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  86. 12 7
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  87. 42 16
      hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
  88. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake
  89. 21 36
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
  90. 0 5
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
  91. 0 312
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/build-contrib.xml
  92. 0 87
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/build.xml
  93. 0 18
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/global_footer.mk
  94. 0 51
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/global_header.mk
  95. 0 71
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/ivy.xml
  96. 0 5
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/ivy/libraries.properties
  97. 0 122
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_connect.c
  98. 7 27
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
  99. 5 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  100. 15 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

+ 33 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -63,6 +63,14 @@ Release 2.0.1-alpha - UNRELEASED
     HADOOP-8533. Remove parallel call ununsed capability in RPC.
     (Brandon Li via suresh)
 
+    HADOOP-8423. MapFile.Reader.get() crashes jvm or throws
+    EOFException on Snappy or LZO block-compressed data
+    (todd via harsh)
+
+    HADOOP-8541. Better high-percentile latency metrics. (Andrew Wang via atm)
+
+    HADOOP-8362. Improve exception message when Configuration.set() is called with a null key or value. (Madhukara Phatak and Suresh Srinivas via harsh)
+
   BUG FIXES
 
     HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
@@ -137,6 +145,19 @@ Release 2.0.1-alpha - UNRELEASED
     HADOOP-8566. AvroReflectSerializer.accept(Class) throws a NPE if the class has no 
     package (primitive types and arrays). (tucu)
 
+    HADOOP-8586. Fixup a bunch of SPNEGO misspellings. (eli)
+
+    HADOOP-3886. Error in javadoc of Reporter, Mapper and Progressable
+    (Jingguo Yao via harsh)
+
+    HADOOP-8587. HarFileSystem access of harMetaCache isn't threadsafe. (eli)
+
+    HADOOP-8585. Fix initialization circularity between UserGroupInformation
+    and HadoopConfiguration. (Colin Patrick McCabe via atm)
+
+    HADOOP-8552. Conflict: Same security.log.file for multiple users. 
+    (kkambatl via tucu)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HADOOP-8220. ZKFailoverController doesn't handle failure to become active
@@ -348,6 +369,13 @@ Release 2.0.0-alpha - 05-23-2012
     HADOOP-8422. Deprecate FileSystem#getDefault* and getServerDefault
     methods that don't take a Path argument. (eli)
 
+    HADOOP-7818. DiskChecker#checkDir should fail if the directory is
+    not executable. (Madhukara Phatak via harsh)
+
+    HADOOP-8531. SequenceFile Writer can throw out a better error if a
+    serializer or deserializer isn't available
+    (Madhukara Phatak via harsh)
+
   BUG FIXES
 
     HADOOP-8199. Fix issues in start-all.sh and stop-all.sh (Devaraj K via umamahesh)
@@ -561,6 +589,8 @@ Release 0.23.3 - UNRELEASED
 
     HADOOP-8110. Fix trash checkpoint collisions (Jason Lowe via daryn)
 
+    HADOOP-8525. Provide Improved Traceability for Configuration (bobby)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -615,6 +645,9 @@ Release 0.23.3 - UNRELEASED
     HADOOP-8129. ViewFileSystemTestSetup setupForViewFileSystem is erring
     (Ahmed Radwan and Ravi Prakash via bobby)
 
+    HADOOP-8573. Configuration tries to read from an inputstream resource 
+    multiple times (Robert Evans via tgraves)
+
 Release 0.23.2 - UNRELEASED 
 
   NEW FEATURES

+ 9 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -295,4 +295,13 @@
       <Class name="~org\.apache\.hadoop\.ha\.proto\.ZKFCProtocolProtos.*"/>
     </Match>
 
+    <!--
+       Manually checked, misses child thread manually syncing on parent's intrinsic lock.
+    -->
+     <Match>
+       <Class name="org.apache.hadoop.metrics2.lib.MutableQuantiles" />
+       <Field name="previousSnapshot" />
+       <Bug pattern="IS2_INCONSISTENT_SYNC" />
+     </Match>
+
 </FindBugsFilter>

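The MutableQuantiles field excluded above backs the new high-percentile latency metrics from HADOOP-8541 (see MetricsRegistry, MutableQuantiles and SampleQuantiles in the file list). A minimal, hypothetical sketch of how such a metric might be registered and fed samples, assuming a newQuantiles(name, description, sampleName, valueName, intervalSeconds) factory on MetricsRegistry and an add(long) method on MutableQuantiles; the class and metric names here are illustrative only:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;

// Illustrative metrics source tracking RPC latency percentiles.
public class RpcLatencyMetrics {
  private final MetricsRegistry registry = new MetricsRegistry("RpcLatency");

  // Assumed factory: estimates latency percentiles over a rolling window,
  // with the last argument taken to be the snapshot interval in seconds.
  private final MutableQuantiles latency =
      registry.newQuantiles("rpcLatency", "RPC call latency",
          "ops", "latencyMs", 60);

  // Record one observed latency sample, in milliseconds.
  public void addSample(long millis) {
    latency.add(millis);
  }
}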
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/conf/log4j.properties

@@ -106,7 +106,7 @@ hadoop.security.logger=INFO,NullAppender
 hadoop.security.log.maxfilesize=256MB
 hadoop.security.log.maxbackupindex=20
 log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
 log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
 log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
 log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/HttpAuthentication.xml

@@ -110,7 +110,7 @@
 
       <p><code>hadoop.http.authentication.kerberos.principal</code>: Indicates the Kerberos 
       principal to be used for HTTP endpoint when using 'kerberos' authentication.
-      The principal short name must be <code>HTTP</code> per Kerberos HTTP SPENGO specification.
+      The principal short name must be <code>HTTP</code> per Kerberos HTTP SPNEGO specification.
       The default value is <code>HTTP/_HOST@$LOCALHOST</code>, where <code>_HOST</code> -if present-
       is replaced with bind address of the HTTP server.
       </p>

+ 203 - 70
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -40,9 +40,11 @@ import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.ListIterator;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
 import java.util.StringTokenizer;
@@ -75,7 +77,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.JsonFactory;
 import org.codehaus.jackson.JsonGenerator;
-import org.w3c.dom.Comment;
 import org.w3c.dom.DOMException;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
@@ -83,6 +84,7 @@ import org.w3c.dom.Node;
 import org.w3c.dom.NodeList;
 import org.w3c.dom.Text;
 import org.xml.sax.SAXException;
+import com.google.common.base.Preconditions;
 
 /** 
  * Provides access to configuration parameters.
@@ -158,17 +160,45 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 
   private boolean quietmode = true;
   
+  private static class Resource {
+    private final Object resource;
+    private final String name;
+    
+    public Resource(Object resource) {
+      this(resource, resource.toString());
+    }
+    
+    public Resource(Object resource, String name) {
+      this.resource = resource;
+      this.name = name;
+    }
+    
+    public String getName(){
+      return name;
+    }
+    
+    public Object getResource() {
+      return resource;
+    }
+    
+    @Override
+    public String toString() {
+      return name;
+    }
+  }
+  
   /**
    * List of configuration resources.
    */
-  private ArrayList<Object> resources = new ArrayList<Object>();
-
+  private ArrayList<Resource> resources = new ArrayList<Resource>();
+  
   /**
    * The value reported as the setting resource when a key is set
-   * by code rather than a file resource.
+   * by code rather than a file resource by dumpConfiguration.
    */
   static final String UNKNOWN_RESOURCE = "Unknown";
 
+
   /**
    * List of configuration parameters marked <b>final</b>. 
    */
@@ -202,7 +232,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * Stores the mapping of key to the resource which modifies or loads 
    * the key most recently
    */
-  private HashMap<String, String> updatingResource;
+  private HashMap<String, String[]> updatingResource;
 
   /**
    * Class to keep the information about the keys which replace the deprecated
@@ -369,7 +399,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @return alternate name.
    */
   private String[] getAlternateNames(String name) {
-    String oldName, altNames[] = null;
+    String altNames[] = null;
     DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
     if (keyInfo == null) {
       altNames = (reverseDeprecatedKeyMap.get(name) != null ) ? 
@@ -485,7 +515,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    */
   public Configuration(boolean loadDefaults) {
     this.loadDefaults = loadDefaults;
-    updatingResource = new HashMap<String, String>();
+    updatingResource = new HashMap<String, String[]>();
     synchronized(Configuration.class) {
       REGISTRY.put(this, null);
     }
@@ -498,7 +528,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    */
   @SuppressWarnings("unchecked")
   public Configuration(Configuration other) {
-   this.resources = (ArrayList)other.resources.clone();
+   this.resources = (ArrayList<Resource>) other.resources.clone();
    synchronized(other) {
      if (other.properties != null) {
        this.properties = (Properties)other.properties.clone();
@@ -508,7 +538,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
        this.overlay = (Properties)other.overlay.clone();
      }
 
-     this.updatingResource = new HashMap<String, String>(other.updatingResource);
+     this.updatingResource = new HashMap<String, String[]>(other.updatingResource);
    }
    
     this.finalParameters = new HashSet<String>(other.finalParameters);
@@ -546,7 +576,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *             with that name.
    */
   public void addResource(String name) {
-    addResourceObject(name);
+    addResourceObject(new Resource(name));
   }
 
   /**
@@ -560,7 +590,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *            the classpath.
    */
   public void addResource(URL url) {
-    addResourceObject(url);
+    addResourceObject(new Resource(url));
   }
 
   /**
@@ -574,7 +604,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *             the classpath.
    */
   public void addResource(Path file) {
-    addResourceObject(file);
+    addResourceObject(new Resource(file));
   }
 
   /**
@@ -583,10 +613,29 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * The properties of this resource will override properties of previously 
    * added resources, unless they were marked <a href="#Final">final</a>. 
    * 
-   * @param in InputStream to deserialize the object from. 
+   * WARNING: The contents of the InputStream will be cached, by this method. 
+   * So use this sparingly because it does increase the memory consumption.
+   * 
+   * @param in InputStream to deserialize the object from. In will be read from
+   * when a get or set is called next.  After it is read the stream will be
+   * closed. 
    */
   public void addResource(InputStream in) {
-    addResourceObject(in);
+    addResourceObject(new Resource(in));
+  }
+
+  /**
+   * Add a configuration resource. 
+   * 
+   * The properties of this resource will override properties of previously 
+   * added resources, unless they were marked <a href="#Final">final</a>. 
+   * 
+   * @param in InputStream to deserialize the object from.
+   * @param name the name of the resource because InputStream.toString is not
+   * very descriptive some times.  
+   */
+  public void addResource(InputStream in, String name) {
+    addResourceObject(new Resource(in, name));
   }
   
   
@@ -603,7 +652,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     finalParameters.clear();                      // clear site-limits
   }
   
-  private synchronized void addResourceObject(Object resource) {
+  private synchronized void addResourceObject(Resource resource) {
     resources.add(resource);                      // add to resources
     reloadConfiguration();
   }
@@ -715,17 +764,46 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @param value property value.
    */
   public void set(String name, String value) {
+    set(name, value, null);
+  }
+  
+  /** 
+   * Set the <code>value</code> of the <code>name</code> property. If 
+   * <code>name</code> is deprecated or there is a deprecated name associated to it,
+   * it sets the value to both names.
+   * 
+   * @param name property name.
+   * @param value property value.
+   * @param source the place that this configuration value came from 
+   * (For debugging).
+   * @throws IllegalArgumentException when the value or name is null.
+   */
+  public void set(String name, String value, String source) {
+    Preconditions.checkArgument(
+        name != null,
+        "Property name must not be null");
+    Preconditions.checkArgument(
+        value != null,
+        "Property value must not be null");
     if (deprecatedKeyMap.isEmpty()) {
       getProps();
     }
     getOverlay().setProperty(name, value);
     getProps().setProperty(name, value);
-    updatingResource.put(name, UNKNOWN_RESOURCE);
+    if(source == null) {
+      updatingResource.put(name, new String[] {"programatically"});
+    } else {
+      updatingResource.put(name, new String[] {source});
+    }
     String[] altNames = getAlternateNames(name);
     if (altNames != null && altNames.length > 0) {
+      String altSource = "because " + name + " is deprecated";
       for(String altName : altNames) {
-    	getOverlay().setProperty(altName, value);
-        getProps().setProperty(altName, value);
+        if(!altName.equals(name)) {
+          getOverlay().setProperty(altName, value);
+          getProps().setProperty(altName, value);
+          updatingResource.put(altName, new String[] {altSource});
+        }
       }
     }
     warnOnceIfDeprecated(name);
@@ -1035,17 +1113,22 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   }
 
   /**
-   * Gets the absolute path to the resource object (file, URL, etc.), for a given
-   * property name.
+   * Gets information about why a property was set.  Typically this is the 
+   * path to the resource objects (file, URL, etc.) the property came from, but
+   * it can also indicate that it was set programatically, or because of the
+   * command line.
    *
    * @param name - The property name to get the source of.
-   * @return null - If the property or its source wasn't found or if the property
-   * was defined in code (i.e. in a Configuration instance, not from a physical
-   * resource). Otherwise, returns the absolute path of the resource that loaded
-   * the property name, as a String.
+   * @return null - If the property or its source wasn't found. Otherwise, 
+   * returns a list of the sources of the resource.  The older sources are
+   * the first ones in the list.  So for example if a configuration is set from
+   * the command line, and then written out to a file that is read back in the
+   * first entry would indicate that it was set from the command line, while
+   * the second one would indicate the file that the new configuration was read
+   * in from.
    */
   @InterfaceStability.Unstable
-  public synchronized String getPropertySource(String name) {
+  public synchronized String[] getPropertySources(String name) {
     if (properties == null) {
       // If properties is null, it means a resource was newly added
       // but the props were cleared so as to load it upon future
@@ -1057,11 +1140,11 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     if (properties == null || updatingResource == null) {
       return null;
     } else {
-      String source = updatingResource.get(name);
-      if (source == null || source.equals(UNKNOWN_RESOURCE)) {
+      String[] source = updatingResource.get(name);
+      if(source == null) {
         return null;
       } else {
-        return source;
+        return Arrays.copyOf(source, source.length);
       }
     }
   }
@@ -1664,11 +1747,14 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   protected synchronized Properties getProps() {
     if (properties == null) {
       properties = new Properties();
+      HashMap<String, String[]> backup = 
+        new HashMap<String, String[]>(updatingResource);
       loadResources(properties, resources, quietmode);
       if (overlay!= null) {
         properties.putAll(overlay);
         for (Map.Entry<Object,Object> item: overlay.entrySet()) {
-          updatingResource.put((String) item.getKey(), UNKNOWN_RESOURCE);
+          String key = (String)item.getKey();
+          updatingResource.put(key, backup.get(key));
         }
       }
     }
@@ -1714,26 +1800,33 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   }
 
   private void loadResources(Properties properties,
-                             ArrayList resources,
+                             ArrayList<Resource> resources,
                              boolean quiet) {
     if(loadDefaults) {
       for (String resource : defaultResources) {
-        loadResource(properties, resource, quiet);
+        loadResource(properties, new Resource(resource), quiet);
       }
     
       //support the hadoop-site.xml as a deprecated case
       if(getResource("hadoop-site.xml")!=null) {
-        loadResource(properties, "hadoop-site.xml", quiet);
+        loadResource(properties, new Resource("hadoop-site.xml"), quiet);
      }
    }
    
-    for (Object resource : resources) {
-      loadResource(properties, resource, quiet);
+    for (int i = 0; i < resources.size(); i++) {
+      Resource ret = loadResource(properties, resources.get(i), quiet);
+      if (ret != null) {
+        resources.set(i, ret);
+      }
    }
  }
  
-  private void loadResource(Properties properties, Object name, boolean quiet) {
+  private Resource loadResource(Properties properties, Resource wrapper, boolean quiet) {
+    String name = UNKNOWN_RESOURCE;
     try {
+      Object resource = wrapper.getResource();
+      name = wrapper.getName();
+      
       DocumentBuilderFactory docBuilderFactory 
         = DocumentBuilderFactory.newInstance();
       //ignore all comments inside the xml file
@@ -1752,27 +1845,28 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
       Document doc = null;
       Element root = null;
-
-      if (name instanceof URL) {                  // an URL resource
-        URL url = (URL)name;
+      boolean returnCachedProperties = false;
+      
+      if (resource instanceof URL) {                  // an URL resource
+        URL url = (URL)resource;
         if (url != null) {
           if (!quiet) {
             LOG.info("parsing " + url);
           }
           doc = builder.parse(url.toString());
         }
-      } else if (name instanceof String) {        // a CLASSPATH resource
-        URL url = getResource((String)name);
+      } else if (resource instanceof String) {        // a CLASSPATH resource
+        URL url = getResource((String)resource);
         if (url != null) {
           if (!quiet) {
             LOG.info("parsing " + url);
          }
           doc = builder.parse(url.toString());
         }
-      } else if (name instanceof Path) {          // a file resource
+      } else if (resource instanceof Path) {          // a file resource
         // Can't use FileSystem API or we get an infinite loop
         // since FileSystem uses Configuration API.  Use java.io.File instead.
-        File file = new File(((Path)name).toUri().getPath())
+        File file = new File(((Path)resource).toUri().getPath())
           .getAbsoluteFile();
         if (file.exists()) {
           if (!quiet) {
@@ -1785,25 +1879,32 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
             in.close();
           }
         }
-      } else if (name instanceof InputStream) {
+      } else if (resource instanceof InputStream) {
         try {
-          doc = builder.parse((InputStream)name);
+          doc = builder.parse((InputStream)resource);
+          returnCachedProperties = true;
         } finally {
-          ((InputStream)name).close();
+          ((InputStream)resource).close();
         }
-      } else if (name instanceof Element) {
-        root = (Element)name;
+      } else if (resource instanceof Properties) {
+        overlay(properties, (Properties)resource);
+      } else if (resource instanceof Element) {
+        root = (Element)resource;
       }
 
       if (doc == null && root == null) {
         if (quiet)
-          return;
-        throw new RuntimeException(name + " not found");
+          return null;
+        throw new RuntimeException(resource + " not found");
       }
 
       if (root == null) {
         root = doc.getDocumentElement();
       }
+      Properties toAddTo = properties;
+      if(returnCachedProperties) {
+        toAddTo = new Properties();
+      }
       if (!"configuration".equals(root.getTagName()))
         LOG.fatal("bad conf file: top-level element not <configuration>");
       NodeList props = root.getChildNodes();
@@ -1813,7 +1914,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
           continue;
         Element prop = (Element)propNode;
         if ("configuration".equals(prop.getTagName())) {
-          loadResource(properties, prop, quiet);
+          loadResource(toAddTo, new Resource(prop, name), quiet);
           continue;
         }
         if (!"property".equals(prop.getTagName()))
@@ -1822,6 +1923,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
         String attr = null;
         String value = null;
         boolean finalParameter = false;
+        LinkedList<String> source = new LinkedList<String>();
         for (int j = 0; j < fields.getLength(); j++) {
           Node fieldNode = fields.item(j);
           if (!(fieldNode instanceof Element))
@@ -1833,7 +1935,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
             value = ((Text)field.getFirstChild()).getData();
           if ("final".equals(field.getTagName()) && field.hasChildNodes())
             finalParameter = "true".equals(((Text)field.getFirstChild()).getData());
+          if ("source".equals(field.getTagName()) && field.hasChildNodes())
+            source.add(((Text)field.getFirstChild()).getData());
         }
+        source.add(name);
         
         // Ignore this parameter if it has already been marked as 'final'
         if (attr != null) {
@@ -1842,36 +1947,49 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
             keyInfo.accessed = false;
             for (String key:keyInfo.newKeys) {
               // update new keys with deprecated key's value 
-              loadProperty(properties, name, key, value, finalParameter);
+              loadProperty(toAddTo, name, key, value, finalParameter, 
+                  source.toArray(new String[source.size()]));
             }
           }
           else {
-            loadProperty(properties, name, attr, value, finalParameter);
+            loadProperty(toAddTo, name, attr, value, finalParameter, 
+                source.toArray(new String[source.size()]));
           }
         }
       }
-        
+      
+      if (returnCachedProperties) {
+        overlay(properties, toAddTo);
+        return new Resource(toAddTo, name);
+      }
+      return null;
     } catch (IOException e) {
-      LOG.fatal("error parsing conf file: " + e);
+      LOG.fatal("error parsing conf " + name, e);
       throw new RuntimeException(e);
     } catch (DOMException e) {
-      LOG.fatal("error parsing conf file: " + e);
+      LOG.fatal("error parsing conf " + name, e);
       throw new RuntimeException(e);
     } catch (SAXException e) {
-      LOG.fatal("error parsing conf file: " + e);
+      LOG.fatal("error parsing conf " + name, e);
       throw new RuntimeException(e);
     } catch (ParserConfigurationException e) {
-      LOG.fatal("error parsing conf file: " + e);
+      LOG.fatal("error parsing conf " + name , e);
       throw new RuntimeException(e);
     }
   }
 
-  private void loadProperty(Properties properties, Object name, String attr,
-      String value, boolean finalParameter) {
+  private void overlay(Properties to, Properties from) {
+    for (Entry<Object, Object> entry: from.entrySet()) {
+      to.put(entry.getKey(), entry.getValue());
+    }
+  }
+  
+  private void loadProperty(Properties properties, String name, String attr,
+      String value, boolean finalParameter, String[] source) {
     if (value != null) {
       if (!finalParameters.contains(attr)) {
         properties.setProperty(attr, value);
-        updatingResource.put(attr, name.toString());
+        updatingResource.put(attr, source);
       } else {
         LOG.warn(name+":an attempt to override final parameter: "+attr
             +";  Ignoring.");
@@ -1943,11 +2061,6 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       Element propNode = doc.createElement("property");
       conf.appendChild(propNode);
 
-      if (updatingResource != null) {
-        Comment commentNode = doc.createComment(
-          "Loaded from " + updatingResource.get(name));
-        propNode.appendChild(commentNode);
-      }
       Element nameNode = doc.createElement("name");
       nameNode.appendChild(doc.createTextNode(name));
       propNode.appendChild(nameNode);
@@ -1956,6 +2069,17 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       valueNode.appendChild(doc.createTextNode(value));
       propNode.appendChild(valueNode);
 
+      if (updatingResource != null) {
+        String[] sources = updatingResource.get(name);
+        if(sources != null) {
+          for(String s : sources) {
+            Element sourceNode = doc.createElement("source");
+            sourceNode.appendChild(doc.createTextNode(s));
+            propNode.appendChild(sourceNode);
+          }
+        }
+      }
+      
       conf.appendChild(doc.createTextNode("\n"));
     }
     return doc;
@@ -1988,8 +2112,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
                                        config.get((String) item.getKey()));
         dumpGenerator.writeBooleanField("isFinal",
                                         config.finalParameters.contains(item.getKey()));
-        dumpGenerator.writeStringField("resource",
-                                       config.updatingResource.get(item.getKey()));
+        String[] resources = config.updatingResource.get(item.getKey());
+        String resource = UNKNOWN_RESOURCE;
+        if(resources != null && resources.length > 0) {
+          resource = resources[0];
+        }
+        dumpGenerator.writeStringField("resource", resource);
         dumpGenerator.writeEndObject();
       }
     }
@@ -2029,7 +2157,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     toString(resources, sb);
     return sb.toString();
   }
-
+  
   private <T> void toString(List<T> resources, StringBuilder sb) {
     ListIterator<T> i = resources.listIterator();
     while (i.hasNext()) {
@@ -2066,8 +2194,11 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     clear();
     int size = WritableUtils.readVInt(in);
     for(int i=0; i < size; ++i) {
-      set(org.apache.hadoop.io.Text.readString(in), 
-          org.apache.hadoop.io.Text.readString(in));
+      String key = org.apache.hadoop.io.Text.readString(in);
+      String value = org.apache.hadoop.io.Text.readString(in);
+      set(key, value); 
+      String sources[] = WritableUtils.readCompressedStringArray(in);
+      updatingResource.put(key, sources);
     }
   }
 
@@ -2078,6 +2209,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     for(Map.Entry<Object, Object> item: props.entrySet()) {
       org.apache.hadoop.io.Text.writeString(out, (String) item.getKey());
       org.apache.hadoop.io.Text.writeString(out, (String) item.getValue());
+      WritableUtils.writeCompressedStringArray(out, 
+          updatingResource.get(item.getKey()));
     }
   }
  

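Taken together, the Configuration changes above add per-key source tracking (HADOOP-8525), cache InputStream resources so they are only parsed once (HADOOP-8573), and reject null names or values in set() (HADOOP-8362). A short sketch of how a caller might exercise the new API surface; the file name and property key below are hypothetical:

import java.io.FileInputStream;
import java.io.InputStream;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;

public class ConfSourceDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // New overload: give the stream a name so source tracking reports
    // something more useful than InputStream.toString().
    InputStream in = new FileInputStream("my-site.xml");
    conf.addResource(in, "my-site.xml");

    // New three-argument set(): record where the value came from.
    conf.set("my.example.key", "42", "command line");

    // Null names or values now fail fast with IllegalArgumentException:
    // conf.set(null, "oops");

    // getPropertySources() replaces getPropertySource() and returns the
    // chain of sources, oldest first.
    System.out.println(
        Arrays.toString(conf.getPropertySources("my.example.key")));
  }
}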
+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -166,5 +166,12 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
     "hadoop.http.staticuser.user";
   public static final String DEFAULT_HADOOP_HTTP_STATIC_USER =
     "dr.who";
+
+  /* Path to the Kerberos ticket cache.  Setting this will force
+   * UserGroupInformation to use only this ticket cache file when creating a
+   * FileSystem instance.
+   */
+  public static final String KERBEROS_TICKET_CACHE_PATH =
+      "hadoop.security.kerberos.ticket.cache.path";
 }
 

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java

@@ -27,6 +27,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.Time;
 
 /**
  * A daemon thread that waits for the next file system to renew.
@@ -62,7 +63,7 @@ public class DelegationTokenRenewer<T extends FileSystem & DelegationTokenRenewe
     /** Get the delay until this event should happen. */
     @Override
     public long getDelay(final TimeUnit unit) {
-      final long millisLeft = renewalTime - System.currentTimeMillis();
+      final long millisLeft = renewalTime - Time.now();
       return unit.convert(millisLeft, TimeUnit.MILLISECONDS);
     }
 
@@ -92,7 +93,7 @@ public class DelegationTokenRenewer<T extends FileSystem & DelegationTokenRenewe
      * @param newTime the new time
      */
     private void updateRenewalTime() {
-      renewalTime = RENEW_CYCLE + System.currentTimeMillis();
+      renewalTime = RENEW_CYCLE + Time.now();
     }
 
     /**

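Several files in this commit (DelegationTokenRenewer above, plus TrashPolicyDefault, ViewFileSystem and ViewFs below) replace direct System.currentTimeMillis() calls with the new org.apache.hadoop.util.Time helper listed in the file summary. The class itself is not shown in this excerpt; a rough sketch of what such a wrapper plausibly contains:

package org.apache.hadoop.util;

// Plausible shape of the new Time utility; the actual 52-line Time.java
// added by this commit is not included in this diff excerpt.
public final class Time {
  private Time() {}

  /** Wall-clock time in milliseconds, same as System.currentTimeMillis(). */
  public static long now() {
    return System.currentTimeMillis();
  }

  /** Monotonic clock in milliseconds, for measuring elapsed intervals. */
  public static long monotonicNow() {
    return System.nanoTime() / 1000000L;
  }
}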
+ 8 - 12
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -137,12 +137,10 @@ public abstract class FileSystem extends Configured implements Closeable {
    */
   public static FileSystem get(final URI uri, final Configuration conf,
         final String user) throws IOException, InterruptedException {
-    UserGroupInformation ugi;
-    if (user == null) {
-      ugi = UserGroupInformation.getCurrentUser();
-    } else {
-      ugi = UserGroupInformation.createRemoteUser(user);
-    }
+    String ticketCachePath =
+      conf.get(CommonConfigurationKeys.KERBEROS_TICKET_CACHE_PATH);
+    UserGroupInformation ugi =
+        UserGroupInformation.getBestUGI(ticketCachePath, user);
     return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
       public FileSystem run() throws IOException {
         return get(uri, conf);
@@ -314,12 +312,10 @@ public abstract class FileSystem extends Configured implements Closeable {
    */
   public static FileSystem newInstance(final URI uri, final Configuration conf,
       final String user) throws IOException, InterruptedException {
-    UserGroupInformation ugi;
-    if (user == null) {
-      ugi = UserGroupInformation.getCurrentUser();
-    } else {
-      ugi = UserGroupInformation.createRemoteUser(user);
-    }
+    String ticketCachePath =
+      conf.get(CommonConfigurationKeys.KERBEROS_TICKET_CACHE_PATH);
+    UserGroupInformation ugi =
+        UserGroupInformation.getBestUGI(ticketCachePath, user);
     return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
       public FileSystem run() throws IOException {
         return newInstance(uri,conf); 

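The FileSystem change above, together with the new CommonConfigurationKeys.KERBEROS_TICKET_CACHE_PATH key, lets a client pin UserGroupInformation to a specific Kerberos ticket cache when obtaining a FileSystem for another user. A brief sketch of how a caller might use it; the URI, cache path and user name are placeholders:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;

public class TicketCacheDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Force UGI to use this ticket cache file (placeholder path).
    conf.set(CommonConfigurationKeys.KERBEROS_TICKET_CACHE_PATH,
        "/tmp/krb5cc_1000");

    // FileSystem.get(uri, conf, user) now resolves the UGI via
    // UserGroupInformation.getBestUGI(ticketCachePath, user).
    FileSystem fs = FileSystem.get(
        new URI("hdfs://namenode.example.com:8020"), conf, "alice");
    System.out.println(fs.getUri());
  }
}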
+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -24,11 +24,11 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URLDecoder;
 import java.util.ArrayList;
-import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.HashMap;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -52,7 +52,8 @@ import org.apache.hadoop.util.Progressable;
 public class HarFileSystem extends FilterFileSystem {
   public static final int VERSION = 3;
 
-  private static final Map<URI, HarMetaData> harMetaCache = new HashMap<URI, HarMetaData>();
+  private static final Map<URI, HarMetaData> harMetaCache =
+      new ConcurrentHashMap<URI, HarMetaData>();
 
   // uri representation of this Har filesystem
   private URI uri;
@@ -1055,7 +1056,7 @@ public class HarFileSystem extends FilterFileSystem {
       FileStatus archiveStat = fs.getFileStatus(archiveIndexPath);
       archiveIndexTimestamp = archiveStat.getModificationTime();
       LineReader aLin;
-      String retStr = null;
+
       // now start reading the real index file
       for (Store s: stores) {
         read = 0;

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Time;
 
 /** Provides a <i>trash</i> feature.  Files are moved to a user's trash
  * directory, a subdirectory of their home directory named ".Trash".  Files are
@@ -136,7 +137,7 @@ public class TrashPolicyDefault extends TrashPolicy {
         String orig = trashPath.toString();
         
         while(fs.exists(trashPath)) {
-          trashPath = new Path(orig + System.currentTimeMillis());
+          trashPath = new Path(orig + Time.now());
         }
         
         if (fs.rename(path, trashPath))           // move to current trash
@@ -187,7 +188,7 @@ public class TrashPolicyDefault extends TrashPolicy {
       return;
     }
 
-    long now = System.currentTimeMillis();
+    long now = Time.now();
     for (int i = 0; i < dirs.length; i++) {
       Path path = dirs[i].getPath();
       String dir = path.toUri().getPath();
@@ -248,7 +249,7 @@ public class TrashPolicyDefault extends TrashPolicy {
     public void run() {
       if (emptierInterval == 0)
         return;                                   // trash disabled
-      long now = System.currentTimeMillis();
+      long now = Time.now();
       long end;
       while (true) {
         end = ceiling(now, emptierInterval);
@@ -259,7 +260,7 @@ public class TrashPolicyDefault extends TrashPolicy {
         }
 
         try {
-          now = System.currentTimeMillis();
+          now = Time.now();
           if (now >= end) {
 
             FileStatus[] homes = null;

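The recurring change in this commit replaces direct System.currentTimeMillis() calls with Time.now(). The Time utility itself is not shown in this part of the diff; the following is only a rough sketch of what such a wrapper could look like (assumed shape, the actual org.apache.hadoop.util.Time may differ), to illustrate why the substitution is mechanical and behavior-preserving:

// Hypothetical sketch of a centralized clock entry point (not from the patch).
package org.apache.hadoop.util;

public final class Time {
  private Time() {}

  /** Current wall-clock time in milliseconds since the Unix epoch. */
  public static long now() {
    return System.currentTimeMillis();
  }
}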
+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Time;
 
 /**
  * ViewFileSystem (extends the FileSystem interface) implements a client-side
@@ -146,7 +147,7 @@ public class ViewFileSystem extends FileSystem {
    */
   public ViewFileSystem() throws IOException {
     ugi = UserGroupInformation.getCurrentUser();
-    creationTime = System.currentTimeMillis();
+    creationTime = Time.now();
   }
 
   /**

+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Time;
 
 
 /**
@@ -192,7 +193,7 @@ public class ViewFs extends AbstractFileSystem {
   ViewFs(final URI theUri, final Configuration conf) throws IOException,
       URISyntaxException {
     super(theUri, FsConstants.VIEWFS_SCHEME, false, -1);
-    creationTime = System.currentTimeMillis();
+    creationTime = Time.now();
     ugi = UserGroupInformation.getCurrentUser();
     config = conf;
     // Now build  client side view (i.e. client side mount table) from config.

+ 22 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java

@@ -153,6 +153,28 @@ public class IOUtils {
     }
   }
   
+  /**
+   * Utility wrapper for reading from {@link InputStream}. It catches any errors
+   * thrown by the underlying stream (either IO or decompression-related), and
+   * re-throws as an IOException.
+   * 
+   * @param is - InputStream to be read from
+   * @param buf - buffer the data is read into
+   * @param off - offset within buf
+   * @param len - amount of data to be read
+   * @return number of bytes read
+   */
+  public static int wrappedReadForCompressedData(InputStream is, byte[] buf,
+      int off, int len) throws IOException {
+    try {
+      return is.read(buf, off, len);
+    } catch (IOException ie) {
+      throw ie;
+    } catch (Throwable t) {
+      throw new IOException("Error while reading compressed data", t);
+    }
+  }
+
   /**
    * Reads len bytes in a loop.
    *

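A minimal usage sketch for the new helper (not part of the patch; the surrounding stream set-up is illustrative only):

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.IOUtils;

public class WrappedReadExample {
  /** Reads up to buf.length bytes, surfacing decompressor failures as IOException. */
  public static int readChunk(InputStream compressedIn, byte[] buf) throws IOException {
    // Runtime errors thrown inside read() (e.g. by a decompressor) are rethrown
    // as IOException by the wrapper; plain IOExceptions pass through unchanged.
    return IOUtils.wrappedReadForCompressedData(compressedIn, buf, 0, buf.length);
  }
}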
+ 47 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -47,6 +47,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.MergeSort;
 import org.apache.hadoop.util.PriorityQueue;
+import org.apache.hadoop.util.Time;
 
 /** 
  * <code>SequenceFile</code>s are flat files consisting of binary key/value 
@@ -835,7 +836,7 @@ public class SequenceFile {
     {
       try {                                       
         MessageDigest digester = MessageDigest.getInstance("MD5");
-        long time = System.currentTimeMillis();
+        long time = Time.now();
         digester.update((new UID()+"@"+time).getBytes());
         sync = digester.digest();
       } catch (Exception e) {
@@ -1160,8 +1161,26 @@ public class SequenceFile {
       this.metadata = metadata;
       SerializationFactory serializationFactory = new SerializationFactory(conf);
       this.keySerializer = serializationFactory.getSerializer(keyClass);
+      if (this.keySerializer == null) {
+        throw new IOException(
+            "Could not find a serializer for the Key class: '"
+                + keyClass.getCanonicalName() + "'. "
+                + "Please ensure that the configuration '" +
+                CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+                + "properly configured, if you're using "
+                + "custom serialization.");
+      }
       this.keySerializer.open(buffer);
       this.uncompressedValSerializer = serializationFactory.getSerializer(valClass);
+      if (this.uncompressedValSerializer == null) {
+        throw new IOException(
+            "Could not find a serializer for the Value class: '"
+                + valClass.getCanonicalName() + "'. "
+                + "Please ensure that the configuration '" +
+                CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+                + "properly configured, if you're using "
+                + "custom serialization.");
+      }
       this.uncompressedValSerializer.open(buffer);
       if (this.codec != null) {
         ReflectionUtils.setConf(this.codec, this.conf);
@@ -1170,6 +1189,15 @@ public class SequenceFile {
         this.deflateOut = 
           new DataOutputStream(new BufferedOutputStream(deflateFilter));
         this.compressedValSerializer = serializationFactory.getSerializer(valClass);
+        if (this.compressedValSerializer == null) {
+          throw new IOException(
+              "Could not find a serializer for the Value class: '"
+                  + valClass.getCanonicalName() + "'. "
+                  + "Please ensure that the configuration '" +
+                  CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+                  + "properly configured, if you're using "
+                  + "custom serialization.");
+        }
         this.compressedValSerializer.open(deflateOut);
       }
       writeFileHeader();
@@ -1897,6 +1925,15 @@ public class SequenceFile {
           new SerializationFactory(conf);
         this.keyDeserializer =
           getDeserializer(serializationFactory, getKeyClass());
+        if (this.keyDeserializer == null) {
+          throw new IOException(
+              "Could not find a deserializer for the Key class: '"
+                  + getKeyClass().getCanonicalName() + "'. "
+                  + "Please ensure that the configuration '" +
+                  CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+                  + "properly configured, if you're using "
+                  + "custom serialization.");
+        }
         if (!blockCompressed) {
           this.keyDeserializer.open(valBuffer);
         } else {
@@ -1904,6 +1941,15 @@ public class SequenceFile {
         }
         this.valDeserializer =
           getDeserializer(serializationFactory, getValueClass());
+        if (this.valDeserializer == null) {
+          throw new IOException(
+              "Could not find a deserializer for the Value class: '"
+                  + getValueClass().getCanonicalName() + "'. "
+                  + "Please ensure that the configuration '" +
+                  CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+                  + "properly configured, if you're using "
+                  + "custom serialization.");
+        }
         this.valDeserializer.open(valIn);
       }
     }

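The new null checks point users at the io.serializations setting when no (de)serializer can be found. As a hedged sketch of what registering a custom serialization might look like (the class name com.example.MySerialization is a placeholder, not something this patch provides):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class SerializationConfigExample {
  public static Configuration withCustomSerialization() {
    Configuration conf = new Configuration();
    // Keep the default WritableSerialization and append a user-supplied
    // org.apache.hadoop.io.serializer.Serialization implementation.
    conf.setStrings(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
        "org.apache.hadoop.io.serializer.WritableSerialization",
        "com.example.MySerialization");
    return conf;
  }
}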
+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java

@@ -127,6 +127,8 @@ public class BlockDecompressorStream extends DecompressorStream {
   }
 
   public void resetState() throws IOException {
+    originalBlockSize = 0;
+    noUncompressedBytes = 0;
     super.resetState();
   }
 

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -75,6 +75,7 @@ import org.apache.hadoop.security.token.TokenInfo;
 import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Time;
 
 /** A client for an IPC service.  IPC calls take a single {@link Writable} as a
  * parameter, and return a {@link Writable} as their value.  A service runs on
@@ -316,7 +317,7 @@ public class Client {
 
     /** Update lastActivity with the current time. */
     private void touch() {
-      lastActivity.set(System.currentTimeMillis());
+      lastActivity.set(Time.now());
     }
 
     /**
@@ -762,7 +763,7 @@ public class Client {
     private synchronized boolean waitForWork() {
       if (calls.isEmpty() && !shouldCloseConnection.get()  && running.get())  {
         long timeout = maxIdleTime-
-              (System.currentTimeMillis()-lastActivity.get());
+              (Time.now()-lastActivity.get());
         if (timeout>0) {
           try {
             wait(timeout);
@@ -792,7 +793,7 @@ public class Client {
      * since last I/O activity is equal to or greater than the ping interval
      */
     private synchronized void sendPing() throws IOException {
-      long curTime = System.currentTimeMillis();
+      long curTime = Time.now();
       if ( curTime - lastActivity.get() >= pingInterval) {
         lastActivity.set(curTime);
         synchronized (out) {

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.ProtoUtil;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
@@ -185,7 +186,7 @@ public class ProtobufRpcEngine implements RpcEngine {
         throws ServiceException {
       long startTime = 0;
       if (LOG.isDebugEnabled()) {
-        startTime = System.currentTimeMillis();
+        startTime = Time.now();
       }
 
       HadoopRpcRequestProto rpcRequest = constructRpcRequest(method, args);
@@ -198,7 +199,7 @@ public class ProtobufRpcEngine implements RpcEngine {
       }
 
       if (LOG.isDebugEnabled()) {
-        long callTime = System.currentTimeMillis() - startTime;
+        long callTime = Time.now() - startTime;
         LOG.debug("Call: " + method.getName() + " " + callTime);
       }
       
@@ -426,10 +427,10 @@ public class ProtobufRpcEngine implements RpcEngine {
             .mergeFrom(rpcRequest.getRequest()).build();
         Message result;
         try {
-          long startTime = System.currentTimeMillis();
+          long startTime = Time.now();
           server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
           result = service.callBlockingMethod(methodDescriptor, null, param);
-          int processingTime = (int) (System.currentTimeMillis() - startTime);
+          int processingTime = (int) (Time.now() - startTime);
           int qTime = (int) (startTime - receiveTime);
           if (LOG.isDebugEnabled()) {
             LOG.info("Served: " + methodName + " queueTime= " + qTime +

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java

@@ -50,6 +50,7 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Time;
 
 import com.google.protobuf.BlockingService;
 
@@ -369,7 +370,7 @@ public class RPC {
                                int rpcTimeout,
                                RetryPolicy connectionRetryPolicy,
                                long timeout) throws IOException { 
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.now();
     IOException ioe;
     while (true) {
       try {
@@ -387,7 +388,7 @@ public class RPC {
         ioe = nrthe;
       }
       // check if timed out
-      if (System.currentTimeMillis()-timeout >= startTime) {
+      if (Time.now()-timeout >= startTime) {
         throw ioe;
       }
 

+ 9 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -95,6 +95,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -411,7 +412,7 @@ public abstract class Server {
       this.callId = id;
       this.rpcRequest = param;
       this.connection = connection;
-      this.timestamp = System.currentTimeMillis();
+      this.timestamp = Time.now();
       this.rpcResponse = null;
       this.rpcKind = kind;
     }
@@ -561,7 +562,7 @@ public abstract class Server {
      */
     private void cleanupConnections(boolean force) {
       if (force || numConnections > thresholdIdleConnections) {
-        long currentTime = System.currentTimeMillis();
+        long currentTime = Time.now();
         if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) {
           return;
         }
@@ -597,7 +598,7 @@ public abstract class Server {
           }
           else i++;
         }
-        lastCleanupRunTime = System.currentTimeMillis();
+        lastCleanupRunTime = Time.now();
       }
     }
 
@@ -682,7 +683,7 @@ public abstract class Server {
         try {
           reader.startAdd();
           SelectionKey readKey = reader.registerChannel(channel);
-          c = new Connection(readKey, channel, System.currentTimeMillis());
+          c = new Connection(readKey, channel, Time.now());
           readKey.attach(c);
           synchronized (connectionList) {
             connectionList.add(numConnections, c);
@@ -704,7 +705,7 @@ public abstract class Server {
       if (c == null) {
         return;  
       }
-      c.setLastContact(System.currentTimeMillis());
+      c.setLastContact(Time.now());
       
       try {
         count = c.readAndProcess();
@@ -726,7 +727,7 @@ public abstract class Server {
         c = null;
       }
       else {
-        c.setLastContact(System.currentTimeMillis());
+        c.setLastContact(Time.now());
       }
     }   
 
@@ -805,7 +806,7 @@ public abstract class Server {
               LOG.info(getName() + ": doAsyncWrite threw exception " + e);
             }
           }
-          long now = System.currentTimeMillis();
+          long now = Time.now();
           if (now < lastPurgeTime + PURGE_INTERVAL) {
             continue;
           }
@@ -951,7 +952,7 @@ public abstract class Server {
             
             if (inHandler) {
               // set the serve time when the response has to be sent later
-              call.timestamp = System.currentTimeMillis();
+              call.timestamp = Time.now();
               
               incPending();
               try {

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.*;
@@ -218,13 +219,13 @@ public class WritableRpcEngine implements RpcEngine {
       throws Throwable {
       long startTime = 0;
       if (LOG.isDebugEnabled()) {
-        startTime = System.currentTimeMillis();
+        startTime = Time.now();
       }
 
       ObjectWritable value = (ObjectWritable)
         client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId);
       if (LOG.isDebugEnabled()) {
-        long callTime = System.currentTimeMillis() - startTime;
+        long callTime = Time.now() - startTime;
         LOG.debug("Call: " + method.getName() + " " + callTime);
       }
       return value.get();
@@ -464,7 +465,7 @@ public class WritableRpcEngine implements RpcEngine {
 
           // Invoke the protocol method
 
-          long startTime = System.currentTimeMillis();
+          long startTime = Time.now();
           Method method = 
               protocolImpl.protocolClass.getMethod(call.getMethodName(),
               call.getParameterClasses());
@@ -472,7 +473,7 @@ public class WritableRpcEngine implements RpcEngine {
           server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
           Object value = 
              method.invoke(protocolImpl.protocolImpl, call.getParameters());
-          int processingTime = (int) (System.currentTimeMillis() - startTime);
+          int processingTime = (int) (Time.now() - startTime);
           int qTime = (int) (startTime-receivedTime);
           if (LOG.isDebugEnabled()) {
             LOG.debug("Served: " + call.getMethodName() +

+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordBuilderImpl.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.metrics2.MetricsFilter;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.util.Time;
 
 class MetricsRecordBuilderImpl extends MetricsRecordBuilder {
   private final MetricsCollector parent;
@@ -44,7 +45,7 @@ class MetricsRecordBuilderImpl extends MetricsRecordBuilder {
                            MetricsFilter rf, MetricsFilter mf,
                            boolean acceptable) {
     this.parent = parent;
-    timestamp = System.currentTimeMillis();
+    timestamp = Time.now();
     recInfo = info;
     metrics = Lists.newArrayList();
     tags = Lists.newArrayList();

+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import static org.apache.hadoop.metrics2.util.Contracts.*;
 import org.apache.hadoop.metrics2.MetricsFilter;
 import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.util.Time;
 
 /**
  * An adapter class for metrics sink and associated filters
@@ -158,7 +159,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
     }
     if (ts > 0) {
       sink.flush();
-      latency.add(System.currentTimeMillis() - ts);
+      latency.add(Time.now() - ts);
     }
     LOG.debug("Done");
   }

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java

@@ -40,6 +40,8 @@ import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.MetricsTag;
 import static org.apache.hadoop.metrics2.impl.MetricsConfig.*;
 import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.util.Time;
+
 import static org.apache.hadoop.metrics2.util.Contracts.*;
 
 /**
@@ -152,9 +154,9 @@ class MetricsSourceAdapter implements DynamicMBean {
   private void updateJmxCache() {
     boolean getAllMetrics = false;
     synchronized(this) {
-      if (System.currentTimeMillis() - jmxCacheTS >= jmxCacheTTL) {
+      if (Time.now() - jmxCacheTS >= jmxCacheTTL) {
        // temporarilly advance the expiry while updating the cache
-        jmxCacheTS = System.currentTimeMillis() + jmxCacheTTL;
+        jmxCacheTS = Time.now() + jmxCacheTTL;
         if (lastRecs == null) {
           getAllMetrics = true;
         }
@@ -175,7 +177,7 @@ class MetricsSourceAdapter implements DynamicMBean {
       if (oldCacheSize < newCacheSize) {
         updateInfoCache();
       }
-      jmxCacheTS = System.currentTimeMillis();
+      jmxCacheTS = Time.now();
       lastRecs = null;  // in case regular interval update is not running
     }
   }

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java

@@ -60,6 +60,7 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MetricsSourceBuilder;
 import org.apache.hadoop.metrics2.lib.MutableStat;
 import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.util.Time;
 
 /**
  * A base class for metrics system singletons
@@ -372,10 +373,10 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
 
   private void snapshotMetrics(MetricsSourceAdapter sa,
                                MetricsBufferBuilder bufferBuilder) {
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.now();
     bufferBuilder.add(sa.name(), sa.getMetrics(collector, false));
     collector.clear();
-    snapshotStat.add(System.currentTimeMillis() - startTime);
+    snapshotStat.add(Time.now() - startTime);
     LOG.debug("Snapshotted source "+ sa.name());
   }
 
@@ -386,9 +387,9 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
   synchronized void publishMetrics(MetricsBuffer buffer) {
     int dropped = 0;
     for (MetricsSinkAdapter sa : sinks.values()) {
-      long startTime = System.currentTimeMillis();
+      long startTime = Time.now();
       dropped += sa.putMetrics(buffer, logicalTime) ? 0 : 1;
-      publishStat.add(System.currentTimeMillis() - startTime);
+      publishStat.add(Time.now() - startTime);
     }
     droppedPubAll.incr(dropped);
   }

+ 18 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java

@@ -180,6 +180,24 @@ public class MetricsRegistry {
     return ret;
   }
 
+  /**
+   * Create a mutable metric that estimates quantiles of a stream of values
+   * @param name of the metric
+   * @param desc metric description
+   * @param sampleName of the metric (e.g., "Ops")
+   * @param valueName of the metric (e.g., "Time" or "Latency")
+   * @param interval rollover interval of estimator in seconds
+   * @return a new quantile estimator object
+   */
+  public synchronized MutableQuantiles newQuantiles(String name, String desc,
+      String sampleName, String valueName, int interval) {
+    checkMetricName(name);
+    MutableQuantiles ret = 
+        new MutableQuantiles(name, desc, sampleName, valueName, interval);
+    metricsMap.put(name, ret);
+    return ret;
+  }
+  
   /**
    * Create a mutable metric with stats
    * @param name  of the metric

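As a usage sketch for the new factory method (the names here are illustrative, not taken from the patch), a metrics source would typically create the quantiles metric once and feed it sample values as operations complete:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;

public class QuantileMetricsExample {
  private final MetricsRegistry registry = new MetricsRegistry("ExampleSource");

  // Latency quantiles estimated over 60 second rollover windows.
  private final MutableQuantiles rpcLatency = registry.newQuantiles(
      "rpcLatency", "RPC processing latency", "Ops", "Latency", 60);

  public void rpcFinished(long elapsedMillis) {
    rpcLatency.add(elapsedMillis);
  }
}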
+ 165 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java

@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.lib;
+
+import static org.apache.hadoop.metrics2.lib.Interns.info;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.util.Quantile;
+import org.apache.hadoop.metrics2.util.SampleQuantiles;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Watches a stream of long values, maintaining online estimates of specific
+ * quantiles with provably low error bounds. This is particularly useful for
+ * accurate high-percentile (e.g. 95th, 99th) latency metrics.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class MutableQuantiles extends MutableMetric {
+
+  static final Quantile[] quantiles = { new Quantile(0.50, 0.050),
+      new Quantile(0.75, 0.025), new Quantile(0.90, 0.010),
+      new Quantile(0.95, 0.005), new Quantile(0.99, 0.001) };
+
+  private final MetricsInfo numInfo;
+  private final MetricsInfo[] quantileInfos;
+  private final int interval;
+
+  private SampleQuantiles estimator;
+  private long previousCount = 0;
+
+  @VisibleForTesting
+  protected Map<Quantile, Long> previousSnapshot = null;
+
+  private final ScheduledExecutorService scheduler = Executors
+      .newScheduledThreadPool(1);
+
+  /**
+   * Instantiates a new {@link MutableQuantiles} for a metric that rolls itself
+   * over on the specified time interval.
+   * 
+   * @param name
+   *          of the metric
+   * @param description
+   *          long-form textual description of the metric
+   * @param sampleName
+   *          type of items in the stream (e.g., "Ops")
+   * @param valueName
+   *          type of the values
+   * @param interval
+   *          rollover interval (in seconds) of the estimator
+   */
+  public MutableQuantiles(String name, String description, String sampleName,
+      String valueName, int interval) {
+    String ucName = StringUtils.capitalize(name);
+    String usName = StringUtils.capitalize(sampleName);
+    String uvName = StringUtils.capitalize(valueName);
+    String desc = StringUtils.uncapitalize(description);
+    String lsName = StringUtils.uncapitalize(sampleName);
+    String lvName = StringUtils.uncapitalize(valueName);
+
+    numInfo = info(ucName + "Num" + usName, String.format(
+        "Number of %s for %s with %ds interval", lsName, desc, interval));
+    // Construct the MetricsInfos for the quantiles, converting to percentiles
+    quantileInfos = new MetricsInfo[quantiles.length];
+    String nameTemplate = ucName + "%dthPercentile" + interval + "sInterval"
+        + uvName;
+    String descTemplate = "%d percentile " + lvName + " with " + interval
+        + " second interval for " + desc;
+    for (int i = 0; i < quantiles.length; i++) {
+      int percentile = (int) (100 * quantiles[i].quantile);
+      quantileInfos[i] = info(String.format(nameTemplate, percentile),
+          String.format(descTemplate, percentile));
+    }
+
+    estimator = new SampleQuantiles(quantiles);
+
+    this.interval = interval;
+    scheduler.scheduleAtFixedRate(new RolloverSample(this), interval, interval,
+        TimeUnit.SECONDS);
+  }
+
+  @Override
+  public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) {
+    if (all || changed()) {
+      builder.addGauge(numInfo, previousCount);
+      for (int i = 0; i < quantiles.length; i++) {
+        long newValue = 0;
+        // If snapshot is null, we failed to update since the window was empty
+        if (previousSnapshot != null) {
+          newValue = previousSnapshot.get(quantiles[i]);
+        }
+        builder.addGauge(quantileInfos[i], newValue);
+      }
+      if (changed()) {
+        clearChanged();
+      }
+    }
+  }
+
+  public synchronized void add(long value) {
+    estimator.insert(value);
+  }
+
+  public int getInterval() {
+    return interval;
+  }
+
+  /**
+   * Runnable used to periodically roll over the internal
+   * {@link SampleQuantiles} every interval.
+   */
+  private static class RolloverSample implements Runnable {
+
+    MutableQuantiles parent;
+
+    public RolloverSample(MutableQuantiles parent) {
+      this.parent = parent;
+    }
+
+    @Override
+    public void run() {
+      synchronized (parent) {
+        try {
+          parent.previousCount = parent.estimator.getCount();
+          parent.previousSnapshot = parent.estimator.snapshot();
+        } catch (IOException e) {
+          // Couldn't get a new snapshot because the window was empty
+          parent.previousCount = 0;
+          parent.previousSnapshot = null;
+        }
+        parent.estimator.clear();
+      }
+      parent.setChanged();
+    }
+
+  }
+}

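For orientation (derived from the name and description templates in the constructor above, not additional code in the patch): with name "rpcLatency", sampleName "Ops", valueName "Latency" and a 60 second interval, the metric emits gauges named roughly:

RpcLatencyNumOps
RpcLatency50thPercentile60sIntervalLatency
RpcLatency75thPercentile60sIntervalLatency
RpcLatency90thPercentile60sIntervalLatency
RpcLatency95thPercentile60sIntervalLatency
RpcLatency99thPercentile60sIntervalLatency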
+ 60 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Quantile.java

@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Specifies a quantile (with error bounds) to be watched by a
+ * {@link SampleQuantiles} object.
+ */
+@InterfaceAudience.Private
+public class Quantile {
+  public final double quantile;
+  public final double error;
+
+  public Quantile(double quantile, double error) {
+    this.quantile = quantile;
+    this.error = error;
+  }
+
+  @Override
+  public boolean equals(Object aThat) {
+    if (this == aThat) {
+      return true;
+    }
+    if (!(aThat instanceof Quantile)) {
+      return false;
+    }
+
+    Quantile that = (Quantile) aThat;
+
+    long qbits = Double.doubleToLongBits(quantile);
+    long ebits = Double.doubleToLongBits(error);
+
+    return qbits == Double.doubleToLongBits(that.quantile)
+        && ebits == Double.doubleToLongBits(that.error);
+  }
+
+  @Override
+  public int hashCode() {
+    return (int) (Double.doubleToLongBits(quantile) ^ Double
+        .doubleToLongBits(error));
+  }
+}

+ 310 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java

@@ -0,0 +1,310 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.util;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.ListIterator;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm
+ * for streaming calculation of targeted high-percentile epsilon-approximate
+ * quantiles.
+ * 
+ * This is a generalization of the earlier work by Greenwald and Khanna (GK),
+ * which essentially allows different error bounds on the targeted quantiles,
+ * which allows for far more efficient calculation of high-percentiles.
+ * 
+ * See: Cormode, Korn, Muthukrishnan, and Srivastava
+ * "Effective Computation of Biased Quantiles over Data Streams" in ICDE 2005
+ * 
+ * Greenwald and Khanna,
+ * "Space-efficient online computation of quantile summaries" in SIGMOD 2001
+ * 
+ */
+@InterfaceAudience.Private
+public class SampleQuantiles {
+
+  /**
+   * Total number of items in stream
+   */
+  private long count = 0;
+
+  /**
+   * Current list of sampled items, maintained in sorted order with error bounds
+   */
+  private LinkedList<SampleItem> samples;
+
+  /**
+   * Buffers incoming items to be inserted in batch. Items are inserted into 
+   * the buffer linearly. When the buffer fills, it is flushed into the samples
+   * array in its entirety.
+   */
+  private long[] buffer = new long[500];
+  private int bufferCount = 0;
+
+  /**
+   * Array of Quantiles that we care about, along with desired error.
+   */
+  private final Quantile quantiles[];
+
+  public SampleQuantiles(Quantile[] quantiles) {
+    this.quantiles = quantiles;
+    this.samples = new LinkedList<SampleItem>();
+  }
+
+  /**
+   * Specifies the allowable error for this rank, depending on which quantiles
+   * are being targeted.
+   * 
+   * This is the f(r_i, n) function from the CKMS paper. It's basically how wide
+   * the range of this rank can be.
+   * 
+   * @param rank
+   *          the index in the list of samples
+   */
+  private double allowableError(int rank) {
+    int size = samples.size();
+    double minError = size + 1;
+    for (Quantile q : quantiles) {
+      double error;
+      if (rank <= q.quantile * size) {
+        error = (2.0 * q.error * (size - rank)) / (1.0 - q.quantile);
+      } else {
+        error = (2.0 * q.error * rank) / q.quantile;
+      }
+      if (error < minError) {
+        minError = error;
+      }
+    }
+
+    return minError;
+  }
+
+  /**
+   * Add a new value from the stream.
+   * 
+   * @param v
+   */
+  synchronized public void insert(long v) {
+    buffer[bufferCount] = v;
+    bufferCount++;
+
+    count++;
+
+    if (bufferCount == buffer.length) {
+      insertBatch();
+      compress();
+    }
+  }
+
+  /**
+   * Merges items from buffer into the samples array in one pass.
+   * This is more efficient than doing an insert on every item.
+   */
+  private void insertBatch() {
+    if (bufferCount == 0) {
+      return;
+    }
+
+    Arrays.sort(buffer, 0, bufferCount);
+
+    // Base case: no samples
+    int start = 0;
+    if (samples.size() == 0) {
+      SampleItem newItem = new SampleItem(buffer[0], 1, 0);
+      samples.add(newItem);
+      start++;
+    }
+
+    ListIterator<SampleItem> it = samples.listIterator();
+    SampleItem item = it.next();
+    for (int i = start; i < bufferCount; i++) {
+      long v = buffer[i];
+      while (it.nextIndex() < samples.size() && item.value < v) {
+        item = it.next();
+      }
+      // If we found that bigger item, back up so we insert ourselves before it
+      if (item.value > v) {
+        it.previous();
+      }
+      // We use different indexes for the edge comparisons, because of the above
+      // if statement that adjusts the iterator
+      int delta;
+      if (it.previousIndex() == 0 || it.nextIndex() == samples.size()) {
+        delta = 0;
+      } else {
+        delta = ((int) Math.floor(allowableError(it.nextIndex()))) - 1;
+      }
+      SampleItem newItem = new SampleItem(v, 1, delta);
+      it.add(newItem);
+      item = newItem;
+    }
+
+    bufferCount = 0;
+  }
+
+  /**
+   * Try to remove extraneous items from the set of sampled items. This checks
+   * if an item is unnecessary based on the desired error bounds, and merges it
+   * with the adjacent item if it is.
+   */
+  private void compress() {
+    if (samples.size() < 2) {
+      return;
+    }
+
+    ListIterator<SampleItem> it = samples.listIterator();
+    SampleItem prev = null;
+    SampleItem next = it.next();
+
+    while (it.hasNext()) {
+      prev = next;
+      next = it.next();
+      if (prev.g + next.g + next.delta <= allowableError(it.previousIndex())) {
+        next.g += prev.g;
+        // Remove prev. it.remove() kills the last thing returned.
+        it.previous();
+        it.previous();
+        it.remove();
+        // it.next() is now equal to next, skip it back forward again
+        it.next();
+      }
+    }
+  }
+
+  /**
+   * Get the estimated value at the specified quantile.
+   * 
+   * @param quantile Queried quantile, e.g. 0.50 or 0.99.
+   * @return Estimated value at that quantile.
+   */
+  private long query(double quantile) throws IOException {
+    if (samples.size() == 0) {
+      throw new IOException("No samples present");
+    }
+
+    int rankMin = 0;
+    int desired = (int) (quantile * count);
+
+    for (int i = 1; i < samples.size(); i++) {
+      SampleItem prev = samples.get(i - 1);
+      SampleItem cur = samples.get(i);
+
+      rankMin += prev.g;
+
+      if (rankMin + cur.g + cur.delta > desired + (allowableError(i) / 2)) {
+        return prev.value;
+      }
+    }
+
+    // edge case of wanting max value
+    return samples.get(samples.size() - 1).value;
+  }
+
+  /**
+   * Get a snapshot of the current values of all the tracked quantiles.
+   * 
+   * @return snapshot of the tracked quantiles
+   * @throws IOException
+   *           if no items have been added to the estimator
+   */
+  synchronized public Map<Quantile, Long> snapshot() throws IOException {
+    // flush the buffer first for best results
+    insertBatch();
+    Map<Quantile, Long> values = new HashMap<Quantile, Long>(quantiles.length);
+    for (int i = 0; i < quantiles.length; i++) {
+      values.put(quantiles[i], query(quantiles[i].quantile));
+    }
+
+    return values;
+  }
+
+  /**
+   * Returns the number of items that the estimator has processed
+   * 
+   * @return count total number of items processed
+   */
+  synchronized public long getCount() {
+    return count;
+  }
+
+  /**
+   * Returns the number of samples kept by the estimator
+   * 
+   * @return count current number of samples
+   */
+  @VisibleForTesting
+  synchronized public int getSampleCount() {
+    return samples.size();
+  }
+
+  /**
+   * Resets the estimator, clearing out all previously inserted items
+   */
+  synchronized public void clear() {
+    count = 0;
+    bufferCount = 0;
+    samples.clear();
+  }
+
+  /**
+   * Describes a measured value passed to the estimator, tracking additional
+   * metadata required by the CKMS algorithm.
+   */
+  private static class SampleItem {
+    
+    /**
+     * Value of the sampled item (e.g. a measured latency value)
+     */
+    public final long value;
+    
+    /**
+     * Difference between the lowest possible rank of the previous item, and 
+     * the lowest possible rank of this item.
+     * 
+     * The sum of the g of all previous items yields this item's lower bound. 
+     */
+    public int g;
+    
+    /**
+     * Difference between the item's greatest possible rank and lowest possible
+     * rank.
+     */
+    public final int delta;
+
+    public SampleItem(long value, int lowerDelta, int delta) {
+      this.value = value;
+      this.g = lowerDelta;
+      this.delta = delta;
+    }
+
+    @Override
+    public String toString() {
+      return String.format("%d, %d, %d", value, g, delta);
+    }
+  }
+}

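A standalone usage sketch of the estimator outside the metrics system (illustrative only; the targets and inserted values are arbitrary):

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.metrics2.util.Quantile;
import org.apache.hadoop.metrics2.util.SampleQuantiles;

public class SampleQuantilesExample {
  public static void main(String[] args) throws IOException {
    // Track the median loosely and the 99th percentile with a tight error bound.
    Quantile[] targets = { new Quantile(0.50, 0.050), new Quantile(0.99, 0.001) };
    SampleQuantiles estimator = new SampleQuantiles(targets);

    for (long v = 1; v <= 100000; v++) {
      estimator.insert(v);
    }

    // snapshot() throws IOException if nothing has been inserted yet.
    Map<Quantile, Long> snapshot = estimator.snapshot();
    for (Quantile q : targets) {
      System.out.println(q.quantile + " -> " + snapshot.get(q));
    }
  }
}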
+ 7 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java

@@ -34,6 +34,7 @@ import java.util.LinkedList;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 
 /**
  * This supports input and output streams for a socket channels. 
@@ -194,7 +195,7 @@ abstract class SocketIOWithTimeout {
       }
 
       long timeoutLeft = timeout;
-      long endTime = (timeout > 0) ? (System.currentTimeMillis() + timeout): 0;
+      long endTime = (timeout > 0) ? (Time.now() + timeout): 0;
       
       while (true) {
         // we might have to call finishConnect() more than once
@@ -209,7 +210,7 @@ abstract class SocketIOWithTimeout {
         
         if (ret == 0 ||
             (timeout > 0 &&  
-              (timeoutLeft = (endTime - System.currentTimeMillis())) <= 0)) {
+              (timeoutLeft = (endTime - Time.now())) <= 0)) {
           throw new SocketTimeoutException(
                     timeoutExceptionString(channel, timeout, 
                                            SelectionKey.OP_CONNECT));
@@ -329,7 +330,7 @@ abstract class SocketIOWithTimeout {
      
      try {
        while (true) {
-          long start = (timeout == 0) ? 0 : System.currentTimeMillis();
+          long start = (timeout == 0) ? 0 : Time.now();
 
           key = channel.register(info.selector, ops);
           ret = info.selector.select(timeout);
@@ -342,7 +343,7 @@ abstract class SocketIOWithTimeout {
            * unknown reasons. So select again if required.
            */
           if (timeout > 0) {
-            timeout -= System.currentTimeMillis() - start;
+            timeout -= Time.now() - start;
             if (timeout <= 0) {
               return 0;
             }
@@ -414,7 +415,7 @@ abstract class SocketIOWithTimeout {
         selInfo = queue.removeLast();
       }
       
-      trimIdleSelectors(System.currentTimeMillis());
+      trimIdleSelectors(Time.now());
       return selInfo;
     }
     
@@ -425,7 +426,7 @@ abstract class SocketIOWithTimeout {
      * @param info
      */
     private synchronized void release(SelectorInfo info) {
-      long now = System.currentTimeMillis();
+      long now = Time.now();
       trimIdleSelectors(now);
       info.lastActivityTime = now;
       info.queue.addLast(info);

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java

@@ -32,7 +32,7 @@ import java.util.Map;
 
 /**
  * Initializes hadoop-auth AuthenticationFilter which provides support for
- * Kerberos HTTP SPENGO authentication.
+ * Kerberos HTTP SPNEGO authentication.
  * <p/>
  * It enables anonymous access, simple/speudo and Kerberos HTTP SPNEGO
  * authentication  for Hadoop JobTracker, NameNode, DataNodes and

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Time;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -75,7 +76,7 @@ public class Groups {
   public List<String> getGroups(String user) throws IOException {
     // Return cached value if available
     CachedGroups groups = userToGroupsMap.get(user);
-    long now = System.currentTimeMillis();
+    long now = Time.now();
     // if cache has a value and it hasn't expired
     if (groups != null && (groups.getTimestamp() + cacheTimeout > now)) {
       if(LOG.isDebugEnabled()) {
@@ -134,7 +135,7 @@ public class Groups {
      */
     CachedGroups(List<String> groups) {
       this.groups = groups;
-      this.timestamp = System.currentTimeMillis();
+      this.timestamp = Time.now();
     }
 
     /**

+ 124 - 24
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.security;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
 
+import java.io.File;
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.security.AccessControlContext;
@@ -32,6 +33,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -62,6 +64,7 @@ import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.Time;
 
 /**
  * User and group information for Hadoop.
@@ -453,9 +456,28 @@ public class UserGroupInformation {
       return null;
     }
   }
-  
+
+  /**
+   * Represents a javax.security configuration that is created at runtime.
+   */
+  private static class DynamicConfiguration
+      extends javax.security.auth.login.Configuration {
+    private AppConfigurationEntry[] ace;
+    
+    DynamicConfiguration(AppConfigurationEntry[] ace) {
+      this.ace = ace;
+    }
+    
+    @Override
+    public AppConfigurationEntry[] getAppConfigurationEntry(String appName) {
+      return ace;
+    }
+  }
+
   private static LoginContext
-  newLoginContext(String appName, Subject subject) throws LoginException {
+  newLoginContext(String appName, Subject subject,
+    javax.security.auth.login.Configuration loginConf)
+      throws LoginException {
     // Temporarily switch the thread's ContextClassLoader to match this
     // class's classloader, so that we can properly load HadoopLoginModule
     // from the JAAS libraries.
@@ -463,7 +485,7 @@
     ClassLoader oldCCL = t.getContextClassLoader();
     t.setContextClassLoader(HadoopLoginModule.class.getClassLoader());
     try {
-      return new LoginContext(appName, subject, null, new HadoopConfiguration());
+      return new LoginContext(appName, subject, null, loginConf);
     } finally {
       t.setContextClassLoader(oldCCL);
     }
@@ -515,6 +537,82 @@
     }
   }
 
+  /**
+   * Find the most appropriate UserGroupInformation to use
+   *
+   * @param ticketCachePath    The Kerberos ticket cache path, or NULL
+   *                           if none is specfied
+   * @param user               The user name, or NULL if none is specified.
+   *
+   * @return                   The most appropriate UserGroupInformation
+   */ 
+  public static UserGroupInformation getBestUGI(
+      String ticketCachePath, String user) throws IOException {
+    if (ticketCachePath != null) {
+      return getUGIFromTicketCache(ticketCachePath, user);
+    } else if (user == null) {
+      return getCurrentUser();
+    } else {
+      return createRemoteUser(user);
+    }    
+  }
+
+  /**
+   * Create a UserGroupInformation from a Kerberos ticket cache.
+   * 
+   * @param user                The principal name to load from the ticket
+   *                            cache
+   * @param ticketCachePath     the path to the ticket cache file
+   *
+   * @throws IOException        if the kerberos login fails
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public static UserGroupInformation getUGIFromTicketCache(
+            String ticketCache, String user) throws IOException {
+    if (!isSecurityEnabled()) {
+      return getBestUGI(null, user);
+    }
+    try {
+      Map<String,String> krbOptions = new HashMap<String,String>();
+      krbOptions.put("doNotPrompt", "true");
+      krbOptions.put("useTicketCache", "true");
+      krbOptions.put("useKeyTab", "false");
+      krbOptions.put("renewTGT", "false");
+      krbOptions.put("ticketCache", ticketCache);
+      krbOptions.putAll(HadoopConfiguration.BASIC_JAAS_OPTIONS);
+      AppConfigurationEntry ace = new AppConfigurationEntry(
+          KerberosUtil.getKrb5LoginModuleName(),
+          LoginModuleControlFlag.REQUIRED,
+          krbOptions);
+      DynamicConfiguration dynConf =
+          new DynamicConfiguration(new AppConfigurationEntry[]{ ace });
+      LoginContext login = newLoginContext(
+          HadoopConfiguration.USER_KERBEROS_CONFIG_NAME, null, dynConf);
+      login.login();
+
+      Subject loginSubject = login.getSubject();
+      Set<Principal> loginPrincipals = loginSubject.getPrincipals();
+      if (loginPrincipals.isEmpty()) {
+        throw new RuntimeException("No login principals found!");
+      }
+      if (loginPrincipals.size() != 1) {
+        LOG.warn("found more than one principal in the ticket cache file " +
+          ticketCache);
+      }
+      User ugiUser = new User(loginPrincipals.iterator().next().getName(),
+          AuthenticationMethod.KERBEROS, login);
+      loginSubject.getPrincipals().add(ugiUser);
+      UserGroupInformation ugi = new UserGroupInformation(loginSubject);
+      ugi.setLogin(login);
+      ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
+      return ugi;
+    } catch (LoginException le) {
+      throw new IOException("failure to login using ticket cache file " +
+          ticketCache, le);
+    }
+  }
+
   /**
    * Get the currently logged in user.
    * @return the logged in user
@@ -530,10 +628,10 @@
         LoginContext login;
         if (isSecurityEnabled()) {
           login = newLoginContext(HadoopConfiguration.USER_KERBEROS_CONFIG_NAME,
-              subject);
+              subject, new HadoopConfiguration());
         } else {
           login = newLoginContext(HadoopConfiguration.SIMPLE_CONFIG_NAME, 
-              subject);
+              subject, new HadoopConfiguration());
         }
         login.login();
         loginUser = new UserGroupInformation(subject);
@@ -613,7 +711,7 @@
             long nextRefresh = getRefreshTime(tgt);
             while (true) {
               try {
-                long now = System.currentTimeMillis();
+                long now = Time.now();
                 if(LOG.isDebugEnabled()) {
                   LOG.debug("Current time is " + now);
                   LOG.debug("Next refresh is " + nextRefresh);
@@ -673,17 +771,17 @@
     LoginContext login; 
     long start = 0;
     try {
-      login = 
-        newLoginContext(HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, subject);
-      start = System.currentTimeMillis();
+      login = newLoginContext(HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME,
+            subject, new HadoopConfiguration());
+      start = Time.now();
       login.login();
-      metrics.loginSuccess.add(System.currentTimeMillis() - start);
+      metrics.loginSuccess.add(Time.now() - start);
       loginUser = new UserGroupInformation(subject);
       loginUser.setLogin(login);
       loginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
     } catch (LoginException le) {
       if (start > 0) {
-        metrics.loginFailure.add(System.currentTimeMillis() - start);
+        metrics.loginFailure.add(Time.now() - start);
       }
       throw new IOException("Login failure for " + user + " from keytab " + 
                             path, le);
@@ -703,7 +801,7 @@
         || !isKeytab)
       return;
     KerberosTicket tgt = getTGT();
-    if (tgt != null && System.currentTimeMillis() < getRefreshTime(tgt)) {
+    if (tgt != null && Time.now() < getRefreshTime(tgt)) {
       return;
     }
     reloginFromKeytab();
@@ -727,7 +825,7 @@
          !isKeytab)
       return;
     
-    long now = System.currentTimeMillis();
+    long now = Time.now();
     if (!hasSufficientTimeElapsed(now)) {
       return;
     }
@@ -756,16 +854,17 @@
         // login and also update the subject field of this instance to
         // have the new credentials (pass it to the LoginContext constructor)
         login = newLoginContext(
-            HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, getSubject());
+            HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, getSubject(),
+            new HadoopConfiguration());
         LOG.info("Initiating re-login for " + keytabPrincipal);
-        start = System.currentTimeMillis();
+        start = Time.now();
         login.login();
-        metrics.loginSuccess.add(System.currentTimeMillis() - start);
+        metrics.loginSuccess.add(Time.now() - start);
         setLogin(login);
       }
     } catch (LoginException le) {
       if (start > 0) {
-        metrics.loginFailure.add(System.currentTimeMillis() - start);
+        metrics.loginFailure.add(Time.now() - start);
       }
       throw new IOException("Login failure for " + keytabPrincipal + 
           " from keytab " + keytabFile, le);
@@ -791,7 +890,7 @@
     if (login == null) {
       throw new IOException("login must be done first");
     }
-    long now = System.currentTimeMillis();
+    long now = Time.now();
     if (!hasSufficientTimeElapsed(now)) {
       return;
     }
@@ -807,7 +906,7 @@
       //have the new credentials (pass it to the LoginContext constructor)
       login = 
         newLoginContext(HadoopConfiguration.USER_KERBEROS_CONFIG_NAME, 
-            getSubject());
+            getSubject(), new HadoopConfiguration());
       LOG.info("Initiating re-login for " + getUserName());
       login.login();
       setLogin(login);
@@ -842,12 +941,13 @@
       keytabPrincipal = user;
       Subject subject = new Subject();
       
-      LoginContext login = 
-        newLoginContext(HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, subject); 
+      LoginContext login = newLoginContext(
+          HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, subject,
+          new HadoopConfiguration());
        
-      start = System.currentTimeMillis();
+      start = Time.now();
       login.login();
-      metrics.loginSuccess.add(System.currentTimeMillis() - start);
+      metrics.loginSuccess.add(Time.now() - start);
       UserGroupInformation newLoginUser = new UserGroupInformation(subject);
       newLoginUser.setLogin(login);
       newLoginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
@@ -855,7 +955,7 @@
       return newLoginUser;
     } catch (LoginException le) {
       if (start > 0) {
-        metrics.loginFailure.add(System.currentTimeMillis() - start);
+        metrics.loginFailure.add(Time.now() - start);
       }
       throw new IOException("Login failure for " + user + " from keytab " + 
                             path, le);

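A minimal caller sketch for the ticket-cache support added above. It uses only getBestUGI() (which falls back to getUGIFromTicketCache(), getCurrentUser() or createRemoteUser(), as shown in the hunk) together with the existing doAs() API; the cache path, user name and the listStatus call are made-up placeholders, not part of this commit.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class BestUgiSketch {
  public static void main(String[] args) throws Exception {
    // Pick the most specific identity available: an explicit ticket cache first,
    // otherwise the current user, otherwise a plain remote user name.
    UserGroupInformation ugi =
        UserGroupInformation.getBestUGI("/tmp/krb5cc_1000", "alice"); // placeholder values

    // Run a file-system call as that user; any Hadoop RPC would work the same way.
    FileStatus[] listing = ugi.doAs(new PrivilegedExceptionAction<FileStatus[]>() {
      @Override
      public FileStatus[] run() throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        return fs.listStatus(new Path("/"));
      }
    });
    System.out.println("entries under /: " + listing.length);
  }
}
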
+ 8 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.security.HadoopKerberosName;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.base.Preconditions;
 
@@ -165,7 +166,7 @@ extends AbstractDelegationTokenIdentifier>
     synchronized (this) {
       removeExpiredKeys();
       /* set final expiry date for retiring currentKey */
-      currentKey.setExpiryDate(System.currentTimeMillis() + tokenMaxLifetime);
+      currentKey.setExpiryDate(Time.now() + tokenMaxLifetime);
       /*
        * currentKey might have been removed by removeExpiredKeys(), if
        * updateMasterKey() isn't called at expected interval. Add it back to
@@ -177,7 +178,7 @@ extends AbstractDelegationTokenIdentifier>
   }
 
   private synchronized void removeExpiredKeys() {
-    long now = System.currentTimeMillis();
+    long now = Time.now();
     for (Iterator<Map.Entry<Integer, DelegationKey>> it = allKeys.entrySet()
         .iterator(); it.hasNext();) {
       Map.Entry<Integer, DelegationKey> e = it.next();
@@ -191,7 +192,7 @@ extends AbstractDelegationTokenIdentifier>
   protected synchronized byte[] createPassword(TokenIdent identifier) {
     LOG.info("Creating password for identifier: "+identifier);
     int sequenceNum;
-    long now = System.currentTimeMillis();
+    long now = Time.now();
     sequenceNum = ++delegationTokenSequenceNumber;
     identifier.setIssueDate(now);
     identifier.setMaxDate(now + tokenMaxLifetime);
@@ -211,7 +212,7 @@ extends AbstractDelegationTokenIdentifier>
       throw new InvalidToken("token (" + identifier.toString()
           + ") can't be found in cache");
     }
-    long now = System.currentTimeMillis();
+    long now = Time.now();
     if (info.getRenewDate() < now) {
       throw new InvalidToken("token (" + identifier.toString() + ") is expired");
     }
@@ -243,7 +244,7 @@ extends AbstractDelegationTokenIdentifier>
    */
   public synchronized long renewToken(Token<TokenIdent> token,
                          String renewer) throws InvalidToken, IOException {
-    long now = System.currentTimeMillis();
+    long now = Time.now();
     ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
     DataInputStream in = new DataInputStream(buf);
     TokenIdent id = createIdentifier();
@@ -353,7 +354,7 @@ extends AbstractDelegationTokenIdentifier>
   
   /** Remove expired delegation tokens from cache */
   private synchronized void removeExpiredToken() {
-    long now = System.currentTimeMillis();
+    long now = Time.now();
     Iterator<DelegationTokenInformation> i = currentTokens.values().iterator();
     while (i.hasNext()) {
       long renewDate = i.next().getRenewDate();
@@ -399,7 +400,7 @@ extends AbstractDelegationTokenIdentifier>
           / (60 * 1000) + " min(s)");
       try {
         while (running) {
-          long now = System.currentTimeMillis();
+          long now = Time.now();
           if (lastMasterKeyUpdate + keyUpdateInterval < now) {
             try {
               rollMasterKey();

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java

@@ -126,12 +126,12 @@ public class AsyncDiskService {
   public synchronized boolean awaitTermination(long milliseconds) 
       throws InterruptedException {
 
-    long end = System.currentTimeMillis() + milliseconds;
+    long end = Time.now() + milliseconds;
     for (Map.Entry<String, ThreadPoolExecutor> e:
         executors.entrySet()) {
       ThreadPoolExecutor executor = e.getValue();
       if (!executor.awaitTermination(
-          Math.max(end - System.currentTimeMillis(), 0),
+          Math.max(end - Time.now(), 0),
          TimeUnit.MILLISECONDS)) {
         LOG.warn("AsyncDiskService awaitTermination timeout.");
         return false;

+ 14 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java

@@ -78,26 +78,32 @@ public class DiskChecker {
   }
   
   /**
-   * Create the directory if it doesn't exist and
+   * Create the directory if it doesn't exist and check that dir is readable,
+   * writable and executable
+   *  
    * @param dir
    * @throws DiskErrorException
    */
   public static void checkDir(File dir) throws DiskErrorException {
     if (!mkdirsWithExistsCheck(dir))
-      throw new DiskErrorException("can not create directory: " 
+      throw new DiskErrorException("Can not create directory: "
                                    + dir.toString());
-        
+
     if (!dir.isDirectory())
-      throw new DiskErrorException("not a directory: " 
+      throw new DiskErrorException("Not a directory: "
                                    + dir.toString());
-            
+
     if (!dir.canRead())
-      throw new DiskErrorException("directory is not readable: " 
+      throw new DiskErrorException("Directory is not readable: "
                                    + dir.toString());
-            
+
     if (!dir.canWrite())
-      throw new DiskErrorException("directory is not writable: " 
+      throw new DiskErrorException("Directory is not writable: "
                                    + dir.toString());
+
+    if (!dir.canExecute())
+      throw new DiskErrorException("Directory is not executable: "
+	  + dir.toString());
   }
 
   /**

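For context, a hypothetical sketch of how checkDir() is typically invoked when validating a data directory; with the extra canExecute() test above, a directory that is readable and writable but not traversable (for example mode 660) now fails fast. The path and the println handling are illustrative only.

import java.io.File;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;

public class DiskCheckSketch {
  public static void main(String[] args) {
    try {
      // Creates the directory if needed, then verifies it is a readable,
      // writable and (with this patch) executable directory.
      DiskChecker.checkDir(new File("/data/1/hdfs")); // placeholder path
      System.out.println("volume looks usable");
    } catch (DiskErrorException e) {
      System.err.println("skipping bad volume: " + e.getMessage());
    }
  }
}
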
+ 95 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java

@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Facilitates hooking process termination for tests and debugging.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public final class ExitUtil {
+  private final static Log LOG = LogFactory.getLog(ExitUtil.class.getName());
+  private static volatile boolean systemExitDisabled = false;
+  private static volatile ExitException firstExitException;
+
+  public static class ExitException extends RuntimeException {
+    private static final long serialVersionUID = 1L;
+    public final int status;
+
+    public ExitException(int status, String msg) {
+      super(msg);
+      this.status = status;
+    }
+  }
+
+  /**
+   * Disable the use of System.exit for testing.
+   */
+  public static void disableSystemExit() {
+    systemExitDisabled = true;
+  }
+
+  /**
+   * @return true if terminate has been called
+   */
+  public static boolean terminateCalled() {
+    // Either we set this member or we actually called System#exit
+    return firstExitException != null;
+  }
+
+  /**
+   * @return the first ExitException thrown, null if none thrown yet
+   */
+  public static ExitException getFirstExitException() {
+    return firstExitException;
+  }
+
+  /**
+   * Terminate the current process. Note that terminate is the *only* method
+   * that should be used to terminate the daemon processes.
+   * @param status exit code
+   * @param msg message used to create the ExitException
+   * @throws ExitException if System.exit is disabled for test purposes
+   */
+  public static void terminate(int status, String msg) throws ExitException {
+    LOG.info("Exiting with status " + status);
+    if (systemExitDisabled) {
+      ExitException ee = new ExitException(status, msg);
+      LOG.fatal("Terminate called", ee);
+      if (null == firstExitException) {
+        firstExitException = ee;
+      }
+      throw ee;
+    }
+    System.exit(status);
+  }
+
+  /**
+   * Like {@link terminate(int, String)} without a message.
+   * @param status
+   * @throws ExitException
+   */
+  public static void terminate(int status) throws ExitException {
+    terminate(status, "ExitException");
+  }
+}

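A small test-style sketch of the contract this new class establishes, using only the ExitUtil API defined above; the JUnit wrapper and the chosen status code are illustrative.

import static org.junit.Assert.*;

import org.apache.hadoop.util.ExitUtil;
import org.junit.Test;

public class ExitUtilSketch {
  @Test
  public void testTerminateIsCatchable() {
    ExitUtil.disableSystemExit();                     // keep the test JVM alive
    try {
      ExitUtil.terminate(1, "simulated fatal error"); // would otherwise call System.exit(1)
      fail("expected ExitException");
    } catch (ExitUtil.ExitException ee) {
      assertEquals(1, ee.status);                     // the would-be process exit code
    }
    assertTrue(ExitUtil.terminateCalled());
    assertNotNull(ExitUtil.getFirstExitException());
  }
}
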
+ 10 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java

@@ -268,7 +268,8 @@ public class GenericOptionsParser {
     }
 
     if (line.hasOption("jt")) {
-      conf.set("mapred.job.tracker", line.getOptionValue("jt"));
+      conf.set("mapred.job.tracker", line.getOptionValue("jt"), 
+          "from -jt command line option");
     }
     if (line.hasOption("conf")) {
       String[] values = line.getOptionValues("conf");
@@ -278,7 +279,8 @@
     }
     if (line.hasOption("libjars")) {
       conf.set("tmpjars", 
-               validateFiles(line.getOptionValue("libjars"), conf));
+               validateFiles(line.getOptionValue("libjars"), conf),
+               "from -libjars command line option");
       //setting libjars in client classpath
       URL[] libjars = getLibJars(conf);
       if(libjars!=null && libjars.length>0) {
@@ -290,18 +292,20 @@
     }
     if (line.hasOption("files")) {
       conf.set("tmpfiles", 
-               validateFiles(line.getOptionValue("files"), conf));
+               validateFiles(line.getOptionValue("files"), conf),
+               "from -files command line option");
     }
     if (line.hasOption("archives")) {
       conf.set("tmparchives", 
-                validateFiles(line.getOptionValue("archives"), conf));
+                validateFiles(line.getOptionValue("archives"), conf),
+                "from -archives command line option");
     }
     if (line.hasOption('D')) {
       String[] property = line.getOptionValues('D');
       for(String prop : property) {
         String[] keyval = prop.split("=", 2);
         if (keyval.length == 2) {
-          conf.set(keyval[0], keyval[1]);
+          conf.set(keyval[0], keyval[1], "from command line");
         }
       }
     }
@@ -320,7 +324,7 @@
         LOG.debug("setting conf tokensFile: " + fileName);
       }
       conf.set("mapreduce.job.credentials.json", localFs.makeQualified(p)
-          .toString());
+          .toString(), "from -tokenCacheFile command line option");
 
     }
   }

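A rough sketch of what the extra source argument buys: values the parser injects are now attributed, and provenance can be read back through Configuration.getPropertySources(), which the updated tests below rely on. The key, value and printed format are invented, and exactly how a caller-supplied source string is combined with file resources in the returned array is an assumption here, not something this hunk spells out.

import org.apache.hadoop.conf.Configuration;

public class PropertySourceSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);

    // Same pattern as the parser above: record where the value came from.
    conf.set("example.key", "example.value", "from -D command line option"); // made-up key

    // Provenance is queryable afterwards; a plain two-argument set() is
    // reported as "programatically" (spelling as in the code).
    for (String source : conf.getPropertySources("example.key")) {
      System.out.println("example.key set " + source);
    }
  }
}
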
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java

@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  * 
  * <p>Clients and/or applications can use the provided <code>Progressable</code>
  * to explicitly report progress to the Hadoop framework. This is especially
- * important for operations which take an insignificant amount of time since,
+ * important for operations which take significant amount of time since,
  * in-lieu of the reported progress, the framework has to assume that an error
  * has occured and time-out the operation.</p>
  */

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java

@@ -205,7 +205,7 @@ public class ReflectionUtils {
     boolean dumpStack = false;
     if (log.isInfoEnabled()) {
       synchronized (ReflectionUtils.class) {
-        long now = System.currentTimeMillis();
+        long now = Time.now();
         if (now - previousLogTime >= minInterval * 1000) {
           previousLogTime = now;
           dumpStack = true;

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java

@@ -124,7 +124,7 @@ abstract public class Shell {
 
   /** check to see if a command needs to be executed and execute if needed */
   protected void run() throws IOException {
-    if (lastTime + interval > System.currentTimeMillis())
+    if (lastTime + interval > Time.now())
       return;
     exitCode = 0; // reset for next run
     runCommand();
@@ -223,7 +223,7 @@
        LOG.warn("Error while closing the error stream", ioe);
       }
       process.destroy();
-      lastTime = System.currentTimeMillis();
+      lastTime = Time.now();
     }
   }
 

+ 7 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java

@@ -202,8 +202,12 @@ public class StringUtils {
   }
   
   /**
-   * 
    * @param str
+   *          The string array to be parsed into an URI array.
+   * @return <tt>null</tt> if str is <tt>null</tt>, else the URI array
+   *         equivalent to str.
+   * @throws IllegalArgumentException
+   *           If any string in str violates RFC&nbsp;2396.
    */
   public static URI[] stringToURI(String[] str){
     if (str == null) 
@@ -213,9 +217,8 @@
       try{
         uris[i] = new URI(str[i]);
       }catch(URISyntaxException ur){
-        System.out.println("Exception in specified URI's " + StringUtils.stringifyException(ur));
-        //making sure its asssigned to null in case of an error
-        uris[i] = null;
+        throw new IllegalArgumentException(
+            "Failed to create uri for " + str[i], ur);
       }
     }
     return uris;

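A brief sketch of the behavioural change: a malformed entry used to print a stack trace and leave a null slot in the returned array, whereas after this patch the whole call fails with IllegalArgumentException. The sample strings are placeholders.

import java.net.URI;

import org.apache.hadoop.util.StringUtils;

public class StringToUriSketch {
  public static void main(String[] args) {
    URI[] ok = StringUtils.stringToURI(new String[] {"hdfs://nn.example.com:8020/"});
    System.out.println("parsed " + ok.length + " uri(s)");

    try {
      // The embedded space violates RFC 2396, so the call now throws
      // instead of silently returning a null element.
      StringUtils.stringToURI(new String[] {"not a uri"});
    } catch (IllegalArgumentException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}
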
+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java

@@ -35,10 +35,10 @@ public class ThreadUtil {
    * @param millis the number of milliseconds for the current thread to sleep
    */
   public static void sleepAtLeastIgnoreInterrupts(long millis) {
-    long start = System.currentTimeMillis();
-    while (System.currentTimeMillis() - start < millis) {
+    long start = Time.now();
+    while (Time.now() - start < millis) {
       long timeToSleep = millis -
-          (System.currentTimeMillis() - start);
+          (Time.now() - start);
       try {
         Thread.sleep(timeToSleep);
       } catch (InterruptedException ie) {

+ 52 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java

@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Utility methods for getting the time and computing intervals.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public final class Time {
+
+  /**
+   * Current system time.  Do not use this to calculate a duration or interval
+   * to sleep, because it will be broken by settimeofday.  Instead, use
+   * monotonicNow.
+   * @return current time in msec.
+   */
+  public static long now() {
+    return System.currentTimeMillis();
+  }
+  
+  /**
+   * Current time from some arbitrary time base in the past, counting in
+   * milliseconds, and not affected by settimeofday or similar system clock
+   * changes.  This is appropriate to use when computing how much longer to
+   * wait for an interval to expire.
+   * @return a monotonic clock that counts in milliseconds.
+   */
+  public static long monotonicNow() {
+    final long NANOSECONDS_PER_MILLISECOND = 1000000;
+
+    return System.nanoTime() / NANOSECONDS_PER_MILLISECOND;
+  }
+}

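A brief sketch contrasting the two clocks this new class exposes, in line with its javadoc; the doWork() method stands in for whatever operation is being timed.

import org.apache.hadoop.util.Time;

public class TimeSketch {
  public static void main(String[] args) throws InterruptedException {
    // Wall-clock time: fine for timestamps, but it can jump if the system
    // clock is reset, so it should not be used for durations.
    long issuedAt = Time.now();

    // Monotonic time: safe for measuring how long something took.
    long start = Time.monotonicNow();
    doWork();
    long elapsedMs = Time.monotonicNow() - start;

    System.out.println("issued at " + issuedAt + ", took " + elapsedMs + " ms");
  }

  private static void doWork() throws InterruptedException {
    Thread.sleep(100); // placeholder for real work
  }
}
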
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties

@@ -106,7 +106,7 @@ hadoop.security.logger=INFO,NullAppender
 hadoop.security.log.maxfilesize=256MB
 hadoop.security.log.maxbackupindex=20
 log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
 log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
 log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
 log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java

@@ -64,7 +64,7 @@ public class TestConfServlet extends TestCase {
       String resource = (String)propertyInfo.get("resource");
       System.err.println("k: " + key + " v: " + val + " r: " + resource);
       if (TEST_KEY.equals(key) && TEST_VAL.equals(val)
-          && Configuration.UNKNOWN_RESOURCE.equals(resource)) {
+          && "programatically".equals(resource)) {
         foundSetting = true;
       }
     }

+ 73 - 7
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java

@@ -18,10 +18,12 @@
 package org.apache.hadoop.conf;
 
 import java.io.BufferedWriter;
+import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.StringWriter;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -77,6 +79,22 @@ public class TestConfiguration extends TestCase {
   private void addInclude(String filename) throws IOException{
     out.write("<xi:include href=\"" + filename + "\" xmlns:xi=\"http://www.w3.org/2001/XInclude\"  />\n ");
   }
+  
+  public void testInputStreamResource() throws Exception {
+    StringWriter writer = new StringWriter();
+    out = new BufferedWriter(writer);
+    startConfig();
+    declareProperty("prop", "A", "A");
+    endConfig();
+    
+    InputStream in1 = new ByteArrayInputStream(writer.toString().getBytes());
+    Configuration conf = new Configuration(false);
+    conf.addResource(in1);
+    assertEquals("A", conf.get("prop"));
+    InputStream in2 = new ByteArrayInputStream(writer.toString().getBytes());
+    conf.addResource(in2);
+    assertEquals("A", conf.get("prop"));
+  }
 
   public void testVariableSubstitution() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
@@ -168,7 +186,8 @@
     appendProperty(name, val, false);
   }
  
-  void appendProperty(String name, String val, boolean isFinal)
+  void appendProperty(String name, String val, boolean isFinal, 
+      String ... sources)
     throws IOException {
     out.write("<property>");
     out.write("<name>");
@@ -180,6 +199,11 @@
     if (isFinal) {
       out.write("<final>true</final>");
     }
+    for(String s : sources) {
+      out.write("<source>");
+      out.write(s);
+      out.write("</source>");
+    }
     out.write("</property>\n");
   }
   
@@ -648,16 +672,38 @@
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
     conf.set("fs.defaultFS", "value");
+    String [] sources = conf.getPropertySources("test.foo");
+    assertEquals(1, sources.length);
     assertEquals(
         "Resource string returned for a file-loaded property" +
         " must be a proper absolute path",
         fileResource,
-        new Path(conf.getPropertySource("test.foo")));
-    assertEquals("Resource string returned for a set() property must be null",
-        null,
-        conf.getPropertySource("fs.defaultFS"));
+        new Path(sources[0]));
+    assertArrayEquals("Resource string returned for a set() property must be " +
+    		"\"programatically\"",
+        new String[]{"programatically"},
+        conf.getPropertySources("fs.defaultFS"));
     assertEquals("Resource string returned for an unset property must be null",
-        null, conf.getPropertySource("fs.defaultFoo"));
+        null, conf.getPropertySources("fs.defaultFoo"));
+  }
+  
+  public void testMultiplePropertySource() throws IOException {
+    out = new BufferedWriter(new FileWriter(CONFIG));
+    startConfig();
+    appendProperty("test.foo", "bar", false, "a", "b", "c");
+    endConfig();
+    Path fileResource = new Path(CONFIG);
+    conf.addResource(fileResource);
+    String [] sources = conf.getPropertySources("test.foo");
+    assertEquals(4, sources.length);
+    assertEquals("a", sources[0]);
+    assertEquals("b", sources[1]);
+    assertEquals("c", sources[2]);
+    assertEquals(
+        "Resource string returned for a file-loaded property" +
+        " must be a proper absolute path",
+        fileResource,
+        new Path(sources[3]));
   }
 
   public void testSocketAddress() throws IOException {
@@ -906,7 +952,7 @@
       confDump.put(prop.getKey(), prop);
     }
     assertEquals("value5",confDump.get("test.key6").getValue());
-    assertEquals("Unknown", confDump.get("test.key4").getResource());
+    assertEquals("programatically", confDump.get("test.key4").getResource());
     outWriter.close();
   }
   
@@ -975,6 +1021,26 @@
     assertTrue("Picked out wrong key " + key3, !res.containsKey(key3));
     assertTrue("Picked out wrong key " + key4, !res.containsKey(key4));
   }
+  
+  public void testSettingValueNull() throws Exception {
+    Configuration config = new Configuration();
+    try {
+      config.set("testClassName", null);
+      fail("Should throw an IllegalArgumentException exception ");
+    } catch (Exception e) {
+      assertTrue(e instanceof IllegalArgumentException);
+    }
+  }
+
+  public void testSettingKeyNull() throws Exception {
+    Configuration config = new Configuration();
+    try {
+      config.set(null, "test");
+      fail("Should throw an IllegalArgumentException exception ");
+    } catch (Exception e) {
+      assertTrue(e instanceof IllegalArgumentException);
+    }
+  }
 
   public static void main(String[] argv) throws Exception {
     junit.textui.TestRunner.main(new String[]{

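For reference, a compact sketch of the InputStream resource support that the new testInputStreamResource() exercises; the XML literal is a made-up single-property example.

import java.io.ByteArrayInputStream;
import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;

public class StreamResourceSketch {
  public static void main(String[] args) {
    String xml =
        "<?xml version=\"1.0\"?>\n" +
        "<configuration>\n" +
        "  <property><name>prop</name><value>A</value></property>\n" +
        "</configuration>\n";

    // Configuration XML no longer has to live on disk or on the classpath;
    // any InputStream, here an in-memory byte array, can be added as a resource.
    InputStream in = new ByteArrayInputStream(xml.getBytes());
    Configuration conf = new Configuration(false);
    conf.addResource(in);

    System.out.println("prop = " + conf.get("prop")); // prints "prop = A"
  }
}
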
+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.conf;
 
+import org.apache.hadoop.util.Time;
 import org.junit.Test;
 import org.junit.Before;
 import static org.junit.Assert.*;
@@ -295,8 +296,8 @@ public class TestReconfiguration {
     }
     dummy.reconfigureProperty(PROP1, VAL2);
 
-    long endWait = System.currentTimeMillis() + 2000;
-    while (dummyThread.isAlive() && System.currentTimeMillis() < endWait) {
+    long endWait = Time.now() + 2000;
+    while (dummyThread.isAlive() && Time.now() < endWait) {
       try {
         Thread.sleep(50);
       } catch (InterruptedException ignore) {

+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java

@@ -32,6 +32,7 @@ import java.util.Set;
 import junit.framework.TestCase;
 import junit.framework.TestCase;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Time;
 
 
 /**
 /**
  * This class tests commands from Trash.
  * This class tests commands from Trash.
@@ -600,7 +601,7 @@ public class TestTrash extends TestCase {
       
       
       writeFile(fs, myFile, 10);
       writeFile(fs, myFile, 10);
       
       
-      start = System.currentTimeMillis();
+      start = Time.now();
       
       
       try {
       try {
         retVal = shell.run(args);
         retVal = shell.run(args);
@@ -612,7 +613,7 @@ public class TestTrash extends TestCase {
       
       
       assertTrue(retVal == 0);
       assertTrue(retVal == 0);
       
       
-      long iterTime = System.currentTimeMillis() - start;
+      long iterTime = Time.now() - start;
       // take median of the first 10 runs
       // take median of the first 10 runs
       if(i<10) {
       if(i<10) {
         if(i==0) {
         if(i==0) {

+ 14 - 13
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.CreateOpts;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.ToolRunner;
 
 
@@ -121,7 +122,7 @@ public class LoadGenerator extends Configured implements Tool {
   private double [] writeProbs = {0.3333};
   private double [] writeProbs = {0.3333};
   private volatile int currentIndex = 0;
   private volatile int currentIndex = 0;
   long totalTime = 0;
   long totalTime = 0;
-  private long startTime = System.currentTimeMillis()+10000;
+  private long startTime = Time.now()+10000;
   final static private int BLOCK_SIZE = 10;
   final static private int BLOCK_SIZE = 10;
   private ArrayList<String> files = new ArrayList<String>();  // a table of file names
   private ArrayList<String> files = new ArrayList<String>();  // a table of file names
   private ArrayList<String> dirs = new ArrayList<String>(); // a table of directory names
   private ArrayList<String> dirs = new ArrayList<String>(); // a table of directory names
@@ -232,9 +233,9 @@ public class LoadGenerator extends Configured implements Tool {
      * the entire file */
      * the entire file */
     private void read() throws IOException {
     private void read() throws IOException {
       String fileName = files.get(r.nextInt(files.size()));
       String fileName = files.get(r.nextInt(files.size()));
-      long startTime = System.currentTimeMillis();
+      long startTime = Time.now();
       InputStream in = fc.open(new Path(fileName));
       InputStream in = fc.open(new Path(fileName));
-      executionTime[OPEN] += (System.currentTimeMillis()-startTime);
+      executionTime[OPEN] += (Time.now()-startTime);
       totalNumOfOps[OPEN]++;
       totalNumOfOps[OPEN]++;
       while (in.read(buffer) != -1) {}
       while (in.read(buffer) != -1) {}
       in.close();
       in.close();
@@ -254,9 +255,9 @@ public class LoadGenerator extends Configured implements Tool {
       double fileSize = 0;
       double fileSize = 0;
       while ((fileSize = r.nextGaussian()+2)<=0) {}
       while ((fileSize = r.nextGaussian()+2)<=0) {}
       genFile(file, (long)(fileSize*BLOCK_SIZE));
       genFile(file, (long)(fileSize*BLOCK_SIZE));
-      long startTime = System.currentTimeMillis();
+      long startTime = Time.now();
       fc.delete(file, true);
       fc.delete(file, true);
-      executionTime[DELETE] += (System.currentTimeMillis()-startTime);
+      executionTime[DELETE] += (Time.now()-startTime);
       totalNumOfOps[DELETE]++;
       totalNumOfOps[DELETE]++;
     }
     }
     
     
@@ -265,9 +266,9 @@ public class LoadGenerator extends Configured implements Tool {
      */
      */
     private void list() throws IOException {
     private void list() throws IOException {
       String dirName = dirs.get(r.nextInt(dirs.size()));
       String dirName = dirs.get(r.nextInt(dirs.size()));
-      long startTime = System.currentTimeMillis();
+      long startTime = Time.now();
       fc.listStatus(new Path(dirName));
       fc.listStatus(new Path(dirName));
-      executionTime[LIST] += (System.currentTimeMillis()-startTime);
+      executionTime[LIST] += (Time.now()-startTime);
       totalNumOfOps[LIST]++;
       totalNumOfOps[LIST]++;
     }
     }
   }
   }
@@ -435,7 +436,7 @@ public class LoadGenerator extends Configured implements Tool {
     }
     }
     
     
     if (r==null) {
     if (r==null) {
-      r = new Random(System.currentTimeMillis()+hostHashCode);
+      r = new Random(Time.now()+hostHashCode);
     }
     }
     
     
     return initFileDirTables();
     return initFileDirTables();
@@ -571,7 +572,7 @@ public class LoadGenerator extends Configured implements Tool {
    */
    */
   private void barrier() {
   private void barrier() {
     long sleepTime;
     long sleepTime;
-    while ((sleepTime = startTime - System.currentTimeMillis()) > 0) {
+    while ((sleepTime = startTime - Time.now()) > 0) {
       try {
       try {
         Thread.sleep(sleepTime);
         Thread.sleep(sleepTime);
       } catch (InterruptedException ex) {
       } catch (InterruptedException ex) {
@@ -583,20 +584,20 @@ public class LoadGenerator extends Configured implements Tool {
    * The file is filled with 'a'.
    * The file is filled with 'a'.
    */
    */
   private void genFile(Path file, long fileSize) throws IOException {
   private void genFile(Path file, long fileSize) throws IOException {
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.now();
     FSDataOutputStream out = fc.create(file,
     FSDataOutputStream out = fc.create(file,
         EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
         EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
         CreateOpts.createParent(), CreateOpts.bufferSize(4096),
         CreateOpts.createParent(), CreateOpts.bufferSize(4096),
         CreateOpts.repFac((short) 3));
         CreateOpts.repFac((short) 3));
-    executionTime[CREATE] += (System.currentTimeMillis()-startTime);
+    executionTime[CREATE] += (Time.now()-startTime);
     totalNumOfOps[CREATE]++;
     totalNumOfOps[CREATE]++;
 
 
     for (long i=0; i<fileSize; i++) {
     for (long i=0; i<fileSize; i++) {
       out.writeByte('a');
       out.writeByte('a');
     }
     }
-    startTime = System.currentTimeMillis();
+    startTime = Time.now();
     out.close();
     out.close();
-    executionTime[WRITE_CLOSE] += (System.currentTimeMillis()-startTime);
+    executionTime[WRITE_CLOSE] += (Time.now()-startTime);
     totalNumOfOps[WRITE_CLOSE]++;
     totalNumOfOps[WRITE_CLOSE]++;
   }
   }
   
   

+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java

@@ -39,6 +39,7 @@ import java.util.TreeSet;
 import java.util.Map.Entry;
 import java.util.Map.Entry;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Time;
 
 
 /**
 /**
  * <p>
  * <p>
@@ -59,7 +60,7 @@ class InMemoryNativeFileSystemStore implements NativeFileSystemStore {
   }
   }
 
 
   public void storeEmptyFile(String key) throws IOException {
   public void storeEmptyFile(String key) throws IOException {
-    metadataMap.put(key, new FileMetadata(key, 0, System.currentTimeMillis()));
+    metadataMap.put(key, new FileMetadata(key, 0, Time.now()));
     dataMap.put(key, new byte[0]);
     dataMap.put(key, new byte[0]);
   }
   }
 
 
@@ -81,7 +82,7 @@ class InMemoryNativeFileSystemStore implements NativeFileSystemStore {
       }
       }
     }
     }
     metadataMap.put(key,
     metadataMap.put(key,
-        new FileMetadata(key, file.length(), System.currentTimeMillis()));
+        new FileMetadata(key, file.length(), Time.now()));
     dataMap.put(key, out.toByteArray());
     dataMap.put(key, out.toByteArray());
   }
   }
 
 

+ 6 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java

@@ -23,6 +23,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.apache.zookeeper.data.Stat;
 import org.apache.zookeeper.data.Stat;
 import org.apache.zookeeper.server.ZooKeeperServer;
 import org.apache.zookeeper.server.ZooKeeperServer;
@@ -36,7 +37,7 @@ public abstract class ActiveStandbyElectorTestUtil {
   public static void waitForActiveLockData(TestContext ctx,
   public static void waitForActiveLockData(TestContext ctx,
       ZooKeeperServer zks, String parentDir, byte[] activeData)
       ZooKeeperServer zks, String parentDir, byte[] activeData)
       throws Exception {
       throws Exception {
-    long st = System.currentTimeMillis();
+    long st = Time.now();
     long lastPrint = st;
     long lastPrint = st;
     while (true) {
     while (true) {
       if (ctx != null) {
       if (ctx != null) {
@@ -51,17 +52,17 @@ public abstract class ActiveStandbyElectorTestUtil {
             Arrays.equals(activeData, data)) {
             Arrays.equals(activeData, data)) {
           return;
           return;
         }
         }
-        if (System.currentTimeMillis() > lastPrint + LOG_INTERVAL_MS) {
+        if (Time.now() > lastPrint + LOG_INTERVAL_MS) {
           LOG.info("Cur data: " + StringUtils.byteToHexString(data));
           LOG.info("Cur data: " + StringUtils.byteToHexString(data));
-          lastPrint = System.currentTimeMillis();
+          lastPrint = Time.now();
         }
         }
       } catch (NoNodeException nne) {
       } catch (NoNodeException nne) {
         if (activeData == null) {
         if (activeData == null) {
           return;
           return;
         }
         }
-        if (System.currentTimeMillis() > lastPrint + LOG_INTERVAL_MS) {
+        if (Time.now() > lastPrint + LOG_INTERVAL_MS) {
           LOG.info("Cur data: no node");
           LOG.info("Cur data: no node");
-          lastPrint = System.currentTimeMillis();
+          lastPrint = Time.now();
         }
         }
       }
       }
       Thread.sleep(50);
       Thread.sleep(50);

+ 9 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java

@@ -32,6 +32,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.TimeoutException;
 
 
+import org.apache.hadoop.util.Time;
 import org.apache.zookeeper.PortAssignment;
 import org.apache.zookeeper.PortAssignment;
 import org.apache.zookeeper.TestableZooKeeper;
 import org.apache.zookeeper.TestableZooKeeper;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.WatchedEvent;
@@ -111,11 +112,11 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
             return connected;
             return connected;
         }
         }
         synchronized void waitForConnected(long timeout) throws InterruptedException, TimeoutException {
         synchronized void waitForConnected(long timeout) throws InterruptedException, TimeoutException {
-            long expire = System.currentTimeMillis() + timeout;
+            long expire = Time.now() + timeout;
             long left = timeout;
             long left = timeout;
             while(!connected && left > 0) {
             while(!connected && left > 0) {
                 wait(left);
                 wait(left);
-                left = expire - System.currentTimeMillis();
+                left = expire - Time.now();
             }
             }
             if (!connected) {
             if (!connected) {
                 throw new TimeoutException("Did not connect");
                 throw new TimeoutException("Did not connect");
@@ -123,11 +124,11 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
             }
             }
         }
         }
         synchronized void waitForDisconnected(long timeout) throws InterruptedException, TimeoutException {
         synchronized void waitForDisconnected(long timeout) throws InterruptedException, TimeoutException {
-            long expire = System.currentTimeMillis() + timeout;
+            long expire = Time.now() + timeout;
             long left = timeout;
             long left = timeout;
             while(connected && left > 0) {
             while(connected && left > 0) {
                 wait(left);
                 wait(left);
-                left = expire - System.currentTimeMillis();
+                left = expire - Time.now();
             }
             }
             if (connected) {
             if (connected) {
                 throw new TimeoutException("Did not disconnect");
                 throw new TimeoutException("Did not disconnect");
@@ -248,7 +249,7 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
     }
     }
 
 
     public static boolean waitForServerUp(String hp, long timeout) {
     public static boolean waitForServerUp(String hp, long timeout) {
-        long start = System.currentTimeMillis();
+        long start = Time.now();
         while (true) {
         while (true) {
             try {
             try {
                 // if there are multiple hostports, just take the first one
                 // if there are multiple hostports, just take the first one
@@ -263,7 +264,7 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
                 LOG.info("server " + hp + " not up " + e);
                 LOG.info("server " + hp + " not up " + e);
             }
             }
 
 
-            if (System.currentTimeMillis() > start + timeout) {
+            if (Time.now() > start + timeout) {
                 break;
                 break;
             }
             }
             try {
             try {
@@ -275,7 +276,7 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
         return false;
         return false;
     }
     }
     public static boolean waitForServerDown(String hp, long timeout) {
     public static boolean waitForServerDown(String hp, long timeout) {
-        long start = System.currentTimeMillis();
+        long start = Time.now();
         while (true) {
         while (true) {
             try {
             try {
                 HostPort hpobj = parseHostPortList(hp).get(0);
                 HostPort hpobj = parseHostPortList(hp).get(0);
@@ -284,7 +285,7 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
                 return true;
                 return true;
             }
             }
 
 
-            if (System.currentTimeMillis() > start + timeout) {
+            if (Time.now() > start + timeout) {
                 break;
                 break;
             }
             }
             try {
             try {

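The waitForConnected()/waitForDisconnected() hunks above keep the deadline-and-remaining-time idiom; only the clock source changes. A self-contained sketch of that idiom, assuming only the Time.now() helper introduced by this commit (ConnectionLatch is an illustrative name, not part of the commit):

    import java.util.concurrent.TimeoutException;
    import org.apache.hadoop.util.Time;

    public class ConnectionLatch {
      private boolean connected = false;

      public synchronized void markConnected() {
        connected = true;
        notifyAll();
      }

      // Same deadline pattern as waitForConnected() above: recompute the
      // remaining time after every wakeup so that spurious wakeups cannot
      // stretch the overall timeout.
      public synchronized void waitForConnected(long timeoutMs)
          throws InterruptedException, TimeoutException {
        long expire = Time.now() + timeoutMs;
        long left = timeoutMs;
        while (!connected && left > 0) {
          wait(left);
          left = expire - Time.now();
        }
        if (!connected) {
          throw new TimeoutException("Did not connect");
        }
      }
    }
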
+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHealthMonitor.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HealthMonitor.Callback;
 import org.apache.hadoop.ha.HealthMonitor.Callback;
 import org.apache.hadoop.ha.HealthMonitor.State;
 import org.apache.hadoop.ha.HealthMonitor.State;
+import org.apache.hadoop.util.Time;
 
 
 import org.junit.Before;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.Test;
@@ -136,8 +137,8 @@ public class TestHealthMonitor {
 
 
   private void waitForState(HealthMonitor hm, State state)
   private void waitForState(HealthMonitor hm, State state)
       throws InterruptedException {
       throws InterruptedException {
-    long st = System.currentTimeMillis();
-    while (System.currentTimeMillis() - st < 2000) {
+    long st = Time.now();
+    while (Time.now() - st < 2000) {
       if (hm.getHealthState() == state) {
       if (hm.getHealthState() == state) {
         return;
         return;
       }
       }

+ 4 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HealthMonitor.State;
 import org.apache.hadoop.ha.HealthMonitor.State;
 import org.apache.hadoop.ha.MiniZKFCCluster.DummyZKFC;
 import org.apache.hadoop.ha.MiniZKFCCluster.DummyZKFC;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.apache.log4j.Level;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.ZooKeeper;
@@ -394,9 +395,9 @@ public class TestZKFailoverController extends ClientBaseWithFixes {
       // Ask it to cede active for 3 seconds. It should respond promptly
       // Ask it to cede active for 3 seconds. It should respond promptly
       // (i.e. the RPC itself should not take 3 seconds!)
       // (i.e. the RPC itself should not take 3 seconds!)
       ZKFCProtocol proxy = zkfc.getLocalTarget().getZKFCProxy(conf, 5000);
       ZKFCProtocol proxy = zkfc.getLocalTarget().getZKFCProxy(conf, 5000);
-      long st = System.currentTimeMillis();
+      long st = Time.now();
       proxy.cedeActive(3000);
       proxy.cedeActive(3000);
-      long et = System.currentTimeMillis();
+      long et = Time.now();
       assertTrue("RPC to cedeActive took " + (et - st) + " ms",
       assertTrue("RPC to cedeActive took " + (et - st) + " ms",
           et - st < 1000);
           et - st < 1000);
       
       
@@ -408,7 +409,7 @@ public class TestZKFailoverController extends ClientBaseWithFixes {
       // After the prescribed 3 seconds, should go into STANDBY state,
       // After the prescribed 3 seconds, should go into STANDBY state,
       // since the other node in the cluster would have taken ACTIVE.
       // since the other node in the cluster would have taken ACTIVE.
       cluster.waitForElectorState(0, ActiveStandbyElector.State.STANDBY);
       cluster.waitForElectorState(0, ActiveStandbyElector.State.STANDBY);
-      long et2 = System.currentTimeMillis();
+      long et2 = Time.now();
       assertTrue("Should take ~3 seconds to rejoin. Only took " + (et2 - et) +
       assertTrue("Should take ~3 seconds to rejoin. Only took " + (et2 - et) +
           "ms before rejoining.",
           "ms before rejoining.",
           et2 - et > 2800);      
           et2 - et > 2800);      

+ 7 - 6
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverControllerStress.java

@@ -21,6 +21,7 @@ import java.util.Random;
 
 
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.Test;
@@ -61,11 +62,11 @@ public class TestZKFailoverControllerStress extends ClientBaseWithFixes {
   @Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
   @Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
   public void testExpireBackAndForth() throws Exception {
   public void testExpireBackAndForth() throws Exception {
     cluster.start();
     cluster.start();
-    long st = System.currentTimeMillis();
+    long st = Time.now();
     long runFor = STRESS_RUNTIME_SECS * 1000;
     long runFor = STRESS_RUNTIME_SECS * 1000;
 
 
     int i = 0;
     int i = 0;
-    while (System.currentTimeMillis() - st < runFor) {
+    while (Time.now() - st < runFor) {
       // flip flop the services back and forth
       // flip flop the services back and forth
       int from = i % 2;
       int from = i % 2;
       int to = (i + 1) % 2;
       int to = (i + 1) % 2;
@@ -87,11 +88,11 @@ public class TestZKFailoverControllerStress extends ClientBaseWithFixes {
   @Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
   @Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
   public void testRandomExpirations() throws Exception {
   public void testRandomExpirations() throws Exception {
     cluster.start();
     cluster.start();
-    long st = System.currentTimeMillis();
+    long st = Time.now();
     long runFor = STRESS_RUNTIME_SECS * 1000;
     long runFor = STRESS_RUNTIME_SECS * 1000;
 
 
     Random r = new Random();
     Random r = new Random();
-    while (System.currentTimeMillis() - st < runFor) {
+    while (Time.now() - st < runFor) {
       cluster.getTestContext().checkException();
       cluster.getTestContext().checkException();
       int targetIdx = r.nextInt(2);
       int targetIdx = r.nextInt(2);
       ActiveStandbyElector target = cluster.getElector(targetIdx);
       ActiveStandbyElector target = cluster.getElector(targetIdx);
@@ -125,8 +126,8 @@ public class TestZKFailoverControllerStress extends ClientBaseWithFixes {
     // setting up the mock.
     // setting up the mock.
     cluster.start();
     cluster.start();
     
     
-    long st = System.currentTimeMillis();
-    while (System.currentTimeMillis() - st < runFor) {
+    long st = Time.now();
+    while (Time.now() - st < runFor) {
       cluster.getTestContext().checkException();
       cluster.getTestContext().checkException();
       serverFactory.closeAll();
       serverFactory.closeAll();
       Thread.sleep(50);
       Thread.sleep(50);

+ 23 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java

@@ -29,6 +29,7 @@ import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.nio.channels.FileChannel;
 
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.Mockito;
 
 
@@ -152,4 +153,26 @@ public class TestIOUtils {
       }
       }
     }
     }
   }
   }
+
+  @Test
+  public void testWrappedReadForCompressedData() throws IOException {
+    byte[] buf = new byte[2];
+    InputStream mockStream = Mockito.mock(InputStream.class);
+    Mockito.when(mockStream.read(buf, 0, 1)).thenReturn(1);
+    Mockito.when(mockStream.read(buf, 0, 2)).thenThrow(
+        new java.lang.InternalError());
+
+    try {
+      assertEquals("Check expected value", 1,
+          IOUtils.wrappedReadForCompressedData(mockStream, buf, 0, 1));
+    } catch (IOException ioe) {
+      fail("Unexpected error while reading");
+    }
+    try {
+      IOUtils.wrappedReadForCompressedData(mockStream, buf, 0, 2);
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "Error while reading compressed data", ioe);
+    }
+  }
 }
 }

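The new test exercises IOUtils.wrappedReadForCompressedData(), which is expected to return the byte count on success and to surface low-level failures (such as an InternalError thrown from native decompressor code) as IOException. A simplified, hypothetical sketch of such a wrapper; the actual IOUtils implementation may differ:

    import java.io.IOException;
    import java.io.InputStream;

    public class CompressedReadUtil {
      /**
       * Read from a (possibly native-code backed) decompression stream and
       * rethrow low-level failures as IOException, which is what callers of
       * a FileSystem-style API expect to handle.
       * Returns the number of bytes read, or -1 at end of stream.
       */
      public static int wrappedRead(InputStream in, byte[] buf, int off, int len)
          throws IOException {
        try {
          return in.read(buf, off, len);
        } catch (IOException ioe) {
          throw ioe;
        } catch (Throwable t) {
          throw new IOException("Error while reading compressed data", t);
        }
      }
    }
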
+ 55 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.SequenceFile.Metadata;
 import org.apache.hadoop.io.SequenceFile.Metadata;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.hadoop.io.compress.DefaultCodec;
+import org.apache.hadoop.io.serializer.avro.AvroReflectSerialization;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.conf.*;
 import org.mockito.Mockito;
 import org.mockito.Mockito;
@@ -557,6 +558,60 @@ public class TestSequenceFile extends TestCase {
     // should succeed, fails if exception thrown
     // should succeed, fails if exception thrown
   }
   }
 
 
+  public void testSerializationAvailability() throws IOException {
+    Configuration conf = new Configuration();
+    Path path = new Path(System.getProperty("test.build.data", "."),
+        "serializationAvailability");
+    // Check if any serializers aren't found.
+    try {
+      SequenceFile.createWriter(
+          conf,
+          SequenceFile.Writer.file(path),
+          SequenceFile.Writer.keyClass(String.class),
+          SequenceFile.Writer.valueClass(NullWritable.class));
+      // Note: This may also fail someday if JavaSerialization
+      // is activated by default.
+      fail("Must throw IOException for missing serializer for the Key class");
+    } catch (IOException e) {
+      assertTrue(e.getMessage().startsWith(
+        "Could not find a serializer for the Key class: '" +
+            String.class.getName() + "'."));
+    }
+    try {
+      SequenceFile.createWriter(
+          conf,
+          SequenceFile.Writer.file(path),
+          SequenceFile.Writer.keyClass(NullWritable.class),
+          SequenceFile.Writer.valueClass(String.class));
+      // Note: This may also fail someday if JavaSerialization
+      // is activated by default.
+      fail("Must throw IOException for missing serializer for the Value class");
+    } catch (IOException e) {
+      assertTrue(e.getMessage().startsWith(
+        "Could not find a serializer for the Value class: '" +
+            String.class.getName() + "'."));
+    }
+
+    // Write a simple file to test deserialization failures with
+    writeTest(FileSystem.get(conf), 1, 1, path, CompressionType.NONE, null);
+
+    // Remove Writable serializations, to enforce error.
+    conf.setStrings(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
+        AvroReflectSerialization.class.getName());
+
+    // Now check if any deserializers aren't found.
+    try {
+      new SequenceFile.Reader(
+          conf,
+          SequenceFile.Reader.file(path));
+      fail("Must throw IOException for missing deserializer for the Key class");
+    } catch (IOException e) {
+      assertTrue(e.getMessage().startsWith(
+        "Could not find a deserializer for the Key class: '" +
+            RandomDatum.class.getName() + "'."));
+    }
+  }
+
   /** For debugging and testing. */
   /** For debugging and testing. */
   public static void main(String[] args) throws Exception {
   public static void main(String[] args) throws Exception {
     int count = 1024 * 1024;
     int count = 1024 * 1024;

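The new testSerializationAvailability() case depends on which serializers are registered under io.serializations. A hedged example of the normal, working configuration with Writable keys and values; the WritableSerialization class name and the output path are assumptions not shown in this diff:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class SequenceFileSerializationExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // The writer looks up a serializer for the key and value classes in
        // io.serializations; Writable types are covered by WritableSerialization.
        // A non-Writable key class such as String would fail with
        // "Could not find a serializer for the Key class" unless, for example,
        // JavaSerialization were added to this list.
        conf.setStrings(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
            org.apache.hadoop.io.serializer.WritableSerialization.class.getName());
        Path path = new Path(System.getProperty("test.build.data", "/tmp"),
            "serialization-example.seq");
        SequenceFile.Writer writer = SequenceFile.createWriter(conf,
            SequenceFile.Writer.file(path),
            SequenceFile.Writer.keyClass(Text.class),
            SequenceFile.Writer.valueClass(IntWritable.class));
        try {
          writer.append(new Text("answer"), new IntWritable(42));
        } finally {
          writer.close();
        }
      }
    }
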
+ 46 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java

@@ -46,6 +46,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.MapFile;
 import org.apache.hadoop.io.RandomDatum;
 import org.apache.hadoop.io.RandomDatum;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
@@ -68,6 +69,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 
 
 import org.junit.Assert;
 import org.junit.Assert;
+import org.junit.Assume;
 import org.junit.Test;
 import org.junit.Test;
 import static org.junit.Assert.*;
 import static org.junit.Assert.*;
 
 
@@ -514,6 +516,50 @@ public class TestCodec {
     LOG.info("SUCCESS! Completed SequenceFileCodecTest with codec \"" + codecClass + "\"");
     LOG.info("SUCCESS! Completed SequenceFileCodecTest with codec \"" + codecClass + "\"");
   }
   }
   
   
+  /**
+   * Regression test for HADOOP-8423: seeking in a block-compressed
+   * stream would not properly reset the block decompressor state.
+   */
+  @Test
+  public void testSnappyMapFile() throws Exception {
+    Assume.assumeTrue(SnappyCodec.isNativeCodeLoaded());
+    codecTestMapFile(SnappyCodec.class, CompressionType.BLOCK, 100);
+  }
+  
+  private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
+      CompressionType type, int records) throws Exception {
+    
+    FileSystem fs = FileSystem.get(conf);
+    LOG.info("Creating MapFiles with " + records  + 
+            " records using codec " + clazz.getSimpleName());
+    Path path = new Path(new Path(
+        System.getProperty("test.build.data", "/tmp")),
+      clazz.getSimpleName() + "-" + type + "-" + records);
+
+    LOG.info("Writing " + path);
+    createMapFile(conf, fs, path, clazz.newInstance(), type, records);
+    MapFile.Reader reader = new MapFile.Reader(path, conf);
+    Text key1 = new Text("002");
+    assertNotNull(reader.get(key1, new Text()));
+    Text key2 = new Text("004");
+    assertNotNull(reader.get(key2, new Text()));
+  }
+  
+  private static void createMapFile(Configuration conf, FileSystem fs, Path path, 
+      CompressionCodec codec, CompressionType type, int records) throws IOException {
+    MapFile.Writer writer = 
+        new MapFile.Writer(conf, path,
+            MapFile.Writer.keyClass(Text.class),
+            MapFile.Writer.valueClass(Text.class),
+            MapFile.Writer.compression(type, codec));
+    Text key = new Text();
+    for (int j = 0; j < records; j++) {
+        key.set(String.format("%03d", j));
+        writer.append(key, key);
+    }
+    writer.close();
+  }
+
   public static void main(String[] args) throws IOException {
   public static void main(String[] args) throws IOException {
     int count = 10000;
     int count = 10000;
     String codecClass = "org.apache.hadoop.io.compress.DefaultCodec";
     String codecClass = "org.apache.hadoop.io.compress.DefaultCodec";

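testSnappyMapFile() is skipped when the native Snappy library is not loaded. The same block-compressed random-access path (the HADOOP-8423 regression) can be exercised with the pure-Java DefaultCodec, roughly as sketched below; the class name and output directory are illustrative, not part of the commit:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.MapFile;
    import org.apache.hadoop.io.SequenceFile.CompressionType;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.compress.DefaultCodec;

    public class BlockCompressedMapFileExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        DefaultCodec codec = new DefaultCodec();
        codec.setConf(conf);
        Path dir = new Path(System.getProperty("test.build.data", "/tmp"),
            "block-compressed-mapfile");
        MapFile.Writer writer = new MapFile.Writer(conf, dir,
            MapFile.Writer.keyClass(Text.class),
            MapFile.Writer.valueClass(Text.class),
            MapFile.Writer.compression(CompressionType.BLOCK, codec));
        Text key = new Text();
        for (int i = 0; i < 100; i++) {
          key.set(String.format("%03d", i));   // keys must be appended in sorted order
          writer.append(key, key);
        }
        writer.close();
        // Random access forces a seek into the middle of a compressed block,
        // the code path that HADOOP-8423 fixed.
        MapFile.Reader reader = new MapFile.Reader(dir, conf);
        System.out.println(reader.get(new Text("042"), new Text()));
        reader.close();
      }
    }
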
+ 4 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java

@@ -42,6 +42,7 @@ import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry;
 import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry;
+import org.apache.hadoop.util.Time;
 
 
 public class TestTFileSeqFileComparison extends TestCase {
 public class TestTFileSeqFileComparison extends TestCase {
   MyOptions options;
   MyOptions options;
@@ -86,12 +87,12 @@ public class TestTFileSeqFileComparison extends TestCase {
   }
   }
 
 
   public void startTime() throws IOException {
   public void startTime() throws IOException {
-    startTimeEpoch = System.currentTimeMillis();
+    startTimeEpoch = Time.now();
     System.out.println(formatTime() + " Started timing.");
     System.out.println(formatTime() + " Started timing.");
   }
   }
 
 
   public void stopTime() throws IOException {
   public void stopTime() throws IOException {
-    finishTimeEpoch = System.currentTimeMillis();
+    finishTimeEpoch = Time.now();
     System.out.println(formatTime() + " Stopped timing.");
     System.out.println(formatTime() + " Stopped timing.");
   }
   }
 
 
@@ -111,7 +112,7 @@ public class TestTFileSeqFileComparison extends TestCase {
   }
   }
 
 
   public String formatTime() {
   public String formatTime() {
-    return formatTime(System.currentTimeMillis());
+    return formatTime(Time.now());
   }
   }
 
 
   private interface KVAppendable {
   private interface KVAppendable {

+ 5 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/Timer.java

@@ -20,6 +20,8 @@ import java.io.IOException;
 import java.text.DateFormat;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.text.SimpleDateFormat;
 
 
+import org.apache.hadoop.util.Time;
+
 /**
 /**
  * this class is a time class to 
  * this class is a time class to 
  * measure to measure the time 
  * measure to measure the time 
@@ -31,11 +33,11 @@ public  class Timer {
   private DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
   private DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
   
   
   public void startTime() throws IOException {
   public void startTime() throws IOException {
-      startTimeEpoch = System.currentTimeMillis();
+      startTimeEpoch = Time.now();
     }
     }
 
 
     public void stopTime() throws IOException {
     public void stopTime() throws IOException {
-      finishTimeEpoch = System.currentTimeMillis();
+      finishTimeEpoch = Time.now();
     }
     }
 
 
     public long getIntervalMillis() throws IOException {
     public long getIntervalMillis() throws IOException {
@@ -56,7 +58,7 @@ public  class Timer {
     }
     }
     
     
     public String formatCurrentTime() {
     public String formatCurrentTime() {
-      return formatTime(System.currentTimeMillis());
+      return formatTime(Time.now());
     }
     }
 
 
 }
 }

+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.hadoop.util.Time;
 
 
 public class TestNativeIO {
 public class TestNativeIO {
   static final Log LOG = LogFactory.getLog(TestNativeIO.class);
   static final Log LOG = LogFactory.getLog(TestNativeIO.class);
@@ -88,8 +89,8 @@ public class TestNativeIO {
     for (int i = 0; i < 10; i++) {
     for (int i = 0; i < 10; i++) {
       Thread statter = new Thread() {
       Thread statter = new Thread() {
         public void run() {
         public void run() {
-          long et = System.currentTimeMillis() + 5000;
-          while (System.currentTimeMillis() < et) {
+          long et = Time.now() + 5000;
+          while (Time.now() < et) {
             try {
             try {
               NativeIO.Stat stat = NativeIO.fstat(fos.getFD());
               NativeIO.Stat stat = NativeIO.fstat(fos.getFD());
               assertEquals(System.getProperty("user.name"), stat.getOwner());
               assertEquals(System.getProperty("user.name"), stat.getOwner());

+ 5 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java

@@ -42,6 +42,7 @@ import org.apache.hadoop.security.token.TokenInfo;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.LogManager;
 
 
@@ -186,10 +187,10 @@ public class MiniRPCBenchmark {
   throws IOException {
   throws IOException {
     MiniProtocol client = null;
     MiniProtocol client = null;
     try {
     try {
-      long start = System.currentTimeMillis();
+      long start = Time.now();
       client = (MiniProtocol) RPC.getProxy(MiniProtocol.class,
       client = (MiniProtocol) RPC.getProxy(MiniProtocol.class,
           MiniProtocol.versionID, addr, conf);
           MiniProtocol.versionID, addr, conf);
-      long end = System.currentTimeMillis();
+      long end = Time.now();
       return end - start;
       return end - start;
     } finally {
     } finally {
       RPC.stopProxy(client);
       RPC.stopProxy(client);
@@ -231,7 +232,7 @@ public class MiniRPCBenchmark {
       final Configuration conf, final InetSocketAddress addr) throws IOException {
       final Configuration conf, final InetSocketAddress addr) throws IOException {
     MiniProtocol client = null;
     MiniProtocol client = null;
     try {
     try {
-      long start = System.currentTimeMillis();
+      long start = Time.now();
       try {
       try {
         client = currentUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
         client = currentUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
           public MiniProtocol run() throws IOException {
           public MiniProtocol run() throws IOException {
@@ -242,7 +243,7 @@ public class MiniRPCBenchmark {
       } catch (InterruptedException e) {
       } catch (InterruptedException e) {
         e.printStackTrace();
         e.printStackTrace();
       }
       }
-      long end = System.currentTimeMillis();
+      long end = Time.now();
       return end - start;
       return end - start;
     } finally {
     } finally {
       RPC.stopProxy(client);
       RPC.stopProxy(client);

+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.log;
 import junit.framework.TestCase;
 import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Appender;
 import org.apache.log4j.Appender;
 import org.apache.log4j.Category;
 import org.apache.log4j.Category;
 import org.apache.log4j.Level;
 import org.apache.log4j.Level;
@@ -63,7 +64,7 @@ public class TestLog4Json extends TestCase {
         new NoRouteToHostException("that box caught fire 3 years ago");
         new NoRouteToHostException("that box caught fire 3 years ago");
     ThrowableInformation ti = new ThrowableInformation(e);
     ThrowableInformation ti = new ThrowableInformation(e);
     Log4Json l4j = new Log4Json();
     Log4Json l4j = new Log4Json();
-    long timeStamp = System.currentTimeMillis();
+    long timeStamp = Time.now();
     String outcome = l4j.toJson(new StringWriter(),
     String outcome = l4j.toJson(new StringWriter(),
         "testException",
         "testException",
         timeStamp,
         timeStamp,
@@ -82,7 +83,7 @@ public class TestLog4Json extends TestCase {
     Exception ioe = new IOException("Datacenter problems", e);
     Exception ioe = new IOException("Datacenter problems", e);
     ThrowableInformation ti = new ThrowableInformation(ioe);
     ThrowableInformation ti = new ThrowableInformation(ioe);
     Log4Json l4j = new Log4Json();
     Log4Json l4j = new Log4Json();
-    long timeStamp = System.currentTimeMillis();
+    long timeStamp = Time.now();
     String outcome = l4j.toJson(new StringWriter(),
     String outcome = l4j.toJson(new StringWriter(),
         "testNestedException",
         "testNestedException",
         timeStamp,
         timeStamp,

+ 135 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java

@@ -18,13 +18,24 @@
 
 
 package org.apache.hadoop.metrics2.lib;
 package org.apache.hadoop.metrics2.lib;
 
 
-import org.junit.Test;
-import static org.mockito.Mockito.*;
-import static org.mockito.AdditionalMatchers.*;
+import static org.apache.hadoop.metrics2.lib.Interns.info;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.mockMetricsRecordBuilder;
+import static org.mockito.AdditionalMatchers.eq;
+import static org.mockito.AdditionalMatchers.geq;
+import static org.mockito.AdditionalMatchers.leq;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.util.Map;
+import java.util.Map.Entry;
 
 
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import static org.apache.hadoop.metrics2.lib.Interns.*;
-import static org.apache.hadoop.test.MetricsAsserts.*;
+import org.apache.hadoop.metrics2.util.Quantile;
+import org.junit.Test;
 
 
 /**
 /**
  * Test metrics record builder interface and mutable metrics
  * Test metrics record builder interface and mutable metrics
@@ -103,4 +114,123 @@ public class TestMutableMetrics {
     assertCounter("BarNumOps", 0L, rb);
     assertCounter("BarNumOps", 0L, rb);
     assertGauge("BarAvgTime", 0.0, rb);
     assertGauge("BarAvgTime", 0.0, rb);
   }
   }
+
+  /**
+   * Ensure that quantile estimates from {@link MutableQuantiles} are within
+   * specified error bounds.
+   */
+  @Test(timeout = 30000)
+  public void testMutableQuantilesError() throws Exception {
+    MetricsRecordBuilder mb = mockMetricsRecordBuilder();
+    MetricsRegistry registry = new MetricsRegistry("test");
+    // Use a 5s rollover period
+    MutableQuantiles quantiles = registry.newQuantiles("foo", "stat", "Ops",
+        "Latency", 5);
+    // Push some values in and wait for it to publish
+    long start = System.nanoTime() / 1000000;
+    for (long i = 1; i <= 1000; i++) {
+      quantiles.add(i);
+      quantiles.add(1001 - i);
+    }
+    long end = System.nanoTime() / 1000000;
+
+    Thread.sleep(6000 - (end - start));
+
+    registry.snapshot(mb, false);
+
+    // Print out the snapshot
+    Map<Quantile, Long> previousSnapshot = quantiles.previousSnapshot;
+    for (Entry<Quantile, Long> item : previousSnapshot.entrySet()) {
+      System.out.println(String.format("Quantile %.2f has value %d",
+          item.getKey().quantile, item.getValue()));
+    }
+
+    // Verify the results are within our requirements
+    verify(mb).addGauge(
+        info("FooNumOps", "Number of ops for stat with 5s interval"),
+        (long) 2000);
+    Quantile[] quants = MutableQuantiles.quantiles;
+    String name = "Foo%dthPercentile5sIntervalLatency";
+    String desc = "%d percentile latency with 5 second interval for stat";
+    for (Quantile q : quants) {
+      int percentile = (int) (100 * q.quantile);
+      int error = (int) (1000 * q.error);
+      String n = String.format(name, percentile);
+      String d = String.format(desc, percentile);
+      long expected = (long) (q.quantile * 1000);
+      verify(mb).addGauge(eq(info(n, d)), leq(expected + error));
+      verify(mb).addGauge(eq(info(n, d)), geq(expected - error));
+    }
+  }
+
+  /**
+   * Test that {@link MutableQuantiles} rolls the window over at the specified
+   * interval.
+   */
+  @Test(timeout = 30000)
+  public void testMutableQuantilesRollover() throws Exception {
+    MetricsRecordBuilder mb = mockMetricsRecordBuilder();
+    MetricsRegistry registry = new MetricsRegistry("test");
+    // Use a 5s rollover period
+    MutableQuantiles quantiles = registry.newQuantiles("foo", "stat", "Ops",
+        "Latency", 5);
+
+    Quantile[] quants = MutableQuantiles.quantiles;
+    String name = "Foo%dthPercentile5sIntervalLatency";
+    String desc = "%d percentile latency with 5 second interval for stat";
+
+    // Push values for three intervals
+    long start = System.nanoTime() / 1000000;
+    for (int i = 1; i <= 3; i++) {
+      // Insert the values
+      for (long j = 1; j <= 1000; j++) {
+        quantiles.add(i);
+      }
+      // Sleep until 1s after the next 5s interval, to let the metrics
+      // roll over
+      long sleep = (start + (5000 * i) + 1000) - (System.nanoTime() / 1000000);
+      Thread.sleep(sleep);
+      // Verify that the window reset, check it has the values we pushed in
+      registry.snapshot(mb, false);
+      for (Quantile q : quants) {
+        int percentile = (int) (100 * q.quantile);
+        String n = String.format(name, percentile);
+        String d = String.format(desc, percentile);
+        verify(mb).addGauge(info(n, d), (long) i);
+      }
+    }
+
+    // Verify the metrics were added the right number of times
+    verify(mb, times(3)).addGauge(
+        info("FooNumOps", "Number of ops for stat with 5s interval"),
+        (long) 1000);
+    for (Quantile q : quants) {
+      int percentile = (int) (100 * q.quantile);
+      String n = String.format(name, percentile);
+      String d = String.format(desc, percentile);
+      verify(mb, times(3)).addGauge(eq(info(n, d)), anyLong());
+    }
+  }
+
+  /**
+   * Test that {@link MutableQuantiles} rolls over correctly even if no items
+   * have been added to the window
+   */
+  @Test(timeout = 30000)
+  public void testMutableQuantilesEmptyRollover() throws Exception {
+    MetricsRecordBuilder mb = mockMetricsRecordBuilder();
+    MetricsRegistry registry = new MetricsRegistry("test");
+    // Use a 5s rollover period
+    MutableQuantiles quantiles = registry.newQuantiles("foo", "stat", "Ops",
+        "Latency", 5);
+
+    // Check it initially
+    quantiles.snapshot(mb, true);
+    verify(mb).addGauge(
+        info("FooNumOps", "Number of ops for stat with 5s interval"), (long) 0);
+    Thread.sleep(6000);
+    quantiles.snapshot(mb, false);
+    verify(mb, times(2)).addGauge(
+        info("FooNumOps", "Number of ops for stat with 5s interval"), (long) 0);
+  }
 }
 }

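The new tests above drive MutableQuantiles through MetricsRegistry.newQuantiles(). A small, hypothetical example of how a metrics source might publish a latency quantile metric with a 60 second rollover (class and metric names are illustrative):

    import org.apache.hadoop.metrics2.lib.MetricsRegistry;
    import org.apache.hadoop.metrics2.lib.MutableQuantiles;

    public class RpcLatencyMetrics {
      private final MetricsRegistry registry = new MetricsRegistry("RpcLatency");
      // Metric "rpcLatency", sample name "Ops", value name "Latency",
      // 60 second rollover interval, mirroring newQuantiles() as used above.
      private final MutableQuantiles rpcLatency =
          registry.newQuantiles("rpcLatency", "RPC processing time", "Ops",
              "Latency", 60);

      public void recordRpc(long latencyMillis) {
        // Each add() feeds one sample into the current interval's estimator;
        // the quantile gauges are published when the interval rolls over.
        rpcLatency.add(latencyMillis);
      }
    }
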
+ 125 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestSampleQuantiles.java

@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestSampleQuantiles {
+
+  static final Quantile[] quantiles = { new Quantile(0.50, 0.050),
+      new Quantile(0.75, 0.025), new Quantile(0.90, 0.010),
+      new Quantile(0.95, 0.005), new Quantile(0.99, 0.001) };
+
+  SampleQuantiles estimator;
+
+  @Before
+  public void init() {
+    estimator = new SampleQuantiles(quantiles);
+  }
+
+  /**
+   * Check that the counts of the number of items in the window and sample are
+   * incremented correctly as items are added.
+   */
+  @Test
+  public void testCount() throws IOException {
+    // Counts start off zero
+    assertEquals(estimator.getCount(), 0);
+    assertEquals(estimator.getSampleCount(), 0);
+    try {
+      estimator.snapshot();
+      fail("Expected IOException from empty window");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("No samples", e);
+    }
+
+    // Count increment correctly by 1
+    estimator.insert(1337);
+    assertEquals(estimator.getCount(), 1);
+    estimator.snapshot();
+    assertEquals(estimator.getSampleCount(), 1);
+  }
+
+  /**
+   * Check that counts and quantile estimates are correctly reset after a call
+   * to {@link SampleQuantiles#clear()}.
+   */
+  @Test
+  public void testClear() throws IOException {
+    for (int i = 0; i < 1000; i++) {
+      estimator.insert(i);
+    }
+    estimator.clear();
+    assertEquals(estimator.getCount(), 0);
+    assertEquals(estimator.getSampleCount(), 0);
+    try {
+      estimator.snapshot();
+      fail("Expected IOException for an empty window.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("No samples", e);
+    }
+  }
+
+  /**
+   * Correctness test that checks that absolute error of the estimate is within
+   * specified error bounds for some randomly permuted streams of items.
+   */
+  @Test
+  public void testQuantileError() throws IOException {
+    final int count = 100000;
+    Random r = new Random(0xDEADDEAD);
+    Long[] values = new Long[count];
+    for (int i = 0; i < count; i++) {
+      values[i] = (long) (i + 1);
+    }
+    // Do 10 shuffle/insert/check cycles
+    for (int i = 0; i < 10; i++) {
+      System.out.println("Starting run " + i);
+      Collections.shuffle(Arrays.asList(values), r);
+      estimator.clear();
+      for (int j = 0; j < count; j++) {
+        estimator.insert(values[j]);
+      }
+      Map<Quantile, Long> snapshot;
+      snapshot = estimator.snapshot();
+      for (Quantile q : quantiles) {
+        long actual = (long) (q.quantile * count);
+        long error = (long) (q.error * count);
+        long estimate = snapshot.get(q);
+        System.out
+            .println(String.format("Expected %d with error %d, estimated %d",
+                actual, error, estimate));
+        assertTrue(estimate <= actual + error);
+        assertTrue(estimate >= actual - error);
+      }
+    }
+  }
+}

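TestSampleQuantiles shows the expected SampleQuantiles contract: insert() feeds observations, snapshot() returns per-Quantile estimates (and throws IOException when the window is empty), and clear() resets the window. A short usage sketch built only from those calls; the variable names are illustrative:

    import java.io.IOException;
    import java.util.Map;
    import java.util.Random;
    import org.apache.hadoop.metrics2.util.Quantile;
    import org.apache.hadoop.metrics2.util.SampleQuantiles;

    public class SampleQuantilesExample {
      public static void main(String[] args) throws IOException {
        Quantile[] targets = { new Quantile(0.50, 0.050),
            new Quantile(0.99, 0.001) };
        SampleQuantiles estimator = new SampleQuantiles(targets);
        Random r = new Random();
        for (int i = 0; i < 100000; i++) {
          estimator.insert(r.nextInt(1000000));   // feed one observation
        }
        // snapshot() throws IOException if no samples have been inserted.
        Map<Quantile, Long> snapshot = estimator.snapshot();
        for (Quantile q : targets) {
          System.out.println(q.quantile + " quantile is approximately "
              + snapshot.get(q) + " (within rank error " + q.error + ")");
        }
        estimator.clear();   // reset before reusing for the next window
      }
    }
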
+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java

@@ -25,6 +25,7 @@ import javax.naming.NameNotFoundException;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.Time;
 
 
 import org.junit.Test;
 import org.junit.Test;
 import static org.junit.Assert.*;
 import static org.junit.Assert.*;
@@ -57,9 +58,9 @@ public class TestDNS {
     String hostname1 = DNS.getDefaultHost(DEFAULT);
     String hostname1 = DNS.getDefaultHost(DEFAULT);
     assertNotNull(hostname1);
     assertNotNull(hostname1);
     String hostname2 = DNS.getDefaultHost(DEFAULT);
     String hostname2 = DNS.getDefaultHost(DEFAULT);
-    long t1 = System.currentTimeMillis();
+    long t1 = Time.now();
     String hostname3 = DNS.getDefaultHost(DEFAULT);
     String hostname3 = DNS.getDefaultHost(DEFAULT);
-    long t2 = System.currentTimeMillis();
+    long t2 = Time.now();
     assertEquals(hostname3, hostname2);
     assertEquals(hostname3, hostname2);
     assertEquals(hostname2, hostname1);
     assertEquals(hostname2, hostname1);
     long interval = t2 - t1;
     long interval = t2 - t1;

+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MultithreadedTestUtil;
 import org.apache.hadoop.test.MultithreadedTestUtil;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
+import org.apache.hadoop.util.Time;
 
 
 import org.junit.Test;
 import org.junit.Test;
 import static org.junit.Assert.*;
 import static org.junit.Assert.*;
@@ -59,7 +60,7 @@ public class TestSocketIOWithTimeout {
     byte buf[] = new byte[4192];
     byte buf[] = new byte[4192];
     
     
     while (true) {
     while (true) {
-      long start = System.currentTimeMillis();
+      long start = Time.now();
       try {
       try {
         if (in != null) {
         if (in != null) {
           in.read(buf);
           in.read(buf);
@@ -67,7 +68,7 @@ public class TestSocketIOWithTimeout {
           out.write(buf);
           out.write(buf);
         }
         }
       } catch (SocketTimeoutException e) {
       } catch (SocketTimeoutException e) {
-        long diff = System.currentTimeMillis() - start;
+        long diff = Time.now() - start;
         LOG.info("Got SocketTimeoutException as expected after " + 
         LOG.info("Got SocketTimeoutException as expected after " + 
                  diff + " millis : " + e.getMessage());
                  diff + " millis : " + e.getMessage());
         assertTrue(Math.abs(expectedTimeout - diff) <=
         assertTrue(Math.abs(expectedTimeout - diff) <=

+ 2 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java

@@ -46,6 +46,7 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.Test;
 import org.junit.Test;
 
 
 import static org.junit.Assert.*;
 import static org.junit.Assert.*;
@@ -188,7 +189,7 @@ public class TestDelegationToken {
         }
         }
       }, AccessControlException.class);
       }, AccessControlException.class);
       long time = dtSecretManager.renewToken(token, "JobTracker");
       long time = dtSecretManager.renewToken(token, "JobTracker");
-      assertTrue("renew time is in future", time > System.currentTimeMillis());
+      assertTrue("renew time is in future", time > Time.now());
       TestDelegationTokenIdentifier identifier = 
       TestDelegationTokenIdentifier identifier = 
         new TestDelegationTokenIdentifier();
         new TestDelegationTokenIdentifier();
       byte[] tokenId = token.getIdentifier();
       byte[] tokenId = token.getIdentifier();

+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java

@@ -31,6 +31,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Layout;
 import org.apache.log4j.Layout;
 import org.apache.log4j.Logger;
 import org.apache.log4j.Logger;
 import org.apache.log4j.WriterAppender;
 import org.apache.log4j.WriterAppender;
@@ -94,7 +95,7 @@ public abstract class GenericTestUtils {
       int checkEveryMillis, int waitForMillis)
       int checkEveryMillis, int waitForMillis)
       throws TimeoutException, InterruptedException
       throws TimeoutException, InterruptedException
   {
   {
-    long st = System.currentTimeMillis();
+    long st = Time.now();
     do {
     do {
       boolean result = check.get();
       boolean result = check.get();
       if (result) {
       if (result) {
@@ -102,7 +103,7 @@ public abstract class GenericTestUtils {
       }
       }
       
       
       Thread.sleep(checkEveryMillis);
       Thread.sleep(checkEveryMillis);
-    } while (System.currentTimeMillis() - st < waitForMillis);
+    } while (Time.now() - st < waitForMillis);
     throw new TimeoutException("Timed out waiting for condition");
     throw new TimeoutException("Timed out waiting for condition");
   }
   }
   
   

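GenericTestUtils.waitFor() polls a condition at a fixed interval and throws TimeoutException once the deadline passes. A hedged usage example, assuming the Guava Supplier<Boolean>-based signature used by callers in this branch (the Supplier type is not visible in this diff):

    import java.util.concurrent.atomic.AtomicBoolean;
    import com.google.common.base.Supplier;
    import org.apache.hadoop.test.GenericTestUtils;

    public class WaitForExample {
      public static void main(String[] args) throws Exception {
        final AtomicBoolean done = new AtomicBoolean(false);
        new Thread(new Runnable() {
          @Override
          public void run() {
            try {
              Thread.sleep(500);
            } catch (InterruptedException ignored) {
            }
            done.set(true);   // the condition the test is waiting for
          }
        }).start();
        // Poll every 100 ms, give up (and throw TimeoutException) after 10 s.
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
          @Override
          public Boolean get() {
            return done.get();
          }
        }, 100, 10000);
      }
    }
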
+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java

@@ -22,6 +22,7 @@ import java.util.Set;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.Time;
 
 
 /**
 /**
  * A utility to easily test threaded/synchronized code.
  * A utility to easily test threaded/synchronized code.
@@ -109,10 +110,10 @@ public abstract class MultithreadedTestUtil {
      * have thrown up an error.
      * have thrown up an error.
      */
      */
     public synchronized void waitFor(long millis) throws Exception {
     public synchronized void waitFor(long millis) throws Exception {
-      long endTime = System.currentTimeMillis() + millis;
+      long endTime = Time.now() + millis;
       while (shouldRun() &&
       while (shouldRun() &&
              finishedThreads.size() < testThreads.size()) {
              finishedThreads.size() < testThreads.size()) {
-        long left = endTime - System.currentTimeMillis();
+        long left = endTime - Time.now();
         if (left <= 0) break;
         if (left <= 0) break;
         checkException();
         checkException();
         wait(left);
         wait(left);

+ 9 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestMultithreadedTestUtil.java

@@ -26,6 +26,7 @@ import org.junit.Test;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
+import org.apache.hadoop.util.Time;
 
 
 public class TestMultithreadedTestUtil {
 public class TestMultithreadedTestUtil {
 
 
@@ -47,9 +48,9 @@ public class TestMultithreadedTestUtil {
     }
     }
     assertEquals(0, threadsRun.get());
     assertEquals(0, threadsRun.get());
     ctx.startThreads();
     ctx.startThreads();
-    long st = System.currentTimeMillis();
+    long st = Time.now();
     ctx.waitFor(30000);
     ctx.waitFor(30000);
-    long et = System.currentTimeMillis();
+    long et = Time.now();
 
 
     // All threads should have run
     // All threads should have run
     assertEquals(3, threadsRun.get());
     assertEquals(3, threadsRun.get());
@@ -69,7 +70,7 @@ public class TestMultithreadedTestUtil {
       }
       }
     });
     });
     ctx.startThreads();
     ctx.startThreads();
-    long st = System.currentTimeMillis();
+    long st = Time.now();
     try {
     try {
       ctx.waitFor(30000);
       ctx.waitFor(30000);
       fail("waitFor did not throw");
       fail("waitFor did not throw");
@@ -77,7 +78,7 @@ public class TestMultithreadedTestUtil {
       // expected
       // expected
       assertEquals(FAIL_MSG, rte.getCause().getMessage());
       assertEquals(FAIL_MSG, rte.getCause().getMessage());
     }
     }
-    long et = System.currentTimeMillis();
+    long et = Time.now();
     // Test shouldn't have waited the full 30 seconds, since
     // Test shouldn't have waited the full 30 seconds, since
     // the thread throws faster than that
     // the thread throws faster than that
     assertTrue("Test took " + (et - st) + "ms",
     assertTrue("Test took " + (et - st) + "ms",
@@ -94,7 +95,7 @@ public class TestMultithreadedTestUtil {
       }
       }
     });
     });
     ctx.startThreads();
     ctx.startThreads();
-    long st = System.currentTimeMillis();
+    long st = Time.now();
     try {
     try {
       ctx.waitFor(30000);
       ctx.waitFor(30000);
       fail("waitFor did not throw");
       fail("waitFor did not throw");
@@ -102,7 +103,7 @@ public class TestMultithreadedTestUtil {
       // expected
       // expected
       assertEquals("my ioe", rte.getCause().getMessage());
       assertEquals("my ioe", rte.getCause().getMessage());
     }
     }
-    long et = System.currentTimeMillis();
+    long et = Time.now();
     // Test shouldn't have waited the full 30 seconds, since
     // Test shouldn't have waited the full 30 seconds, since
     // the thread throws faster than that
     // the thread throws faster than that
     assertTrue("Test took " + (et - st) + "ms",
     assertTrue("Test took " + (et - st) + "ms",
@@ -121,10 +122,10 @@ public class TestMultithreadedTestUtil {
       }
       }
     });
     });
     ctx.startThreads();
     ctx.startThreads();
-    long st = System.currentTimeMillis();
+    long st = Time.now();
     ctx.waitFor(3000);
     ctx.waitFor(3000);
     ctx.stop();
     ctx.stop();
-    long et = System.currentTimeMillis();
+    long et = Time.now();
     long elapsed = et - st;
     long elapsed = et - st;
 
 
     // Test should have waited just about 3 seconds
     // Test should have waited just about 3 seconds

+ 49 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java

@@ -124,4 +124,53 @@ public class TestDiskChecker {
     }
     }
     System.out.println("checkDir success: "+ success);
     System.out.println("checkDir success: "+ success);
   }
   }
+
+  /**
+   * These test cases test to test the creation of a local folder with correct
+   * permission for result of mapper.
+   */
+
+  @Test
+  public void testCheckDir_normal_local() throws Throwable {
+    _checkDirs(true, "755", true);
+  }
+
+  @Test
+  public void testCheckDir_notDir_local() throws Throwable {
+    _checkDirs(false, "000", false);
+  }
+
+  @Test
+  public void testCheckDir_notReadable_local() throws Throwable {
+    _checkDirs(true, "000", false);
+  }
+
+  @Test
+  public void testCheckDir_notWritable_local() throws Throwable {
+    _checkDirs(true, "444", false);
+  }
+
+  @Test
+  public void testCheckDir_notListable_local() throws Throwable {
+    _checkDirs(true, "666", false);
+  }
+
+  private void _checkDirs(boolean isDir, String perm, boolean success)
+      throws Throwable {
+    File localDir = File.createTempFile("test", "tmp");
+    localDir.delete();
+    localDir.mkdir();
+    Runtime.getRuntime().exec(
+	"chmod " + perm + "  " + localDir.getAbsolutePath()).waitFor();
+    try {
+      DiskChecker.checkDir(localDir);
+      assertTrue("checkDir success", success);
+    } catch (DiskErrorException e) {
+      e.printStackTrace();
+      assertFalse("checkDir success", success);
+    }
+    localDir.delete();
+    System.out.println("checkDir success: " + success);
+
+  }
 }
 }

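The new TestDiskChecker cases drive DiskChecker.checkDir(), which throws DiskErrorException when a local directory is missing or lacks read, write, or list permission. A minimal usage sketch (the directory name is illustrative):

    import java.io.File;
    import org.apache.hadoop.util.DiskChecker;
    import org.apache.hadoop.util.DiskChecker.DiskErrorException;

    public class LocalDirValidation {
      public static void main(String[] args) {
        File dir = new File(System.getProperty("java.io.tmpdir"), "mapper-output");
        dir.mkdirs();
        try {
          // Fails with DiskErrorException if the path is not a directory or is
          // not readable, writable and listable by the current user.
          DiskChecker.checkDir(dir);
          System.out.println(dir + " is usable");
        } catch (DiskErrorException e) {
          System.err.println(dir + " is not usable: " + e.getMessage());
        }
      }
    }
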
+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java

@@ -55,7 +55,7 @@ public class TestShell extends TestCase {
     testInterval(Long.MIN_VALUE / 60000);  // test a negative interval
     testInterval(Long.MIN_VALUE / 60000);  // test a negative interval
     testInterval(0L);  // test a zero interval
     testInterval(0L);  // test a zero interval
     testInterval(10L); // interval equal to 10mins
     testInterval(10L); // interval equal to 10mins
-    testInterval(System.currentTimeMillis() / 60000 + 60); // test a very big interval
+    testInterval(Time.now() / 60000 + 60); // test a very big interval
   }
   }
 
 
   /**
   /**

+ 11 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java

@@ -269,6 +269,17 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
     assertEquals("Yy", StringUtils.camelize("yY"));
     assertEquals("Yy", StringUtils.camelize("yY"));
     assertEquals("Zz", StringUtils.camelize("zZ"));
     assertEquals("Zz", StringUtils.camelize("zZ"));
   }
   }
+  
+  @Test
+  public void testStringToURI() {
+    String[] str = new String[] { "file://" };
+    try {
+      StringUtils.stringToURI(str);
+      fail("Ignoring URISyntaxException while creating URI from string file://");
+    } catch (IllegalArgumentException iae) {
+      assertEquals("Failed to create uri for file://", iae.getMessage());
+    }
+  }
 
 
   // Benchmark for StringUtils split
   // Benchmark for StringUtils split
   public static void main(String []args) {
   public static void main(String []args) {

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.java

@@ -22,6 +22,7 @@ import org.apache.hadoop.lib.server.BaseService;
 import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.service.Instrumentation;
 import org.apache.hadoop.lib.service.Instrumentation;
 import org.apache.hadoop.lib.service.Scheduler;
 import org.apache.hadoop.lib.service.Scheduler;
+import org.apache.hadoop.util.Time;
 import org.json.simple.JSONAware;
 import org.json.simple.JSONAware;
 import org.json.simple.JSONObject;
 import org.json.simple.JSONObject;
 import org.json.simple.JSONStreamAware;
 import org.json.simple.JSONStreamAware;
@@ -164,10 +165,10 @@ public class InstrumentationService extends BaseService implements Instrumentati
         throw new IllegalStateException("Cron already used");
         throw new IllegalStateException("Cron already used");
       }
       }
       if (start == 0) {
       if (start == 0) {
-        start = System.currentTimeMillis();
+        start = Time.now();
         lapStart = start;
         lapStart = start;
       } else if (lapStart == 0) {
       } else if (lapStart == 0) {
-        lapStart = System.currentTimeMillis();
+        lapStart = Time.now();
       }
       }
       return this;
       return this;
     }
     }
@@ -177,7 +178,7 @@ public class InstrumentationService extends BaseService implements Instrumentati
         throw new IllegalStateException("Cron already used");
         throw new IllegalStateException("Cron already used");
       }
       }
       if (lapStart > 0) {
       if (lapStart > 0) {
-        own += System.currentTimeMillis() - lapStart;
+        own += Time.now() - lapStart;
         lapStart = 0;
         lapStart = 0;
       }
       }
       return this;
       return this;
@@ -185,7 +186,7 @@ public class InstrumentationService extends BaseService implements Instrumentati
 
 
     void end() {
     void end() {
       stop();
       stop();
-      total = System.currentTimeMillis() - start;
+      total = Time.now() - start;
     }
     }
 
 
   }
   }

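For reference, the Cron hunks above implement a two-level stopwatch: "own" accumulates time across start()/stop() laps, while "total" is the wall time from the first start() to end(). A stripped-down sketch reconstructed from the lines shown (the field names follow the diff, but the class itself is illustrative):

    import org.apache.hadoop.util.Time;

    public class CronSketch {
      private long start;     // first time the cron was started
      private long lapStart;  // start of the current lap, 0 while stopped
      private long own;       // time accumulated over completed laps
      private long total;     // wall time from first start() to end()

      public CronSketch start() {
        if (start == 0) {
          start = Time.now();
          lapStart = start;
        } else if (lapStart == 0) {
          lapStart = Time.now();
        }
        return this;
      }

      public CronSketch stop() {
        if (lapStart > 0) {
          own += Time.now() - lapStart;
          lapStart = 0;
        }
        return this;
      }

      public void end() {
        stop();
        total = Time.now() - start;
      }

      public long getOwn()   { return own; }
      public long getTotal() { return total; }
    }
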
+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/scheduler/SchedulerService.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.service.Instrumentation;
 import org.apache.hadoop.lib.service.Instrumentation;
 import org.apache.hadoop.lib.service.Scheduler;
 import org.apache.hadoop.lib.service.Scheduler;
 import org.apache.hadoop.lib.util.Check;
 import org.apache.hadoop.lib.util.Check;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 
 
@@ -59,11 +60,11 @@ public class SchedulerService extends BaseService implements Scheduler {
   @Override
   @Override
   public void destroy() {
   public void destroy() {
     try {
     try {
-      long limit = System.currentTimeMillis() + 30 * 1000;
+      long limit = Time.now() + 30 * 1000;
       scheduler.shutdownNow();
       scheduler.shutdownNow();
       while (!scheduler.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
       while (!scheduler.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
         LOG.debug("Waiting for scheduler to shutdown");
         LOG.debug("Waiting for scheduler to shutdown");
-        if (System.currentTimeMillis() > limit) {
+        if (Time.now() > limit) {
           LOG.warn("Gave up waiting for scheduler to shutdown");
           LOG.warn("Gave up waiting for scheduler to shutdown");
           break;
           break;
         }
         }
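
Editor's note: the change above keeps the 30-second shutdown deadline but computes it from Time.now(). A hedged, self-contained sketch of the same bounded-wait pattern against a plain java.util.concurrent executor (the deadline check and the log messages are illustrative only):

// Hedged sketch of a bounded wait for executor shutdown, as in destroy() above.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ShutdownWaitSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
    long limit = System.currentTimeMillis() + 30 * 1000;   // Time.now() in Hadoop
    scheduler.shutdownNow();
    while (!scheduler.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
      System.out.println("Waiting for scheduler to shutdown");
      if (System.currentTimeMillis() > limit) {
        System.out.println("Gave up waiting for scheduler to shutdown");
        break;
      }
    }
  }
}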

+ 1 - 4
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java

@@ -42,10 +42,7 @@ public class InputStreamEntity implements StreamingOutput {
 
 
   @Override
   @Override
   public void write(OutputStream os) throws IOException {
   public void write(OutputStream os) throws IOException {
-    long skipped = is.skip(offset);
-    if (skipped < offset) {
-      throw new IOException("Requested offset beyond stream size");
-    }
+    IOUtils.skipFully(is, offset);
     if (len == -1) {
     if (len == -1) {
       IOUtils.copyBytes(is, os, 4096, true);
       IOUtils.copyBytes(is, os, 4096, true);
     } else {
     } else {
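
Editor's note: InputStream#skip may legitimately skip fewer bytes than requested without having reached end of stream, so the removed check could throw spuriously; IOUtils.skipFully instead retries until the full offset is consumed or a real EOF is hit (see HDFS-3492 below). A hedged sketch of such a helper; the real org.apache.hadoop.io.IOUtils method may differ in detail:

// Hedged sketch of skipFully: loop on skip() and probe with read() to
// distinguish a stalled stream from end of stream.
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

public class SkipFullySketch {
  public static void skipFully(InputStream in, long len) throws IOException {
    while (len > 0) {
      long skipped = in.skip(len);
      if (skipped <= 0) {
        if (in.read() == -1) {
          throw new EOFException("Premature EOF from inputStream");
        }
        skipped = 1;   // the probe consumed one byte
      }
      len -= skipped;
    }
  }
}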

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml

@@ -104,7 +104,7 @@
       The HTTP Kerberos principal used by HttpFS in the HTTP endpoint.
       The HTTP Kerberos principal used by HttpFS in the HTTP endpoint.
 
 
       The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
       The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPENGO specification.
+      HTTP SPNEGO specification.
     </description>
     </description>
   </property>
   </property>
 
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm

@@ -37,7 +37,7 @@ $ curl "http://<HTTFS_HOST>:14000/webhdfs/v1?op=homedir&user.name=babu"
 
 
 ** Kerberos HTTP SPNEGO Authentication
 ** Kerberos HTTP SPNEGO Authentication
 
 
-  Kerberos HTTP SPENGO authentication requires a tool or library supporting
+  Kerberos HTTP SPNEGO authentication requires a tool or library supporting
   Kerberos HTTP SPNEGO protocol.
   Kerberos HTTP SPNEGO protocol.
 
 
   IMPORTANT: If using <<<curl>>>, the <<<curl>>> version being used must support
   IMPORTANT: If using <<<curl>>>, the <<<curl>>> version being used must support

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm

@@ -72,7 +72,7 @@ Hadoop HDFS over HTTP - Documentation Sets ${project.version}
   HttpFS uses a clean HTTP REST API making its use with HTTP tools more
   HttpFS uses a clean HTTP REST API making its use with HTTP tools more
   intuitive.
   intuitive.
 
 
-  HttpFS supports Hadoop pseudo authentication, Kerberos SPENGOS authentication
+  HttpFS supports Hadoop pseudo authentication, Kerberos SPNEGOS authentication
   and Hadoop proxy users. Hadoop HDFS proxy did not.
   and Hadoop proxy users. Hadoop HDFS proxy did not.
 
 
 * User and Developer Documentation
 * User and Developer Documentation

+ 17 - 16
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.test.HTestCase;
 import org.apache.hadoop.test.TestDir;
 import org.apache.hadoop.test.TestDir;
 import org.apache.hadoop.test.TestDirHelper;
 import org.apache.hadoop.test.TestDirHelper;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.json.simple.JSONObject;
 import org.json.simple.JSONObject;
 import org.json.simple.parser.JSONParser;
 import org.json.simple.parser.JSONParser;
 import org.junit.Test;
 import org.junit.Test;
@@ -50,26 +51,26 @@ public class TestInstrumentationService extends HTestCase {
     Assert.assertEquals(cron.lapStart, 0);
     Assert.assertEquals(cron.lapStart, 0);
     Assert.assertEquals(cron.own, 0);
     Assert.assertEquals(cron.own, 0);
     Assert.assertEquals(cron.total, 0);
     Assert.assertEquals(cron.total, 0);
-    long begin = System.currentTimeMillis();
+    long begin = Time.now();
     Assert.assertEquals(cron.start(), cron);
     Assert.assertEquals(cron.start(), cron);
     Assert.assertEquals(cron.start(), cron);
     Assert.assertEquals(cron.start(), cron);
     Assert.assertEquals(cron.start, begin, 20);
     Assert.assertEquals(cron.start, begin, 20);
     Assert.assertEquals(cron.start, cron.lapStart);
     Assert.assertEquals(cron.start, cron.lapStart);
     sleep(100);
     sleep(100);
     Assert.assertEquals(cron.stop(), cron);
     Assert.assertEquals(cron.stop(), cron);
-    long end = System.currentTimeMillis();
+    long end = Time.now();
     long delta = end - begin;
     long delta = end - begin;
     Assert.assertEquals(cron.own, delta, 20);
     Assert.assertEquals(cron.own, delta, 20);
     Assert.assertEquals(cron.total, 0);
     Assert.assertEquals(cron.total, 0);
     Assert.assertEquals(cron.lapStart, 0);
     Assert.assertEquals(cron.lapStart, 0);
     sleep(100);
     sleep(100);
-    long reStart = System.currentTimeMillis();
+    long reStart = Time.now();
     cron.start();
     cron.start();
     Assert.assertEquals(cron.start, begin, 20);
     Assert.assertEquals(cron.start, begin, 20);
     Assert.assertEquals(cron.lapStart, reStart, 20);
     Assert.assertEquals(cron.lapStart, reStart, 20);
     sleep(100);
     sleep(100);
     cron.stop();
     cron.stop();
-    long reEnd = System.currentTimeMillis();
+    long reEnd = Time.now();
     delta += reEnd - reStart;
     delta += reEnd - reStart;
     Assert.assertEquals(cron.own, delta, 20);
     Assert.assertEquals(cron.own, delta, 20);
     Assert.assertEquals(cron.total, 0);
     Assert.assertEquals(cron.total, 0);
@@ -109,22 +110,22 @@ public class TestInstrumentationService extends HTestCase {
     long avgOwn;
     long avgOwn;
 
 
     cron.start();
     cron.start();
-    ownStart = System.currentTimeMillis();
+    ownStart = Time.now();
     totalStart = ownStart;
     totalStart = ownStart;
     ownDelta = 0;
     ownDelta = 0;
     sleep(100);
     sleep(100);
 
 
     cron.stop();
     cron.stop();
-    ownEnd = System.currentTimeMillis();
+    ownEnd = Time.now();
     ownDelta += ownEnd - ownStart;
     ownDelta += ownEnd - ownStart;
     sleep(100);
     sleep(100);
 
 
     cron.start();
     cron.start();
-    ownStart = System.currentTimeMillis();
+    ownStart = Time.now();
     sleep(100);
     sleep(100);
 
 
     cron.stop();
     cron.stop();
-    ownEnd = System.currentTimeMillis();
+    ownEnd = Time.now();
     ownDelta += ownEnd - ownStart;
     ownDelta += ownEnd - ownStart;
     totalEnd = ownEnd;
     totalEnd = ownEnd;
     totalDelta = totalEnd - totalStart;
     totalDelta = totalEnd - totalStart;
@@ -142,22 +143,22 @@ public class TestInstrumentationService extends HTestCase {
     cron = new InstrumentationService.Cron();
     cron = new InstrumentationService.Cron();
 
 
     cron.start();
     cron.start();
-    ownStart = System.currentTimeMillis();
+    ownStart = Time.now();
     totalStart = ownStart;
     totalStart = ownStart;
     ownDelta = 0;
     ownDelta = 0;
     sleep(200);
     sleep(200);
 
 
     cron.stop();
     cron.stop();
-    ownEnd = System.currentTimeMillis();
+    ownEnd = Time.now();
     ownDelta += ownEnd - ownStart;
     ownDelta += ownEnd - ownStart;
     sleep(200);
     sleep(200);
 
 
     cron.start();
     cron.start();
-    ownStart = System.currentTimeMillis();
+    ownStart = Time.now();
     sleep(200);
     sleep(200);
 
 
     cron.stop();
     cron.stop();
-    ownEnd = System.currentTimeMillis();
+    ownEnd = Time.now();
     ownDelta += ownEnd - ownStart;
     ownDelta += ownEnd - ownStart;
     totalEnd = ownEnd;
     totalEnd = ownEnd;
     totalDelta = totalEnd - totalStart;
     totalDelta = totalEnd - totalStart;
@@ -178,22 +179,22 @@ public class TestInstrumentationService extends HTestCase {
     cron = new InstrumentationService.Cron();
     cron = new InstrumentationService.Cron();
 
 
     cron.start();
     cron.start();
-    ownStart = System.currentTimeMillis();
+    ownStart = Time.now();
     totalStart = ownStart;
     totalStart = ownStart;
     ownDelta = 0;
     ownDelta = 0;
     sleep(300);
     sleep(300);
 
 
     cron.stop();
     cron.stop();
-    ownEnd = System.currentTimeMillis();
+    ownEnd = Time.now();
     ownDelta += ownEnd - ownStart;
     ownDelta += ownEnd - ownStart;
     sleep(300);
     sleep(300);
 
 
     cron.start();
     cron.start();
-    ownStart = System.currentTimeMillis();
+    ownStart = Time.now();
     sleep(300);
     sleep(300);
 
 
     cron.stop();
     cron.stop();
-    ownEnd = System.currentTimeMillis();
+    ownEnd = Time.now();
     ownDelta += ownEnd - ownStart;
     ownDelta += ownEnd - ownStart;
     totalEnd = ownEnd;
     totalEnd = ownEnd;
     totalDelta = totalEnd - totalStart;
     totalDelta = totalEnd - totalStart;
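
Editor's note: these assertions compare measured durations against expectations with a small tolerance, since sleep() and millisecond clock reads are only approximate. A hedged, stand-alone sketch of the same lap-accounting idea being checked (the 50 ms slack is an illustrative choice, not the test's value):

// Hedged sketch: accumulate "own" time across two start/stop laps, ignoring
// the pause between them, then compare against the expected total.
public class LapTimingSketch {
  public static void main(String[] args) throws InterruptedException {
    long own = 0;

    long lapStart = System.currentTimeMillis();   // first lap
    Thread.sleep(100);
    own += System.currentTimeMillis() - lapStart;

    Thread.sleep(100);                            // paused; not counted

    lapStart = System.currentTimeMillis();        // second lap
    Thread.sleep(100);
    own += System.currentTimeMillis() - lapStart;

    System.out.println("own ms: " + own + " (expected ~200, allow ~50 ms jitter)");
  }
}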

+ 10 - 8
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java

@@ -18,6 +18,8 @@
 package org.apache.hadoop.test;
 package org.apache.hadoop.test;
 
 
 import junit.framework.Assert;
 import junit.framework.Assert;
+
+import org.apache.hadoop.util.Time;
 import org.junit.Rule;
 import org.junit.Rule;
 import org.junit.rules.MethodRule;
 import org.junit.rules.MethodRule;
 
 
@@ -142,18 +144,18 @@ public abstract class HTestCase {
    *         to <code>true</code>.
    *         to <code>true</code>.
    */
    */
   protected long waitFor(int timeout, boolean failIfTimeout, Predicate predicate) {
   protected long waitFor(int timeout, boolean failIfTimeout, Predicate predicate) {
-    long started = System.currentTimeMillis();
-    long mustEnd = System.currentTimeMillis() + (long) (getWaitForRatio() * timeout);
+    long started = Time.now();
+    long mustEnd = Time.now() + (long) (getWaitForRatio() * timeout);
     long lastEcho = 0;
     long lastEcho = 0;
     try {
     try {
-      long waiting = mustEnd - System.currentTimeMillis();
+      long waiting = mustEnd - Time.now();
       System.out.println(MessageFormat.format("Waiting up to [{0}] msec", waiting));
       System.out.println(MessageFormat.format("Waiting up to [{0}] msec", waiting));
       boolean eval;
       boolean eval;
-      while (!(eval = predicate.evaluate()) && System.currentTimeMillis() < mustEnd) {
-        if ((System.currentTimeMillis() - lastEcho) > 5000) {
-          waiting = mustEnd - System.currentTimeMillis();
+      while (!(eval = predicate.evaluate()) && Time.now() < mustEnd) {
+        if ((Time.now() - lastEcho) > 5000) {
+          waiting = mustEnd - Time.now();
           System.out.println(MessageFormat.format("Waiting up to [{0}] msec", waiting));
           System.out.println(MessageFormat.format("Waiting up to [{0}] msec", waiting));
-          lastEcho = System.currentTimeMillis();
+          lastEcho = Time.now();
         }
         }
         Thread.sleep(100);
         Thread.sleep(100);
       }
       }
@@ -164,7 +166,7 @@ public abstract class HTestCase {
           System.out.println(MessageFormat.format("Waiting timed out after [{0}] msec", timeout));
           System.out.println(MessageFormat.format("Waiting timed out after [{0}] msec", timeout));
         }
         }
       }
       }
-      return (eval) ? System.currentTimeMillis() - started : -1;
+      return (eval) ? Time.now() - started : -1;
     } catch (Exception ex) {
     } catch (Exception ex) {
       throw new RuntimeException(ex);
       throw new RuntimeException(ex);
     }
     }
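
Editor's note: waitFor polls a Predicate every 100 ms until it evaluates true or the (ratio-scaled) deadline passes, returning the elapsed wait or -1 on timeout. A hedged, self-contained sketch of that polling loop, using a plain BooleanSupplier in place of the test framework's Predicate:

// Hedged sketch of a poll-until-true helper with a timeout, as in waitFor above.
import java.util.function.BooleanSupplier;

public class WaitForSketch {
  /** Returns elapsed millis once the condition holds, or -1 on timeout. */
  static long waitFor(long timeoutMs, BooleanSupplier condition) throws InterruptedException {
    long started = System.currentTimeMillis();
    long mustEnd = started + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() >= mustEnd) {
        return -1;
      }
      Thread.sleep(100);
    }
    return System.currentTimeMillis() - started;
  }

  public static void main(String[] args) throws InterruptedException {
    long deadline = System.currentTimeMillis() + 300;
    long waited = waitFor(1000, () -> System.currentTimeMillis() >= deadline);
    System.out.println("waited ms: " + waited);
  }
}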

+ 11 - 10
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java

@@ -22,6 +22,7 @@ import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Time;
 import org.junit.Test;
 import org.junit.Test;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.Context;
@@ -73,13 +74,13 @@ public class TestHFSTestCase extends HFSTestCase {
 
 
   @Test
   @Test
   public void waitFor() {
   public void waitFor() {
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     long waited = waitFor(1000, new Predicate() {
     long waited = waitFor(1000, new Predicate() {
       public boolean evaluate() throws Exception {
       public boolean evaluate() throws Exception {
         return true;
         return true;
       }
       }
     });
     });
-    long end = System.currentTimeMillis();
+    long end = Time.now();
     Assert.assertEquals(waited, 0, 50);
     Assert.assertEquals(waited, 0, 50);
     Assert.assertEquals(end - start - waited, 0, 50);
     Assert.assertEquals(end - start - waited, 0, 50);
   }
   }
@@ -87,13 +88,13 @@ public class TestHFSTestCase extends HFSTestCase {
   @Test
   @Test
   public void waitForTimeOutRatio1() {
   public void waitForTimeOutRatio1() {
     setWaitForRatio(1);
     setWaitForRatio(1);
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     long waited = waitFor(200, new Predicate() {
     long waited = waitFor(200, new Predicate() {
       public boolean evaluate() throws Exception {
       public boolean evaluate() throws Exception {
         return false;
         return false;
       }
       }
     });
     });
-    long end = System.currentTimeMillis();
+    long end = Time.now();
     Assert.assertEquals(waited, -1);
     Assert.assertEquals(waited, -1);
     Assert.assertEquals(end - start, 200, 50);
     Assert.assertEquals(end - start, 200, 50);
   }
   }
@@ -101,13 +102,13 @@ public class TestHFSTestCase extends HFSTestCase {
   @Test
   @Test
   public void waitForTimeOutRatio2() {
   public void waitForTimeOutRatio2() {
     setWaitForRatio(2);
     setWaitForRatio(2);
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     long waited = waitFor(200, new Predicate() {
     long waited = waitFor(200, new Predicate() {
       public boolean evaluate() throws Exception {
       public boolean evaluate() throws Exception {
         return false;
         return false;
       }
       }
     });
     });
-    long end = System.currentTimeMillis();
+    long end = Time.now();
     Assert.assertEquals(waited, -1);
     Assert.assertEquals(waited, -1);
     Assert.assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio());
     Assert.assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio());
   }
   }
@@ -115,18 +116,18 @@ public class TestHFSTestCase extends HFSTestCase {
   @Test
   @Test
   public void sleepRatio1() {
   public void sleepRatio1() {
     setWaitForRatio(1);
     setWaitForRatio(1);
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     sleep(100);
     sleep(100);
-    long end = System.currentTimeMillis();
+    long end = Time.now();
     Assert.assertEquals(end - start, 100, 50);
     Assert.assertEquals(end - start, 100, 50);
   }
   }
 
 
   @Test
   @Test
   public void sleepRatio2() {
   public void sleepRatio2() {
     setWaitForRatio(1);
     setWaitForRatio(1);
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     sleep(100);
     sleep(100);
-    long end = System.currentTimeMillis();
+    long end = Time.now();
     Assert.assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio());
     Assert.assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio());
   }
   }
 
 

+ 12 - 10
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java

@@ -19,6 +19,8 @@
 package org.apache.hadoop.test;
 package org.apache.hadoop.test;
 
 
 import junit.framework.Assert;
 import junit.framework.Assert;
+
+import org.apache.hadoop.util.Time;
 import org.junit.Test;
 import org.junit.Test;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.Context;
@@ -58,13 +60,13 @@ public class TestHTestCase extends HTestCase {
 
 
   @Test
   @Test
   public void waitFor() {
   public void waitFor() {
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     long waited = waitFor(1000, new Predicate() {
     long waited = waitFor(1000, new Predicate() {
       public boolean evaluate() throws Exception {
       public boolean evaluate() throws Exception {
         return true;
         return true;
       }
       }
     });
     });
-    long end = System.currentTimeMillis();
+    long end = Time.now();
     Assert.assertEquals(waited, 0, 50);
     Assert.assertEquals(waited, 0, 50);
     Assert.assertEquals(end - start - waited, 0, 50);
     Assert.assertEquals(end - start - waited, 0, 50);
   }
   }
@@ -72,13 +74,13 @@ public class TestHTestCase extends HTestCase {
   @Test
   @Test
   public void waitForTimeOutRatio1() {
   public void waitForTimeOutRatio1() {
     setWaitForRatio(1);
     setWaitForRatio(1);
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     long waited = waitFor(200, new Predicate() {
     long waited = waitFor(200, new Predicate() {
       public boolean evaluate() throws Exception {
       public boolean evaluate() throws Exception {
         return false;
         return false;
       }
       }
     });
     });
-    long end = System.currentTimeMillis();
+    long end = Time.now();
     Assert.assertEquals(waited, -1);
     Assert.assertEquals(waited, -1);
     Assert.assertEquals(end - start, 200, 50);
     Assert.assertEquals(end - start, 200, 50);
   }
   }
@@ -86,13 +88,13 @@ public class TestHTestCase extends HTestCase {
   @Test
   @Test
   public void waitForTimeOutRatio2() {
   public void waitForTimeOutRatio2() {
     setWaitForRatio(2);
     setWaitForRatio(2);
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     long waited = waitFor(200, new Predicate() {
     long waited = waitFor(200, new Predicate() {
       public boolean evaluate() throws Exception {
       public boolean evaluate() throws Exception {
         return false;
         return false;
       }
       }
     });
     });
-    long end = System.currentTimeMillis();
+    long end = Time.now();
     Assert.assertEquals(waited, -1);
     Assert.assertEquals(waited, -1);
     Assert.assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio());
     Assert.assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio());
   }
   }
@@ -100,18 +102,18 @@ public class TestHTestCase extends HTestCase {
   @Test
   @Test
   public void sleepRatio1() {
   public void sleepRatio1() {
     setWaitForRatio(1);
     setWaitForRatio(1);
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     sleep(100);
     sleep(100);
-    long end = System.currentTimeMillis();
+    long end = Time.now();
     Assert.assertEquals(end - start, 100, 50);
     Assert.assertEquals(end - start, 100, 50);
   }
   }
 
 
   @Test
   @Test
   public void sleepRatio2() {
   public void sleepRatio2() {
     setWaitForRatio(1);
     setWaitForRatio(1);
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     sleep(100);
     sleep(100);
-    long end = System.currentTimeMillis();
+    long end = Time.now();
     Assert.assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio());
     Assert.assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio());
   }
   }
 
 

+ 65 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -110,6 +110,55 @@ Release 2.0.1-alpha - UNRELEASED
     HDFS-3067. NPE in DFSInputStream.readBuffer if read is repeated on
     HDFS-3067. NPE in DFSInputStream.readBuffer if read is repeated on
     corrupted block. (Henry Robinson via atm)
     corrupted block. (Henry Robinson via atm)
 
 
+    HDFS-3555. idle client socket triggers DN ERROR log
+    (should be INFO or DEBUG). (Andy Isaacson via harsh)
+
+    HDFS-3568. fuse_dfs: add support for security. (Colin McCabe via atm)
+
+    HDFS-3629. Fix the typo in the error message about inconsistent
+    storage layout version. (Brandon Li via harsh)
+
+    HDFS-3613. GSet prints some INFO level values, which aren't
+    really very useful to all (Andrew Wang via harsh)
+
+    HDFS-3611. NameNode prints unnecessary WARNs about edit log normally skipping a few bytes. (Colin Patrick McCabe via harsh)
+
+    HDFS-3582. Hook daemon process exit for testing. (eli)
+
+    HDFS-3641. Move server Util time methods to common and use now
+    instead of System#currentTimeMillis. (eli)
+
+    HDFS-3633. libhdfs: hdfsDelete should pass JNI_FALSE or JNI_TRUE.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-799. libhdfs must call DetachCurrentThread when a thread is destroyed.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3306. fuse_dfs: don't lock release operations.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3612. Single namenode image directory config warning can
+    be improved. (Andy Isaacson via harsh)
+
+    HDFS-3606. libhdfs: create self-contained unit test.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3539. libhdfs code cleanups. (Colin Patrick McCabe via eli)
+
+    HDFS-3610. fuse_dfs: Provide a way to use the default (configured) NN URI.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3663. MiniDFSCluster should capture the code path that led to
+    the first ExitException. (eli)
+
+    HDFS-3659. Add missing @Override to methods across the hadoop-hdfs
+    project. (Brandon Li via harsh)
+
+    HDFS-3537. Move libhdfs and fuse-dfs source to native subdirectories.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3665. Add a test for renaming across file systems via a symlink. (eli)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
     HDFS-2982. Startup performance suffers when there are many edit log
     HDFS-2982. Startup performance suffers when there are many edit log
@@ -286,6 +335,22 @@ Release 2.0.1-alpha - UNRELEASED
     HDFS-3548. NamenodeFsck.copyBlock fails to create a Block Reader.
     HDFS-3548. NamenodeFsck.copyBlock fails to create a Block Reader.
     (Colin Patrick McCabe via eli)
     (Colin Patrick McCabe via eli)
 
 
+    HDFS-3615. Two BlockTokenSecretManager findbugs warnings. (atm)
+
+    HDFS-3639. JspHelper#getUGI should always verify the token if
+    security is enabled. (eli)
+
+    HDFS-470. libhdfs should handle 0-length reads from FSInputStream
+    correctly. (Colin Patrick McCabe via eli)
+
+    HDFS-3492. fix some misuses of InputStream#skip.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3609. libhdfs: don't force the URI to look like hdfs://hostname:port.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3654. TestJspHelper#testGetUgi fails with NPE. (eli)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)

+ 12 - 7
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -411,18 +411,23 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   </target>
                   </target>
                 </configuration>
                 </configuration>
               </execution>
               </execution>
-              <!-- TODO wire here native testcases
               <execution>
               <execution>
-                <id>test</id>
+                <id>native_tests</id>
                 <phase>test</phase>
                 <phase>test</phase>
-                <goals>
-                  <goal>test</goal>
-                </goals>
+                <goals><goal>run</goal></goals>
                 <configuration>
                 <configuration>
-                  <destDir>${project.build.directory}/native/target</destDir>
+                  <target>
+                    <property name="compile_classpath" refid="maven.compile.classpath"/>
+                    <property name="test_classpath" refid="maven.test.classpath"/>
+                    <exec executable="${project.build.directory}/native/test_libhdfs_threaded" dir="${project.build.directory}/native/" failonerror="true">
+                      <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
+                    </exec>
+                    <exec executable="${project.build.directory}/native/test_native_mini_dfs" dir="${project.build.directory}/native/" failonerror="true">
+                      <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
+                    </exec>
+                  </target>
                 </configuration>
                 </configuration>
               </execution>
               </execution>
-              -->
             </executions>
             </executions>
           </plugin>
           </plugin>
         </plugins>
         </plugins>

+ 42 - 16
hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt

@@ -67,6 +67,12 @@ function(FLATTEN_LIST INPUT SEPARATOR OUTPUT)
   set (${OUTPUT} "${_TMPS}" PARENT_SCOPE)
   set (${OUTPUT} "${_TMPS}" PARENT_SCOPE)
 endfunction()
 endfunction()
 
 
+# Check to see if our compiler and linker support the __thread attribute.
+# On Linux and some other operating systems, this is a more efficient 
+# alternative to POSIX thread local storage. 
+INCLUDE(CheckCSourceCompiles)
+CHECK_C_SOURCE_COMPILES("int main(void) { static __thread int i = 0; return 0; }" HAVE_BETTER_TLS)
+
 find_package(JNI REQUIRED)
 find_package(JNI REQUIRED)
 if (NOT GENERATED_JAVAH)
 if (NOT GENERATED_JAVAH)
     # Must identify where the generated headers have been placed
     # Must identify where the generated headers have been placed
@@ -81,15 +87,15 @@ include_directories(
     ${CMAKE_CURRENT_SOURCE_DIR}
     ${CMAKE_CURRENT_SOURCE_DIR}
     ${CMAKE_BINARY_DIR}
     ${CMAKE_BINARY_DIR}
     ${JNI_INCLUDE_DIRS}
     ${JNI_INCLUDE_DIRS}
-    main/native/
+    main/native/libhdfs
 )
 )
 
 
 set(_FUSE_DFS_VERSION 0.1.0)
 set(_FUSE_DFS_VERSION 0.1.0)
 CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
 CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
 
 
 add_dual_library(hdfs
 add_dual_library(hdfs
-    main/native/hdfs.c
-    main/native/hdfsJniHelper.c
+    main/native/libhdfs/hdfs.c
+    main/native/libhdfs/jni_helper.c
 )
 )
 target_link_dual_libraries(hdfs
 target_link_dual_libraries(hdfs
     ${JAVA_JVM_LIBRARY}
     ${JAVA_JVM_LIBRARY}
@@ -99,31 +105,51 @@ set(LIBHDFS_VERSION "0.0.0")
 set_target_properties(hdfs PROPERTIES
 set_target_properties(hdfs PROPERTIES
     SOVERSION ${LIBHDFS_VERSION})
     SOVERSION ${LIBHDFS_VERSION})
 
 
-add_executable(hdfs_test
-    main/native/hdfs_test.c
+add_executable(test_libhdfs_ops
+    main/native/libhdfs/test/test_libhdfs_ops.c
 )
 )
-target_link_libraries(hdfs_test
+target_link_libraries(test_libhdfs_ops
     hdfs
     hdfs
     ${JAVA_JVM_LIBRARY}
     ${JAVA_JVM_LIBRARY}
 )
 )
-output_directory(hdfs_test target/usr/local/bin)
 
 
-add_executable(hdfs_read
-    main/native/hdfs_read.c
+add_executable(test_libhdfs_read
+    main/native/libhdfs/test/test_libhdfs_read.c
 )
 )
-target_link_libraries(hdfs_read
+target_link_libraries(test_libhdfs_read
     hdfs
     hdfs
     ${JAVA_JVM_LIBRARY}
     ${JAVA_JVM_LIBRARY}
 )
 )
-output_directory(hdfs_read target/usr/local/bin)
 
 
-add_executable(hdfs_write
-    main/native/hdfs_write.c
+add_executable(test_libhdfs_write
+    main/native/libhdfs/test/test_libhdfs_write.c
 )
 )
-target_link_libraries(hdfs_write
+target_link_libraries(test_libhdfs_write
     hdfs
     hdfs
     ${JAVA_JVM_LIBRARY}
     ${JAVA_JVM_LIBRARY}
 )
 )
-output_directory(hdfs_write target/usr/local/bin)
 
 
-add_subdirectory(contrib/fuse-dfs/src)
+add_library(native_mini_dfs
+    main/native/libhdfs/native_mini_dfs.c
+)
+target_link_libraries(native_mini_dfs
+    hdfs
+)
+
+add_executable(test_native_mini_dfs
+    main/native/libhdfs/test_native_mini_dfs.c
+)
+target_link_libraries(test_native_mini_dfs
+    native_mini_dfs
+)
+
+add_executable(test_libhdfs_threaded
+    main/native/libhdfs/test_libhdfs_threaded.c
+)
+target_link_libraries(test_libhdfs_threaded
+    hdfs
+    native_mini_dfs
+    pthread
+)
+
+add_subdirectory(main/native/fuse-dfs)

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake

@@ -3,4 +3,6 @@
 
 
 #cmakedefine _FUSE_DFS_VERSION "@_FUSE_DFS_VERSION@"
 #cmakedefine _FUSE_DFS_VERSION "@_FUSE_DFS_VERSION@"
 
 
+#cmakedefine HAVE_BETTER_TLS
+
 #endif
 #endif

+ 21 - 36
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java

@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 
 
-import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
@@ -42,6 +41,8 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 
 
+import org.apache.hadoop.util.ExitUtil.ExitException;
+
 import org.apache.bookkeeper.proto.BookieServer;
 import org.apache.bookkeeper.proto.BookieServer;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
@@ -49,12 +50,6 @@ import org.apache.commons.logging.LogFactory;
 
 
 import java.io.IOException;
 import java.io.IOException;
 
 
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.atLeastOnce;
-import static org.mockito.Mockito.verify;
-
 /**
 /**
  * Integration test to ensure that the BookKeeper JournalManager
  * Integration test to ensure that the BookKeeper JournalManager
  * works for HDFS Namenode HA
  * works for HDFS Namenode HA
@@ -83,8 +78,6 @@ public class TestBookKeeperAsHASharedDir {
    */
    */
   @Test
   @Test
   public void testFailoverWithBK() throws Exception {
   public void testFailoverWithBK() throws Exception {
-    Runtime mockRuntime1 = mock(Runtime.class);
-    Runtime mockRuntime2 = mock(Runtime.class);
     MiniDFSCluster cluster = null;
     MiniDFSCluster cluster = null;
     try {
     try {
       Configuration conf = new Configuration();
       Configuration conf = new Configuration();
@@ -100,8 +93,6 @@ public class TestBookKeeperAsHASharedDir {
         .build();
         .build();
       NameNode nn1 = cluster.getNameNode(0);
       NameNode nn1 = cluster.getNameNode(0);
       NameNode nn2 = cluster.getNameNode(1);
       NameNode nn2 = cluster.getNameNode(1);
-      FSEditLogTestUtil.setRuntimeForEditLog(nn1, mockRuntime1);
-      FSEditLogTestUtil.setRuntimeForEditLog(nn2, mockRuntime2);
 
 
       cluster.waitActive();
       cluster.waitActive();
       cluster.transitionToActive(0);
       cluster.transitionToActive(0);
@@ -117,9 +108,6 @@ public class TestBookKeeperAsHASharedDir {
 
 
       assertTrue(fs.exists(p));
       assertTrue(fs.exists(p));
     } finally {
     } finally {
-      verify(mockRuntime1, times(0)).exit(anyInt());
-      verify(mockRuntime2, times(0)).exit(anyInt());
-
       if (cluster != null) {
       if (cluster != null) {
         cluster.shutdown();
         cluster.shutdown();
       }
       }
@@ -141,9 +129,6 @@ public class TestBookKeeperAsHASharedDir {
 
 
     BookieServer replacementBookie = null;
     BookieServer replacementBookie = null;
 
 
-    Runtime mockRuntime1 = mock(Runtime.class);
-    Runtime mockRuntime2 = mock(Runtime.class);
-
     MiniDFSCluster cluster = null;
     MiniDFSCluster cluster = null;
 
 
     try {
     try {
@@ -161,11 +146,10 @@ public class TestBookKeeperAsHASharedDir {
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(0)
         .numDataNodes(0)
         .manageNameDfsSharedDirs(false)
         .manageNameDfsSharedDirs(false)
+        .checkExitOnShutdown(false)
         .build();
         .build();
       NameNode nn1 = cluster.getNameNode(0);
       NameNode nn1 = cluster.getNameNode(0);
       NameNode nn2 = cluster.getNameNode(1);
       NameNode nn2 = cluster.getNameNode(1);
-      FSEditLogTestUtil.setRuntimeForEditLog(nn1, mockRuntime1);
-      FSEditLogTestUtil.setRuntimeForEditLog(nn2, mockRuntime2);
 
 
       cluster.waitActive();
       cluster.waitActive();
       cluster.transitionToActive(0);
       cluster.transitionToActive(0);
@@ -180,20 +164,22 @@ public class TestBookKeeperAsHASharedDir {
       assertEquals("New bookie didn't stop",
       assertEquals("New bookie didn't stop",
                    numBookies, bkutil.checkBookiesUp(numBookies, 10));
                    numBookies, bkutil.checkBookiesUp(numBookies, 10));
 
 
-      // mkdirs will "succeed", but nn have called runtime.exit
-      fs.mkdirs(p2);
-      verify(mockRuntime1, atLeastOnce()).exit(anyInt());
-      verify(mockRuntime2, times(0)).exit(anyInt());
+      try {
+        fs.mkdirs(p2);
+        fail("mkdirs should result in the NN exiting");
+      } catch (RemoteException re) {
+        assertTrue(re.getClassName().contains("ExitException"));
+      }
       cluster.shutdownNameNode(0);
       cluster.shutdownNameNode(0);
 
 
       try {
       try {
         cluster.transitionToActive(1);
         cluster.transitionToActive(1);
         fail("Shouldn't have been able to transition with bookies down");
         fail("Shouldn't have been able to transition with bookies down");
-      } catch (ServiceFailedException e) {
-        assertTrue("Wrong exception",
-            e.getMessage().contains("Failed to start active services"));
+      } catch (ExitException ee) {
+        assertTrue("Should shutdown due to required journal failure",
+            ee.getMessage().contains(
+                "starting log segment 3 failed for required journal"));
       }
       }
-      verify(mockRuntime2, atLeastOnce()).exit(anyInt());
 
 
       replacementBookie = bkutil.newBookie();
       replacementBookie = bkutil.newBookie();
       assertEquals("Replacement bookie didn't start",
       assertEquals("Replacement bookie didn't start",
@@ -219,8 +205,6 @@ public class TestBookKeeperAsHASharedDir {
    */
    */
   @Test
   @Test
   public void testMultiplePrimariesStarted() throws Exception {
   public void testMultiplePrimariesStarted() throws Exception {
-    Runtime mockRuntime1 = mock(Runtime.class);
-    Runtime mockRuntime2 = mock(Runtime.class);
     Path p1 = new Path("/testBKJMMultiplePrimary");
     Path p1 = new Path("/testBKJMMultiplePrimary");
 
 
     MiniDFSCluster cluster = null;
     MiniDFSCluster cluster = null;
@@ -235,11 +219,10 @@ public class TestBookKeeperAsHASharedDir {
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(0)
         .numDataNodes(0)
         .manageNameDfsSharedDirs(false)
         .manageNameDfsSharedDirs(false)
+        .checkExitOnShutdown(false)
         .build();
         .build();
       NameNode nn1 = cluster.getNameNode(0);
       NameNode nn1 = cluster.getNameNode(0);
       NameNode nn2 = cluster.getNameNode(1);
       NameNode nn2 = cluster.getNameNode(1);
-      FSEditLogTestUtil.setRuntimeForEditLog(nn1, mockRuntime1);
-      FSEditLogTestUtil.setRuntimeForEditLog(nn2, mockRuntime2);
       cluster.waitActive();
       cluster.waitActive();
       cluster.transitionToActive(0);
       cluster.transitionToActive(0);
 
 
@@ -248,11 +231,13 @@ public class TestBookKeeperAsHASharedDir {
       nn1.getRpcServer().rollEditLog();
       nn1.getRpcServer().rollEditLog();
       cluster.transitionToActive(1);
       cluster.transitionToActive(1);
       fs = cluster.getFileSystem(0); // get the older active server.
       fs = cluster.getFileSystem(0); // get the older active server.
-      // This edit log updation on older active should make older active
-      // shutdown.
-      fs.delete(p1, true);
-      verify(mockRuntime1, atLeastOnce()).exit(anyInt());
-      verify(mockRuntime2, times(0)).exit(anyInt());
+
+      try {
+        fs.delete(p1, true);
+        fail("Log update on older active should cause it to exit");
+      } catch (RemoteException re) {
+        assertTrue(re.getClassName().contains("ExitException"));
+      }
     } finally {
     } finally {
       if (cluster != null) {
       if (cluster != null) {
         cluster.shutdown();
         cluster.shutdown();
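
Editor's note: with the exit-hook work referenced in CHANGES.txt (HDFS-3582), the NameNode terminates through an ExitUtil-style hook rather than a mockable Runtime, which is why these tests drop the Runtime mocks, build the cluster with checkExitOnShutdown(false), and assert on ExitException (wrapped in RemoteException when it happens on the remote side of an RPC). A hedged sketch of how such a test hook is commonly structured; the real org.apache.hadoop.util.ExitUtil may differ:

// Hedged sketch of an exit hook that can be disabled for tests, in the spirit
// of the ExitException assertions added above.
public final class ExitUtilSketch {
  public static class ExitException extends RuntimeException {
    public final int status;
    public ExitException(int status, String msg) { super(msg); this.status = status; }
  }

  private static volatile boolean systemExitDisabled = false;

  public static void disableSystemExit() { systemExitDisabled = true; }

  /** Terminate the JVM, or throw ExitException when exits are disabled (tests). */
  public static void terminate(int status, String msg) {
    if (systemExitDisabled) {
      throw new ExitException(status, msg);
    }
    System.exit(status);
  }

  public static void main(String[] args) {
    disableSystemExit();
    try {
      terminate(1, "required journal failed");   // illustrative message only
    } catch (ExitException ee) {
      System.out.println("caught exit: " + ee.getMessage());
    }
  }
}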

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java

@@ -36,9 +36,4 @@ public class FSEditLogTestUtil {
     FSEditLogLoader.EditLogValidation validation = FSEditLogLoader.validateEditLog(in);
     FSEditLogLoader.EditLogValidation validation = FSEditLogLoader.validateEditLog(in);
     return (validation.getEndTxId() - in.getFirstTxId()) + 1;
     return (validation.getEndTxId() - in.getFirstTxId()) + 1;
   }
   }
-
-  public static void setRuntimeForEditLog(NameNode nn, Runtime rt) {
-    nn.setRuntimeForTesting(rt);
-    nn.getFSImage().getEditLog().setRuntimeForTesting(rt);
-  }
 }
 }

+ 0 - 312
hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/build-contrib.xml

@@ -1,312 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Imported by contrib/*/build.xml files to share generic targets. -->
-
-<project name="hadoopbuildcontrib" xmlns:ivy="antlib:org.apache.ivy.ant">
-
-  <property name="name" value="${ant.project.name}"/>
-  <property name="root" value="${basedir}"/>
-  <property name="hadoop.root" location="${root}/../../../"/>
-
-  <!-- Load all the default properties, and any the user wants    -->
-  <!-- to contribute (without having to type -D or edit this file -->
-  <property file="${user.home}/${name}.build.properties" />
-  <property file="${root}/build.properties" />
-  <property file="${hadoop.root}/build.properties" />
-
-  <property name="src.dir"  location="${root}/src/java"/>
-  <property name="src.test" location="${root}/src/test"/>
-  <property name="src.examples" location="${root}/src/examples"/>
-
-  <available file="${src.examples}" type="dir" property="examples.available"/>
-  <available file="${src.test}" type="dir" property="test.available"/>
-
-  <property name="conf.dir" location="${hadoop.root}/conf"/>
-  <property name="test.junit.output.format" value="plain"/>
-  <property name="test.output" value="no"/>
-  <property name="test.timeout" value="900000"/>
-  <property name="build.dir" location="${hadoop.root}/build/contrib/${name}"/>
-  <property name="build.webapps.root.dir" value="${hadoop.root}/build/web"/>
-  <property name="build.webapps" value="${build.webapps.root.dir}/webapps"/>
-  <property name="build.classes" location="${build.dir}/classes"/>
-  <!-- NB: sun.arch.data.model is not supported on all platforms -->
-  <property name="build.platform"
-            value="${os.name}-${os.arch}-${sun.arch.data.model}"/>
-  <property name="build.c++.libhdfs" value="${build.dir}/../../c++/${build.platform}/lib"/>
-  <property name="build.test" location="${build.dir}/test"/>
-  <property name="build.examples" location="${build.dir}/examples"/>
-  <property name="hadoop.log.dir" location="${build.dir}/test/logs"/>
-  <!-- all jars together -->
-  <property name="javac.deprecation" value="off"/>
-  <property name="javac.debug" value="on"/>
-  <property name="build.ivy.lib.dir" value="${hadoop.root}/build/ivy/lib"/> 
-
-  <property name="javadoc.link"
-            value="http://java.sun.com/j2se/1.4/docs/api/"/>
-
-  <property name="build.encoding" value="ISO-8859-1"/>
-
-  <fileset id="lib.jars" dir="${root}" includes="lib/*.jar"/>
-
-
-   <!-- IVY properties set here -->
-  <property name="ivy.dir" location="ivy" />
-  <property name="ivysettings.xml" location="${hadoop.root}/ivy/ivysettings.xml"/>
-  <loadproperties srcfile="${ivy.dir}/libraries.properties"/>
-  <loadproperties srcfile="ivy/libraries.properties"/>
-  <property name="ivy.jar" location="${hadoop.root}/ivy/ivy-${ivy.version}.jar"/>
-  <property name="ivy_repo_url" 
-	value="http://repo2.maven.org/maven2/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar" />
-  <property name="build.dir" location="build" />
-  <property name="build.ivy.dir" location="${build.dir}/ivy" />
-  <property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib" />
-  <property name="build.ivy.report.dir" location="${build.ivy.dir}/report" />
-  <property name="common.ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}/common"/> 
-
-  <!--this is the naming policy for artifacts we want pulled down-->
-  <property name="ivy.artifact.retrieve.pattern"
-    			value="${ant.project.name}/[conf]/[artifact]-[revision](-[classifier]).[ext]"/>
-
-  <!-- the normal classpath -->
-  <path id="contrib-classpath">
-    <pathelement location="${build.classes}"/>
-    <fileset refid="lib.jars"/>
-    <pathelement location="${hadoop.root}/build/classes"/>
-    <fileset dir="${hadoop.root}/lib">
-      <include name="**/*.jar" />
-    </fileset>
-    <path refid="${ant.project.name}.common-classpath"/>
-    <pathelement path="${clover.jar}"/>
-  </path>
-
-  <!-- the unit test classpath -->
-  <path id="test.classpath">
-    <pathelement location="${build.test}"/>
-    <pathelement location="${build.webapps.root.dir}"/>
-    <pathelement location="${hadoop.root}/build/test/core/classes"/>
-    <pathelement location="${hadoop.root}/build/test/hdfs/classes"/>
-    <pathelement location="${hadoop.root}/build/test/mapred/classes"/>
-    <pathelement location="${hadoop.root}/src/contrib/test"/>
-    <pathelement location="${conf.dir}"/>
-    <pathelement location="${hadoop.root}/build"/>
-    <pathelement location="${build.examples}"/>
-    <path refid="contrib-classpath"/>
-  </path>
-
-
-  <!-- to be overridden by sub-projects -->
-  <target name="check-contrib"/>
-  <target name="init-contrib"/>
-
-  <!-- ====================================================== -->
-  <!-- Stuff needed by all targets                            -->
-  <!-- ====================================================== -->
-  <target name="init" depends="check-contrib" unless="skip.contrib">
-    <echo message="contrib: ${name}"/>
-    <mkdir dir="${build.dir}"/>
-    <mkdir dir="${build.classes}"/>
-    <mkdir dir="${build.test}"/>
-    <mkdir dir="${build.examples}"/>
-    <mkdir dir="${hadoop.log.dir}"/>
-    <antcall target="init-contrib"/>
-  </target>
-
-
-  <!-- ====================================================== -->
-  <!-- Compile a Hadoop contrib's files                       -->
-  <!-- ====================================================== -->
-  <target name="compile" depends="init, ivy-retrieve-common" unless="skip.contrib">
-    <echo message="contrib: ${name}"/>
-    <javac
-     encoding="${build.encoding}"
-     srcdir="${src.dir}"
-     includes="**/*.java"
-     destdir="${build.classes}"
-     debug="${javac.debug}"
-     deprecation="${javac.deprecation}">
-     <classpath refid="contrib-classpath"/>
-    </javac>
-  </target>
-
-
-  <!-- ======================================================= -->
-  <!-- Compile a Hadoop contrib's example files (if available) -->
-  <!-- ======================================================= -->
-  <target name="compile-examples" depends="compile" if="examples.available">
-    <echo message="contrib: ${name}"/>
-    <javac
-     encoding="${build.encoding}"
-     srcdir="${src.examples}"
-     includes="**/*.java"
-     destdir="${build.examples}"
-     debug="${javac.debug}">
-     <classpath refid="contrib-classpath"/>
-    </javac>
-  </target>
-
-
-  <!-- ================================================================== -->
-  <!-- Compile test code                                                  -->
-  <!-- ================================================================== -->
-  <target name="compile-test" depends="compile-examples" if="test.available">
-    <echo message="contrib: ${name}"/>
-    <javac
-     encoding="${build.encoding}"
-     srcdir="${src.test}"
-     includes="**/*.java"
-     destdir="${build.test}"
-     debug="${javac.debug}">
-    <classpath refid="test.classpath"/>
-    </javac>
-  </target>
-  
-
-  <!-- ====================================================== -->
-  <!-- Make a Hadoop contrib's jar                            -->
-  <!-- ====================================================== -->
-  <target name="jar" depends="compile" unless="skip.contrib">
-    <echo message="contrib: ${name}"/>
-    <jar
-      jarfile="${build.dir}/hadoop-${version}-${name}.jar"
-      basedir="${build.classes}"      
-    />
-  </target>
-
-  
-  <!-- ====================================================== -->
-  <!-- Make a Hadoop contrib's examples jar                   -->
-  <!-- ====================================================== -->
-  <target name="jar-examples" depends="compile-examples"
-          if="examples.available" unless="skip.contrib">
-    <echo message="contrib: ${name}"/>
-    <jar jarfile="${build.dir}/hadoop-${version}-${name}-examples.jar">
-      <fileset dir="${build.classes}">
-      </fileset>
-      <fileset dir="${build.examples}">
-      </fileset>
-    </jar>
-  </target>
-  
-  <!-- ====================================================== -->
-  <!-- Package a Hadoop contrib                               -->
-  <!-- ====================================================== -->
-  <target name="package" depends="jar, jar-examples" unless="skip.contrib"> 
-    <mkdir dir="${dist.dir}/contrib/${name}"/>
-    <copy todir="${dist.dir}/contrib/${name}" includeEmptyDirs="false" flatten="true">
-      <fileset dir="${build.dir}">
-        <include name="hadoop-${version}-${name}.jar" />
-      </fileset>
-    </copy>
-  </target>
-  
-  <!-- ================================================================== -->
-  <!-- Run unit tests                                                     -->
-  <!-- ================================================================== -->
-  <target name="test" depends="compile-test, compile" if="test.available">
-    <echo message="contrib: ${name}"/>
-    <delete dir="${hadoop.log.dir}"/>
-    <mkdir dir="${hadoop.log.dir}"/>
-    <junit
-      printsummary="yes" showoutput="${test.output}" 
-      haltonfailure="no" fork="yes" maxmemory="256m"
-      errorProperty="tests.failed" failureProperty="tests.failed"
-      timeout="${test.timeout}">
-      
-      <sysproperty key="test.build.data" value="${build.test}/data"/>
-      <sysproperty key="build.test" value="${build.test}"/>
-      <sysproperty key="contrib.name" value="${name}"/>
-      
-      <!-- requires fork=yes for: 
-        relative File paths to use the specified user.dir 
-        classpath to use build/contrib/*.jar
-      -->
-      <sysproperty key="java.net.preferIPv4Stack" value="true"/>
-      <sysproperty key="user.dir" value="${build.test}/data"/>
-      
-      <sysproperty key="fs.default.name" value="${fs.default.name}"/>
-      <sysproperty key="hadoop.test.localoutputfile" value="${hadoop.test.localoutputfile}"/>
-      <sysproperty key="hadoop.log.dir" value="${hadoop.log.dir}"/> 
-      <sysproperty key="taskcontroller-path" value="${taskcontroller-path}"/>
-      <sysproperty key="taskcontroller-user" value="${taskcontroller-user}"/>
-      <classpath refid="test.classpath"/>
-      <formatter type="${test.junit.output.format}" />
-      <batchtest todir="${build.test}" unless="testcase">
-        <fileset dir="${src.test}"
-                 includes="**/Test*.java" excludes="**/${test.exclude}.java" />
-      </batchtest>
-      <batchtest todir="${build.test}" if="testcase">
-        <fileset dir="${src.test}" includes="**/${testcase}.java"/>
-      </batchtest>
-    </junit>
-    <fail if="tests.failed">Tests failed!</fail>
-  </target>
-
-  <!-- ================================================================== -->
-  <!-- Clean.  Delete the build files, and their directories              -->
-  <!-- ================================================================== -->
-  <target name="clean">
-    <echo message="contrib: ${name}"/>
-    <delete dir="${build.dir}"/>
-  </target>
-
-  <target name="ivy-probe-antlib" >
-    <condition property="ivy.found">
-      <typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/>
-    </condition>
-  </target>
-
-
-  <target name="ivy-download" description="To download ivy " unless="offline">
-    <get src="${ivy_repo_url}" dest="${ivy.jar}" usetimestamp="true"/>
-  </target>
-
-  <target name="ivy-init-antlib" depends="ivy-download,ivy-probe-antlib" unless="ivy.found">
-    <typedef uri="antlib:org.apache.ivy.ant" onerror="fail"
-      loaderRef="ivyLoader">
-      <classpath>
-        <pathelement location="${ivy.jar}"/>
-      </classpath>
-    </typedef>
-    <fail >
-      <condition >
-        <not>
-          <typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/>
-        </not>
-      </condition>
-      You need Apache Ivy 2.0 or later from http://ant.apache.org/
-      It could not be loaded from ${ivy_repo_url}
-    </fail>
-  </target>
-
-  <target name="ivy-init" depends="ivy-init-antlib">
-    <ivy:configure settingsid="${ant.project.name}.ivy.settings" file="${ivysettings.xml}"/>
-  </target>
-
-  <target name="ivy-resolve-common" depends="ivy-init">
-    <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="common" />
-  </target>
-
-  <target name="ivy-retrieve-common" depends="ivy-resolve-common"
-    description="Retrieve Ivy-managed artifacts for the compile/test configurations">
-    <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" 
-      pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}" sync="true" />
-    <ivy:cachepath pathid="${ant.project.name}.common-classpath" conf="common" />
-  </target>
-</project>

+ 0 - 87
hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/build.xml

@@ -1,87 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project name="fuse-dfs" default="compile" xmlns:ivy="antlib:org.apache.ivy.ant">
-
-  <import file="build-contrib.xml"/>
-
-  <target name="check-libhdfs-exists">
-    <property name="libhdfs.lib" value="${build.c++.libhdfs}/libhdfs.so"/>
-    <available file="${libhdfs.lib}" property="libhdfs-exists"/>
-    <fail message="libhdfs.so does not exist: ${libhdfs.lib}.">
-      <condition><not><isset property="libhdfs-exists"/></not></condition>
-    </fail>
-  </target>
-
-  <target name="compile">
-    <exec executable="autoreconf" dir="${basedir}" 
-          searchpath="yes" failonerror="yes">
-       <arg value="-if"/>
-    </exec>
-
-    <exec executable="${basedir}/configure" dir="${basedir}"
-          failonerror="yes">
-    </exec>
-
-    <exec executable="make" failonerror="true">
-      <env key="OS_NAME" value="${os.name}"/>
-      <env key="OS_ARCH" value="${os.arch}"/>
-      <env key="HADOOP_PREFIX" value="${hadoop.root}"/>
-      <env key="PACKAGE_VERSION" value="0.1.0"/>
-      <env key="BUILD_PLATFORM" value="${build.platform}" />
-    </exec>
-  </target>
-
-  <target name="jar" />
-  <target name="package" />
-
-  <target name="compile-test" depends="ivy-retrieve-common, check-libhdfs-exists">
-    <javac encoding="${build.encoding}"
-	   srcdir="${src.test}"
-	   includes="**/*.java"
-	   destdir="${build.test}"
-	   debug="${javac.debug}">
-      <classpath refid="test.classpath"/>
-    </javac>
-  </target>
-
-  <target name="test" depends="compile-test,check-libhdfs-exists">
-    <junit showoutput="${test.output}" fork="yes" printsummary="yes"
-           errorProperty="tests.failed" haltonfailure="no" failureProperty="tests.failed">
-      <classpath refid="test.classpath"/>
-      <sysproperty key="test.build.data" value="${build.test}/data"/>
-      <sysproperty key="build.test" value="${build.test}"/>
-      <sysproperty key="user.dir" value="${build.test}/data"/>
-      <sysproperty key="hadoop.log.dir" value="${hadoop.log.dir}"/>
-      <sysproperty key="test.src.dir" value="${test.src.dir}"/>
-      <formatter type="${test.junit.output.format}" />
-      <batchtest todir="${build.test}" unless="testcase">
-        <fileset dir="${src.test}">
-          <include name="**/Test*.java"/>
-        </fileset>
-      </batchtest>
-      <batchtest todir="${build.test}" if="testcase">
-        <fileset dir="${src.test}">
-          <include name="**/${testcase}.java"/>
-        </fileset>
-      </batchtest>
-    </junit>
-    <fail if="tests.failed">Tests failed!</fail>
- </target>
-</project>

+ 0 - 18
hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/global_footer.mk

@@ -1,18 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-thriftstyle : $(XBUILT_SOURCES)
-

+ 0 - 51
hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/global_header.mk

@@ -1,51 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-ifneq ($$(XBUILT_SOURCES),)
-    XBUILT_SOURCES := $$(XBUILT_SOURCES) $$(XTARGET)
-else
-    XBUILT_SOURCES := $$(XTARGET)
-endif
-
-showvars:
-	@echo BUILD_SOURCES = $(BUILT_SOURCES)
-	@echo XBUILTSOURCES = $(XBUILT_SOURCES)
-	@echo DEFS = $(DEFS)
-	@echo CXXFLAGS = $(CXXFLAGS)
-	@echo AM_CXXFLAGS = $(AM_CXXFLAGS)
-	@echo CPPFLAGS = $(CPPFLAGS)
-	@echo AM_CPPFLAGS = $(AM_CPPFLAGS)
-	@echo LDFLAGS = $(LDFLAGS)
-	@echo AM_LDFLAGS = $(AM_LDFLAGS)
-	@echo LDADD = $(LDADD)
-	@echo LIBS = $(LIBS)
-	@echo EXTERNAL_LIBS = $(EXTERNAL_LIBS)
-	@echo EXTERNAL_PATH = $(EXTERNAL_PATH)
-	@echo MAKE = $(MAKE)
-	@echo MAKE_FLAGS = $(MAKE_FLAGS)
-	@echo AM_MAKEFLAGS = $(AM_MAKEFLAGS)
-	@echo top_builddir = $(top_builddir)
-	@echo top_srcdir = $(top_srcdir)
-	@echo srcdir = $(srcdir)
-	@echo PHPVAL = $(PHPVAL)
-	@echo PHPCONFIGDIR  = $(PHPCONFIGDIR)
-	@echo PHPCONFIGINCLUDEDIR = $(PHPCONFIGINCLUDEDIR)
-	@echo PHPCONFIGINCLUDES  = $(PHPCONFIGINCLUDES)
-	@echo PHPCONFIGLDFLAGS  = $(PHPCONFIGLDFLAGS)
-	@echo PHPCONFIGLIBS  = $(PHPCONFIGLIBS)
-
-clean-common:
-	rm -rf gen-*

+ 0 - 71
hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/ivy.xml

@@ -1,71 +0,0 @@
-<?xml version="1.0" ?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<ivy-module version="1.0" xmlns:m="http://ant.apache.org/ivy/maven">
-  <info organisation="org.apache.hadoop" module="${ant.project.name}">
-    <license name="Apache 2.0"/>
-    <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
-    <description>
-        FUSE plugin for HDFS
-    </description>
-  </info>
-  <configurations defaultconfmapping="default">
-    <!--these match the Maven configurations-->
-    <conf name="default" extends="master,runtime"/>
-    <conf name="master" description="contains the artifact but no dependencies"/>
-    <conf name="runtime" description="runtime but not the artifact" />
-
-    <conf name="common" visibility="private" 
-      extends="runtime"
-      description="artifacts needed to compile/test the application"/>
-    <conf name="test" visibility="private" extends="runtime"/>
-  </configurations>
-
-  <publications>
-    <!--get the artifact from our module name-->
-    <artifact conf="master"/>
-  </publications>
-  <dependencies>
-    <dependency org="org.apache.hadoop"
-      name="hadoop-common"
-      rev="${hadoop-common.version}"
-      conf="common->default"/>
-    <dependency org="org.apache.hadoop"
-      name="hadoop-common"
-      rev="${hadoop-common.version}"
-      conf="common->default">
-      <artifact name="hadoop-common" type="tests" ext="jar" m:classifier="tests"/>
-    </dependency>
-    <dependency org="log4j"
-      name="log4j"
-      rev="${log4j.version}"
-      conf="common->master">
-      <exclude org="com.sun.jdmk"/>
-      <exclude org="com.sun.jmx"/>
-      <exclude org="javax.jms"/> 
-    </dependency>
-    
-    <dependency org="commons-logging"
-      name="commons-logging"
-      rev="${commons-logging.version}"
-      conf="common->master"/>
-    <dependency org="junit"
-      name="junit"
-      rev="${junit.version}"
-      conf="common->master"/>
-  </dependencies>
-</ivy-module>

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/ivy/libraries.properties

@@ -1,5 +0,0 @@
-#This properties file lists the versions of the various artifacts used by streaming.
-#It drives ivy and the generation of a maven POM
-
-#Please list the dependencies name with version if they are different from the ones 
-#listed in the global libraries.properties file (in alphabetical order)

+ 0 - 122
hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_connect.c

@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "hdfs.h"
-#include "fuse_dfs.h"
-#include "fuse_connect.h"
-#include "fuse_users.h" 
-
-#include <search.h>
-
-#define MAX_ELEMENTS (16 * 1024)
-static struct hsearch_data *fsTable = NULL;
-static pthread_mutex_t tableMutex = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * Allocate a hash table for fs handles. Returns 0 on success,
- * -1 on failure.
- */
-int allocFsTable(void) {
-  assert(NULL == fsTable);
-  fsTable = calloc(1, sizeof(struct hsearch_data));
-  if (0 == hcreate_r(MAX_ELEMENTS, fsTable)) {
-    ERROR("Unable to initialize connection table");
-    return -1;
-  }
-  return 0;
-}
-
-/*
- * Find a fs handle for the given key. Returns a fs handle, 
- * or NULL if there is no fs for the given key.
- */
-static hdfsFS findFs(char *key) {
-  ENTRY entry;
-  ENTRY *entryP = NULL;
-  entry.key = key;
-  if (0 == hsearch_r(entry, FIND, &entryP, fsTable)) {
-    return NULL;
-  }
-  assert(NULL != entryP->data);
-  return (hdfsFS)entryP->data;
-}
-
-/*
- * Insert the given fs handle into the table.
- * Returns 0 on success, -1 on failure.
- */
-static int insertFs(char *key, hdfsFS fs) {
-  ENTRY entry;
-  ENTRY *entryP = NULL;
-  assert(NULL != fs);
-  entry.key = strdup(key);
-  if (entry.key == NULL) {
-    return -1;
-  }
-  entry.data = (void*)fs;
-  if (0 == hsearch_r(entry, ENTER, &entryP, fsTable)) {
-    return -1;
-  }
-  return 0;
-}
-
-/*
- * Connect to the NN as the current user/group.
- * Returns a fs handle on success, or NULL on failure.
- */
-hdfsFS doConnectAsUser(const char *hostname, int port) {
-  uid_t uid = fuse_get_context()->uid;
-  char *user = getUsername(uid);
-  int ret;
-  hdfsFS fs = NULL;
-  if (NULL == user) {
-    goto done;
-  }
-
-  ret = pthread_mutex_lock(&tableMutex);
-  assert(0 == ret);
-
-  fs = findFs(user);
-  if (NULL == fs) {
-    fs = hdfsConnectAsUserNewInstance(hostname, port, user);
-    if (NULL == fs) {
-      ERROR("Unable to create fs for user %s", user);
-      goto done;
-    }
-    if (-1 == insertFs(user, fs)) {
-      ERROR("Unable to cache fs for user %s", user);
-    }
-  }
-
-done:
-  ret = pthread_mutex_unlock(&tableMutex);
-  assert(0 == ret);
-  if (user) {
-    free(user);
-  }
-  return fs;
-}
-
-/*
- * We currently cache a fs handle per-user in this module rather
- * than use the FileSystem cache in the java client. Therefore
- * we do not disconnect the fs handle here.
- */
-int doDisconnect(hdfsFS fs) {
-  return 0;
-}

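Note on the file just removed: fuse_connect.c keeps one hdfsFS handle per user in an hsearch_r table guarded by a pthread mutex, and deliberately never disconnects the cached handles (see doDisconnect above). Purely as an illustration of that design, and not part of this commit, the same per-user handle cache could be written in Java roughly as follows; the names PerUserHandleCache and HandleFactory are invented for the example.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

interface HandleFactory<H> {
  // Open a new handle (e.g. a filesystem connection) on behalf of this user.
  H connect(String user);
}

class PerUserHandleCache<H> {
  private final ConcurrentMap<String, H> handles = new ConcurrentHashMap<String, H>();
  private final HandleFactory<H> factory;

  PerUserHandleCache(HandleFactory<H> factory) {
    this.factory = factory;
  }

  // Return the cached handle for this user, creating it on first use; handles are
  // intentionally kept for the life of the process, mirroring doDisconnect() above.
  H get(String user) {
    H handle = handles.get(user);
    if (handle == null) {
      H created = factory.connect(user);
      H previous = handles.putIfAbsent(user, created);
      handle = (previous != null) ? previous : created;
    }
    return handle;
  }
}

Unlike the mutex in the C code, this sketch may briefly create a duplicate handle under contention and keep only one; the C version serializes creation instead.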
+ 7 - 27
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.util.DirectBufferPool;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 
@@ -315,23 +316,10 @@ class BlockReaderLocal implements BlockReader {
     boolean success = false;
     try {
       // Skip both input streams to beginning of the chunk containing startOffset
-      long toSkip = firstChunkOffset;
-      while (toSkip > 0) {
-        long skipped = dataIn.skip(toSkip);
-        if (skipped == 0) {
-          throw new IOException("Couldn't initialize input stream");
-        }
-        toSkip -= skipped;
-      }
+      IOUtils.skipFully(dataIn, firstChunkOffset);
       if (checksumIn != null) {
         long checkSumOffset = (firstChunkOffset / bytesPerChecksum) * checksumSize;
-        while (checkSumOffset > 0) {
-          long skipped = checksumIn.skip(checkSumOffset);
-          if (skipped == 0) {
-            throw new IOException("Couldn't initialize checksum input stream");
-          }
-          checkSumOffset -= skipped;
-        }
+        IOUtils.skipFully(checksumIn, checkSumOffset);
       }
       success = true;
     } finally {
@@ -636,17 +624,9 @@ class BlockReaderLocal implements BlockReader {
     slowReadBuff.position(slowReadBuff.limit());
     checksumBuff.position(checksumBuff.limit());
   
-    long dataSkipped = dataIn.skip(toskip);
-    if (dataSkipped != toskip) {
-      throw new IOException("skip error in data input stream");
-    }
-    long checkSumOffset = (dataSkipped / bytesPerChecksum) * checksumSize;
-    if (checkSumOffset > 0) {
-      long skipped = checksumIn.skip(checkSumOffset);
-      if (skipped != checkSumOffset) {
-        throw new IOException("skip error in checksum input stream");
-      }
-    }
+    IOUtils.skipFully(dataIn, toskip);
+    long checkSumOffset = (toskip / bytesPerChecksum) * checksumSize;
+    IOUtils.skipFully(checksumIn, checkSumOffset);
 
     // read into the middle of the chunk
     if (skipBuf == null) {
@@ -701,4 +681,4 @@ class BlockReaderLocal implements BlockReader {
   public boolean hasSentStatusCode() {
     return false;
   }
-}
+}

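The two hunks above replace hand-rolled skip loops in BlockReaderLocal with IOUtils.skipFully, the helper this commit adds to hadoop-common (see the IOUtils.java entry in the file list). As a rough sketch of what such a helper must guarantee, given that InputStream.skip may legally skip fewer bytes than asked, it behaves along these lines; the class name SkipUtil and the exact error wording are illustrative, not the committed code.

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

final class SkipUtil {
  // Keep calling skip() until len bytes have been consumed, failing loudly if the
  // stream ends first, so callers no longer need their own while-loops.
  static void skipFully(InputStream in, long len) throws IOException {
    while (len > 0) {
      long skipped = in.skip(len);
      if (skipped <= 0) {
        // skip() made no progress: read one byte to tell a short skip from EOF.
        if (in.read() == -1) {
          throw new EOFException("Premature EOF, " + len + " bytes left to skip");
        }
        skipped = 1;
      }
      len -= skipped;
    }
  }
}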
+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -139,6 +139,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
@@ -520,7 +521,7 @@ public class DFSClient implements java.io.Closeable {
       if (filesBeingWritten.isEmpty()) {
         return;
       }
-      lastLeaseRenewal = System.currentTimeMillis();
+      lastLeaseRenewal = Time.now();
     }
   }
 
@@ -537,7 +538,7 @@ public class DFSClient implements java.io.Closeable {
         return true;
       } catch (IOException e) {
         // Abort if the lease has already expired. 
-        final long elapsed = System.currentTimeMillis() - getLastLeaseRenewal();
+        final long elapsed = Time.now() - getLastLeaseRenewal();
         if (elapsed > HdfsConstants.LEASE_SOFTLIMIT_PERIOD) {
           LOG.warn("Failed to renew lease for " + clientName + " for "
               + (elapsed/1000) + " seconds (>= soft-limit ="
@@ -599,6 +600,7 @@ public class DFSClient implements java.io.Closeable {
    * Close the file system, abandoning all of the leases and files being
    * created and close connections to the namenode.
    */
+  @Override
   public synchronized void close() throws IOException {
     if(clientRunning) {
       closeAllFilesBeingWritten(false);
@@ -635,7 +637,7 @@ public class DFSClient implements java.io.Closeable {
    * @see ClientProtocol#getServerDefaults()
    */
   public FsServerDefaults getServerDefaults() throws IOException {
-    long now = System.currentTimeMillis();
+    long now = Time.now();
     if (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD) {
       serverDefaults = namenode.getServerDefaults();
       serverDefaultsLastUpdate = now;

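From here on the commit also routes every System.currentTimeMillis() call in DFSClient (and in DFSOutputStream below) through org.apache.hadoop.util.Time.now(). A minimal sketch of such a clock wrapper, assuming it simply delegates to the JDK clocks, is shown below; the real utility class may carry more than these two methods, and the name Clock is used here only to avoid implying this is the committed source.

final class Clock {
  private Clock() {}

  // Wall-clock milliseconds, the drop-in replacement for System.currentTimeMillis().
  static long now() {
    return System.currentTimeMillis();
  }

  // Monotonic milliseconds, safer for measuring intervals because it is not
  // affected when the system clock is adjusted.
  static long monotonicNow() {
    return System.nanoTime() / 1000000L;
  }
}

Funnelling clock reads through one class also leaves room to substitute a fake clock in tests, which is presumably part of the motivation for the sweep.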
+ 15 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -74,6 +74,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Time;
 
 
 /****************************************************************
@@ -267,6 +268,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
       return seqno == HEART_BEAT_SEQNO;
     }
     
+    @Override
     public String toString() {
       return "packet seqno:" + this.seqno +
       " offsetInBlock:" + this.offsetInBlock + 
@@ -395,8 +397,9 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
      * streamer thread is the only thread that opens streams to datanode, 
      * and closes them. Any error recovery is also done by this thread.
      */
+    @Override
     public void run() {
-      long lastPacket = System.currentTimeMillis();
+      long lastPacket = Time.now();
       while (!streamerClosed && dfsClient.clientRunning) {
 
         // if the Responder encountered an error, shutdown Responder
@@ -420,7 +423,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
 
           synchronized (dataQueue) {
             // wait for a packet to be sent.
-            long now = System.currentTimeMillis();
+            long now = Time.now();
             while ((!streamerClosed && !hasError && dfsClient.clientRunning 
                 && dataQueue.size() == 0 && 
                 (stage != BlockConstructionStage.DATA_STREAMING || 
@@ -435,7 +438,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
               } catch (InterruptedException  e) {
               }
               doSleep = false;
-              now = System.currentTimeMillis();
+              now = Time.now();
             }
             if (streamerClosed || hasError || !dfsClient.clientRunning) {
               continue;
@@ -518,7 +521,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
             errorIndex = 0;
             throw e;
           }
-          lastPacket = System.currentTimeMillis();
+          lastPacket = Time.now();
           
           if (one.isHeartbeatPacket()) {  //heartbeat packet
           }
@@ -653,6 +656,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
         this.targets = targets;
       }
 
+      @Override
       public void run() {
 
         setName("ResponseProcessor for block " + block);
@@ -981,7 +985,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
         errorIndex = -1;
         success = false;
 
-        long startTime = System.currentTimeMillis();
+        long startTime = Time.now();
         DatanodeInfo[] excluded = excludedNodes.toArray(
             new DatanodeInfo[excludedNodes.size()]);
         block = oldBlock;
@@ -1107,7 +1111,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
       int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
       long sleeptime = 400;
       while (true) {
-        long localstart = System.currentTimeMillis();
+        long localstart = Time.now();
         while (true) {
           try {
             return dfsClient.namenode.addBlock(src, dfsClient.clientName, block, excludedNodes);
@@ -1130,9 +1134,9 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
               } else {
                 --retries;
                 DFSClient.LOG.info("Exception while adding a block", e);
-                if (System.currentTimeMillis() - localstart > 5000) {
+                if (Time.now() - localstart > 5000) {
                   DFSClient.LOG.info("Waiting for replication for "
-                      + (System.currentTimeMillis() - localstart) / 1000
+                      + (Time.now() - localstart) / 1000
                       + " seconds");
                 }
                 try {
@@ -1727,14 +1731,14 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
   // should be called holding (this) lock since setTestFilename() may 
   // be called during unit tests
   private void completeFile(ExtendedBlock last) throws IOException {
-    long localstart = System.currentTimeMillis();
+    long localstart = Time.now();
     boolean fileComplete = false;
     while (!fileComplete) {
       fileComplete = dfsClient.namenode.complete(src, dfsClient.clientName, last);
       if (!fileComplete) {
         if (!dfsClient.clientRunning ||
               (dfsClient.hdfsTimeout > 0 &&
-               localstart + dfsClient.hdfsTimeout < System.currentTimeMillis())) {
+               localstart + dfsClient.hdfsTimeout < Time.now())) {
             String msg = "Unable to close file because dfsclient " +
                           " was unable to contact the HDFS servers." +
                           " clientRunning " + dfsClient.clientRunning +
@@ -1744,7 +1748,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
         }
         try {
           Thread.sleep(400);
-          if (System.currentTimeMillis() - localstart > 5000) {
+          if (Time.now() - localstart > 5000) {
             DFSClient.LOG.info("Could not complete file " + src + " retrying...");
           }
         } catch (InterruptedException ie) {

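The completeFile hunks above poll ClientProtocol.complete until the namenode reports the file closed, sleeping 400 ms between attempts and giving up once dfsClient.hdfsTimeout has elapsed. Stripped of the HDFS specifics, that is a bounded poll loop along the lines of the sketch below; the names Poller and waitUntil are invented for illustration.

import java.io.IOException;
import java.util.concurrent.Callable;

final class Poller {
  // Re-run check until it returns true, sleeping intervalMs between attempts and
  // failing once timeoutMs has elapsed (timeoutMs <= 0 means wait indefinitely).
  static void waitUntil(Callable<Boolean> check, long intervalMs, long timeoutMs)
      throws Exception {
    long start = System.currentTimeMillis();
    while (!check.call()) {
      if (timeoutMs > 0 && System.currentTimeMillis() - start > timeoutMs) {
        throw new IOException("Condition not met after " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs);
    }
  }
}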
Some files were not shown because too many files changed in this diff