
Merge branch 'trunk' into HADOOP-13345

 Conflicts:
	hadoop-tools/hadoop-aws/pom.xml
Steve Loughran 7 years ago
commit b114f2488f
100 changed files with 1425 additions and 505 deletions
  1. 1 0
      LICENSE.txt
  2. 1 1
      dev-support/docker/hadoop_env_checks.sh
  3. 1 1
      dev-support/findHangingTest.sh
  4. 1 1
      dev-support/verify-xml.sh
  5. 1 0
      hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
  6. 0 1
      hadoop-client-modules/hadoop-client-check-invariants/pom.xml
  7. 0 1
      hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
  8. 43 0
      hadoop-client-modules/hadoop-client-minicluster/pom.xml
  9. 0 4
      hadoop-client-modules/hadoop-client/pom.xml
  10. 4 2
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
  11. 4 0
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  12. 2 5
      hadoop-common-project/hadoop-common/pom.xml
  13. 14 14
      hadoop-common-project/hadoop-common/src/main/bin/hadoop
  14. 142 19
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
  15. 46 17
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  16. 4 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
  17. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
  18. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java
  19. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
  20. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
  21. 19 20
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
  22. 78 12
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
  23. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
  24. 5 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java
  25. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
  26. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
  27. 31 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  28. 4 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
  29. 5 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
  30. 156 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
  31. 5 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
  32. 5 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
  33. 5 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java
  34. 117 55
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
  35. 17 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  36. 5 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
  37. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
  38. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
  39. 10 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
  40. 25 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
  41. 4 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
  42. 6 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
  43. 5 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
  44. 51 13
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
  45. 4 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
  46. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
  47. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
  48. 15 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
  49. 131 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java
  50. 18 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/package-info.java
  51. 4 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
  52. 4 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
  53. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
  54. 110 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
  55. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
  56. 14 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
  57. 2 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
  58. 58 21
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  59. 17 26
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  60. 8 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  61. 5 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
  62. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
  63. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
  64. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
  65. 4 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/PowerShellFencer.java
  66. 3 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java
  67. 7 11
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
  68. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
  69. 10 10
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
  70. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
  71. 4 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
  72. 4 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
  73. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java
  74. 3 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
  75. 6 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
  76. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
  77. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
  78. 4 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
  79. 3 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
  80. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
  81. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
  82. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
  83. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
  84. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
  85. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java
  86. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java
  87. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
  88. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
  89. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
  90. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
  91. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
  92. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
  93. 12 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
  94. 5 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
  95. 9 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java
  96. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
  97. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
  98. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
  99. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
  100. 8 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java

+ 1 - 0
LICENSE.txt

@@ -698,6 +698,7 @@ hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
 Apache HBase - Server which contains JQuery minified javascript library version 1.8.3
+Microsoft JDBC Driver for SQLServer - version 6.2.1.jre7
 --------------------------------------------------------------------------------
 
 Copyright 2005, 2012, 2013 jQuery Foundation and other contributors, https://jquery.org/

+ 1 - 1
dev-support/docker/hadoop_env_checks.sh

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file

+ 1 - 1
dev-support/findHangingTest.sh

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file

+ 1 - 1
dev-support/verify-xml.sh

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file

+ 1 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml

@@ -61,6 +61,7 @@
         <include>stop-yarn.sh</include>
         <include>start-yarn.cmd</include>
         <include>stop-yarn.cmd</include>
+        <include>FederationStateStore**/**</include>
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>

+ 0 - 1
hadoop-client-modules/hadoop-client-check-invariants/pom.xml

@@ -46,7 +46,6 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-enforcer-plugin</artifactId>
-        <version>1.4</version>
         <dependencies>
           <dependency>
             <groupId>org.codehaus.mojo</groupId>

+ 0 - 1
hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml

@@ -50,7 +50,6 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-enforcer-plugin</artifactId>
-        <version>1.4</version>
         <dependencies>
           <dependency>
             <groupId>org.codehaus.mojo</groupId>

+ 43 - 0
hadoop-client-modules/hadoop-client-minicluster/pom.xml

@@ -126,6 +126,10 @@
           <groupId>javax.xml.bind</groupId>
           <artifactId>jaxb-api</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>xml-apis</groupId>
+          <artifactId>xml-apis</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>org.apache.avro</groupId>
           <artifactId>avro</artifactId>
@@ -393,6 +397,10 @@
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-yarn-common</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-registry</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-yarn-server-common</artifactId>
@@ -620,6 +628,19 @@
                         <exclude>**/*.class</exclude>
                       </excludes>
                     </filter>
+                    <filter>
+                      <artifact>xerces:xercesImpl</artifact>
+                      <excludes>
+                        <exclude>**/*</exclude>
+                      </excludes>
+                    </filter>
+                    <filter>
+                      <artifact>org.apache.hadoop:hadoop-mapreduce-client-jobclient:*</artifact>
+                      <excludes>
+                        <exclude>testjar/*</exclude>
+                        <exclude>testshell/*</exclude>
+                      </excludes>
+                    </filter>
                   </filters>
                   <relocations>
                     <relocation>
@@ -642,6 +663,7 @@
                         <exclude>org/junit/*</exclude>
                         <exclude>org/junit/**/*</exclude>
                         <!-- Not the org/ packages that are a part of the jdk -->
+
                         <exclude>org/ietf/jgss/*</exclude>
                         <exclude>org/omg/**/*</exclude>
                         <exclude>org/w3c/dom/*</exclude>
@@ -650,6 +672,13 @@
                         <exclude>org/xml/sax/**/*</exclude>
                       </excludes>
                     </relocation>
+                    <relocation>
+                      <pattern>contribs/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.contribs.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
                     <relocation>
                       <pattern>com/</pattern>
                       <shadedPattern>${shaded.dependency.prefix}.com.</shadedPattern>
@@ -687,6 +716,13 @@
                         <exclude>io/serializations</exclude>
                       </excludes>
                     </relocation>
+                    <relocation>
+                      <pattern>javassist/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.javassist.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
                     <relocation>
                       <pattern>javax/el/</pattern>
                       <shadedPattern>${shaded.dependency.prefix}.javax.el.</shadedPattern>
@@ -708,6 +744,13 @@
                         <exclude>**/pom.xml</exclude>
                       </excludes>
                     </relocation>
+                    <relocation>
+                      <pattern>jersey/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.jersey.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
                     <relocation>
                       <pattern>net/</pattern>
                       <shadedPattern>${shaded.dependency.prefix}.net.</shadedPattern>

+ 0 - 4
hadoop-client-modules/hadoop-client/pom.xml

@@ -162,10 +162,6 @@
           <groupId>org.apache.avro</groupId>
           <artifactId>avro</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>jline</groupId>
-          <artifactId>jline</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>io.netty</groupId>
           <artifactId>netty</artifactId>

+ 4 - 2
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -86,8 +88,8 @@ public class TestCertificateUtil {
         + "Mzc1xA==";
     try {
       RSAPublicKey pk = CertificateUtil.parseRSAPublicKey(pem);
-      assertTrue(pk != null);
-      assertTrue(pk.getAlgorithm().equals("RSA"));
+      assertNotNull(pk);
+      assertEquals("RSA", pk.getAlgorithm());
     } catch (ServletException se) {
       fail("Should not have thrown ServletException");
     }

+ 4 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -323,6 +323,10 @@
       <!-- protobuf generated code -->
       <Class name="~org\.apache\.hadoop\.tracing\.TraceAdminPB.*"/>
     </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.fs\.FSProto.*"/>
+    </Match>
 
     <!--
        Manually checked, misses child thread manually syncing on parent's intrinsic lock.

+ 2 - 5
hadoop-common-project/hadoop-common/pom.xml

@@ -184,7 +184,7 @@
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-log4j12</artifactId>
-      <scope>runtime</scope>
+      <scope>compile</scope>
     </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
@@ -267,10 +267,6 @@
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
       <exclusions>
-        <exclusion>
-          <groupId>jline</groupId>
-          <artifactId>jline</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>org.jboss.netty</groupId>
           <artifactId>netty</artifactId>
@@ -402,6 +398,7 @@
                   <include>RefreshUserMappingsProtocol.proto</include>
                   <include>RefreshCallQueueProtocol.proto</include>
                   <include>GenericRefreshProtocol.proto</include>
+                  <include>FSProtos.proto</include>
                 </includes>
               </source>
             </configuration>

+ 14 - 14
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -30,20 +30,20 @@ function hadoop_usage
   hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
   hadoop_add_option "workers" "turn on worker mode"
 
-  hadoop_add_subcommand "checknative" "check native Hadoop and compression libraries availability"
-  hadoop_add_subcommand "classpath" "prints the class path needed to get the Hadoop jar and the required libraries"
-  hadoop_add_subcommand "conftest" "validate configuration XML files"
-  hadoop_add_subcommand "credential" "interact with credential providers"
-  hadoop_add_subcommand "daemonlog" "get/set the log level for each daemon"
-  hadoop_add_subcommand "dtutil" "operations related to delegation tokens"
-  hadoop_add_subcommand "envvars" "display computed Hadoop environment variables"
-  hadoop_add_subcommand "fs" "run a generic filesystem user client"
-  hadoop_add_subcommand "jar <jar>" "run a jar file. NOTE: please use \"yarn jar\" to launch YARN applications, not this command."
-  hadoop_add_subcommand "jnipath" "prints the java.library.path"
-  hadoop_add_subcommand "kerbname" "show auth_to_local principal conversion"
-  hadoop_add_subcommand "key" "manage keys via the KeyProvider"
-  hadoop_add_subcommand "trace" "view and modify Hadoop tracing settings"
-  hadoop_add_subcommand "version" "print the version"
+  hadoop_add_subcommand "checknative" client "check native Hadoop and compression libraries availability"
+  hadoop_add_subcommand "classpath" client "prints the class path needed to get the Hadoop jar and the required libraries"
+  hadoop_add_subcommand "conftest" client "validate configuration XML files"
+  hadoop_add_subcommand "credential" client "interact with credential providers"
+  hadoop_add_subcommand "daemonlog" admin "get/set the log level for each daemon"
+  hadoop_add_subcommand "dtutil" client "operations related to delegation tokens"
+  hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
+  hadoop_add_subcommand "fs" client "run a generic filesystem user client"
+  hadoop_add_subcommand "jar <jar>" client "run a jar file. NOTE: please use \"yarn jar\" to launch YARN applications, not this command."
+  hadoop_add_subcommand "jnipath" client "prints the java.library.path"
+  hadoop_add_subcommand "kerbname" client "show auth_to_local principal conversion"
+  hadoop_add_subcommand "key" client "manage keys via the KeyProvider"
+  hadoop_add_subcommand "trace" client "view and modify Hadoop tracing settings"
+  hadoop_add_subcommand "version" client "print the version"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
 }
 

+ 142 - 19
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -18,6 +18,7 @@
 # be done outside of a function
 declare -a HADOOP_SUBCMD_USAGE
 declare -a HADOOP_OPTION_USAGE
+declare -a HADOOP_SUBCMD_USAGE_TYPES
 
 ## @description  Print a message to stderr
 ## @audience     public
@@ -115,6 +116,89 @@ function hadoop_verify_entry
   [[ ${!1} =~ \ ${2}\  ]]
 }
 
+## @description  Check if an array has a given value
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        element
+## @param        array
+## @returns      0 = yes
+## @returns      1 = no
+function hadoop_array_contains
+{
+  declare element=$1
+  shift
+  declare val
+
+  if [[ "$#" -eq 0 ]]; then
+    return 1
+  fi
+
+  for val in "${@}"; do
+    if [[ "${val}" == "${element}" ]]; then
+      return 0
+    fi
+  done
+  return 1
+}
+
+## @description  Add the `appendstring` if `checkstring` is not
+## @description  present in the given array
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        envvar
+## @param        appendstring
+function hadoop_add_array_param
+{
+  declare arrname=$1
+  declare add=$2
+
+  declare arrref="${arrname}[@]"
+  declare array=("${!arrref}")
+
+  if ! hadoop_array_contains "${add}" "${array[@]}"; then
+    #shellcheck disable=SC1083,SC2086
+    eval ${arrname}=\(\"\${array[@]}\" \"${add}\" \)
+    hadoop_debug "$1 accepted $2"
+  else
+    hadoop_debug "$1 declined $2"
+  fi
+}
+
+## @description  Sort an array (must not contain regexps)
+## @description  present in the given array
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        arrayvar
+function hadoop_sort_array
+{
+  declare arrname=$1
+  declare arrref="${arrname}[@]"
+  declare array=("${!arrref}")
+  declare oifs
+
+  declare globstatus
+  declare -a sa
+
+  globstatus=$(set -o | grep noglob | awk '{print $NF}')
+
+  set -f
+  oifs=${IFS}
+
+  # shellcheck disable=SC2034
+  IFS=$'\n' sa=($(sort <<<"${array[*]}"))
+
+  # shellcheck disable=SC1083
+  eval "${arrname}"=\(\"\${sa[@]}\"\)
+
+  IFS=${oifs}
+  if [[ "${globstatus}" = off ]]; then
+    set +f
+  fi
+}
+
 ## @description  Check if we are running with priv
 ## @description  by default, this implementation looks for
 ## @description  EUID=0.  For OSes that have true priv
@@ -220,13 +304,20 @@ function hadoop_uservar_su
 ## @stability    evolving
 ## @replaceable  no
 ## @param        subcommand
+## @param        subcommandtype
 ## @param        subcommanddesc
 function hadoop_add_subcommand
 {
-  local subcmd=$1
-  local text=$2
+  declare subcmd=$1
+  declare subtype=$2
+  declare text=$3
 
-  HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${text}"
+  hadoop_debug "${subcmd} as a ${subtype}"
+
+  hadoop_add_array_param HADOOP_SUBCMD_USAGE_TYPES "${subtype}"
+
+  # done in this order so that sort works later
+  HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${subtype}@${text}"
   ((HADOOP_SUBCMD_USAGE_COUNTER=HADOOP_SUBCMD_USAGE_COUNTER+1))
 }
 
@@ -253,17 +344,22 @@ function hadoop_reset_usage
 {
   HADOOP_SUBCMD_USAGE=()
   HADOOP_OPTION_USAGE=()
+  HADOOP_SUBCMD_USAGE_TYPES=()
   HADOOP_SUBCMD_USAGE_COUNTER=0
   HADOOP_OPTION_USAGE_COUNTER=0
 }
 
 ## @description  Print a screen-size aware two-column output
+## @description  if reqtype is not null, only print those requested
 ## @audience     private
 ## @stability    evolving
 ## @replaceable  no
+## @param        reqtype
 ## @param        array
 function hadoop_generic_columnprinter
 {
+  declare reqtype=$1
+  shift
   declare -a input=("$@")
   declare -i i=0
   declare -i counter=0
@@ -275,11 +371,13 @@ function hadoop_generic_columnprinter
   declare -i foldsize
   declare -a tmpa
   declare numcols
+  declare brup
 
   if [[ -n "${COLUMNS}" ]]; then
     numcols=${COLUMNS}
   else
     numcols=$(tput cols) 2>/dev/null
+    COLUMNS=${numcols}
   fi
 
   if [[ -z "${numcols}"
@@ -292,7 +390,8 @@ function hadoop_generic_columnprinter
   while read -r line; do
     tmpa[${counter}]=${line}
     ((counter=counter+1))
-    option=$(echo "${line}" | cut -f1 -d'@')
+    IFS='@' read -ra brup <<< "${line}"
+    option="${brup[0]}"
     if [[ ${#option} -gt ${maxoptsize} ]]; then
       maxoptsize=${#option}
     fi
@@ -304,8 +403,22 @@ function hadoop_generic_columnprinter
   ((foldsize=numcols-maxoptsize))
 
   until [[ $i -eq ${#tmpa[@]} ]]; do
-    option=$(echo "${tmpa[$i]}" | cut -f1 -d'@')
-    giventext=$(echo "${tmpa[$i]}" | cut -f2 -d'@')
+    IFS='@' read -ra brup <<< "${tmpa[$i]}"
+
+    option="${brup[0]}"
+    cmdtype="${brup[1]}"
+    giventext="${brup[2]}"
+
+    if [[ -n "${reqtype}" ]]; then
+      if [[ "${cmdtype}" != "${reqtype}" ]]; then
+        ((i=i+1))
+        continue
+      fi
+    fi
+
+    if [[ -z "${giventext}" ]]; then
+      giventext=${cmdtype}
+    fi
 
     while read -r line; do
       printf "%-${maxoptsize}s   %-s\n" "${option}" "${line}"
@@ -325,13 +438,14 @@ function hadoop_generic_columnprinter
 ## @param        [text to use in place of SUBCOMMAND]
 function hadoop_generate_usage
 {
-  local cmd=$1
-  local takesclass=$2
-  local subcmdtext=${3:-"SUBCOMMAND"}
-  local haveoptions
-  local optstring
-  local havesubs
-  local subcmdstring
+  declare cmd=$1
+  declare takesclass=$2
+  declare subcmdtext=${3:-"SUBCOMMAND"}
+  declare haveoptions
+  declare optstring
+  declare havesubs
+  declare subcmdstring
+  declare cmdtype
 
   cmd=${cmd##*/}
 
@@ -358,7 +472,7 @@ function hadoop_generate_usage
     echo "  OPTIONS is none or any of:"
     echo ""
 
-    hadoop_generic_columnprinter "${HADOOP_OPTION_USAGE[@]}"
+    hadoop_generic_columnprinter "" "${HADOOP_OPTION_USAGE[@]}"
   fi
 
   if [[ "${havesubs}" = true ]]; then
@@ -366,7 +480,18 @@ function hadoop_generate_usage
     echo "  ${subcmdtext} is one of:"
     echo ""
 
-    hadoop_generic_columnprinter "${HADOOP_SUBCMD_USAGE[@]}"
+    if [[ "${#HADOOP_SUBCMD_USAGE_TYPES[@]}" -gt 0 ]]; then
+
+      hadoop_sort_array HADOOP_SUBCMD_USAGE_TYPES
+      for subtype in "${HADOOP_SUBCMD_USAGE_TYPES[@]}"; do
+        #shellcheck disable=SC2086
+        cmdtype="$(tr '[:lower:]' '[:upper:]' <<< ${subtype:0:1})${subtype:1}"
+        printf "\n    %s Commands:\n\n" "${cmdtype}"
+        hadoop_generic_columnprinter "${subtype}" "${HADOOP_SUBCMD_USAGE[@]}"
+      done
+    else
+      hadoop_generic_columnprinter "" "${HADOOP_SUBCMD_USAGE[@]}"
+    fi
     echo ""
     echo "${subcmdtext} may print help when invoked w/o parameters or with -h."
   fi
@@ -1873,11 +1998,9 @@ function hadoop_start_secure_daemon_wrapper
     (( counter++ ))
   done
 
-  # this is for the daemon pid creation
   #shellcheck disable=SC2086
-  echo $! > "${jsvcpidfile}" 2>/dev/null
-  if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR:  Cannot write ${daemonname} pid ${daemonpidfile}."
+  if ! echo $! > "${jsvcpidfile}"; then
+    hadoop_error "ERROR:  Cannot write ${daemonname} pid ${jsvcpidfile}."
   fi
 
   sleep 1

+ 46 - 17
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.conf;
 
+import com.ctc.wstx.io.StreamBootstrapper;
+import com.ctc.wstx.io.SystemId;
 import com.ctc.wstx.stax.WstxInputFactory;
 import com.fasterxml.jackson.core.JsonFactory;
 import com.fasterxml.jackson.core.JsonGenerator;
@@ -79,8 +81,6 @@ import javax.xml.transform.stream.StreamResult;
 
 import com.google.common.base.Charsets;
 import org.apache.commons.collections.map.UnmodifiableMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -96,8 +96,9 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.util.StringUtils;
-import org.codehaus.stax2.XMLInputFactory2;
 import org.codehaus.stax2.XMLStreamReader2;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 
@@ -192,11 +193,12 @@ import com.google.common.base.Strings;
 @InterfaceStability.Stable
 public class Configuration implements Iterable<Map.Entry<String,String>>,
                                       Writable {
-  private static final Log LOG =
-    LogFactory.getLog(Configuration.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Configuration.class);
 
-  private static final Log LOG_DEPRECATION =
-    LogFactory.getLog("org.apache.hadoop.conf.Configuration.deprecation");
+  private static final Logger LOG_DEPRECATION =
+      LoggerFactory.getLogger(
+          "org.apache.hadoop.conf.Configuration.deprecation");
 
   private boolean quietmode = true;
 
@@ -284,7 +286,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * Specify exact input factory to avoid time finding correct one.
    * Factory is reusable across un-synchronized threads once initialized
    */
-  private static final XMLInputFactory2 XML_INPUT_FACTORY = new WstxInputFactory();
+  private static final WstxInputFactory XML_INPUT_FACTORY =
+      new WstxInputFactory();
 
   /**
    * Class to keep the information about the keys which replace the deprecated
@@ -1667,7 +1670,15 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
   }
 
-  private long getTimeDurationHelper(String name, String vStr, TimeUnit unit) {
+  /**
+   * Return time duration in the given time unit. Valid units are encoded in
+   * properties as suffixes: nanoseconds (ns), microseconds (us), milliseconds
+   * (ms), seconds (s), minutes (m), hours (h), and days (d).
+   * @param name Property name
+   * @param vStr The string value with time unit suffix to be converted.
+   * @param unit Unit to convert the stored property, if it exists.
+   */
+  public long getTimeDurationHelper(String name, String vStr, TimeUnit unit) {
     vStr = vStr.trim();
     vStr = StringUtils.toLowerCase(vStr);
     ParsedTimeDuration vUnit = ParsedTimeDuration.unitFor(vStr);
@@ -2638,15 +2649,18 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     return parse(connection.getInputStream(), url.toString());
   }
 
-  private XMLStreamReader parse(InputStream is,
-      String systemId) throws IOException, XMLStreamException {
+  private XMLStreamReader parse(InputStream is, String systemIdStr)
+      throws IOException, XMLStreamException {
     if (!quietmode) {
       LOG.debug("parsing input stream " + is);
     }
     if (is == null) {
       return null;
     }
-    return XML_INPUT_FACTORY.createXMLStreamReader(systemId, is);
+    SystemId systemId = SystemId.construct(systemIdStr);
+    return XML_INPUT_FACTORY.createSR(XML_INPUT_FACTORY.createPrivateConfig(),
+        systemId, StreamBootstrapper.getInstance(null, systemId, is), false,
+        true);
   }
 
   private void loadResources(Properties properties,
@@ -2877,10 +2891,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       }
       return null;
     } catch (IOException e) {
-      LOG.fatal("error parsing conf " + name, e);
+      LOG.error("error parsing conf " + name, e);
       throw new RuntimeException(e);
     } catch (XMLStreamException e) {
-      LOG.fatal("error parsing conf " + name, e);
+      LOG.error("error parsing conf " + name, e);
       throw new RuntimeException(e);
     }
   }
@@ -2902,9 +2916,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
         if(source != null) {
           updatingResource.put(attr, source);
         }
-      } else if (!value.equals(properties.getProperty(attr))) {
-        LOG.warn(name+":an attempt to override final parameter: "+attr
-            +";  Ignoring.");
+      } else {
+        // This is a final parameter so check for overrides.
+        checkForOverride(this.properties, name, attr, value);
+        if (this.properties != properties) {
+          checkForOverride(properties, name, attr, value);
+        }
       }
     }
     if (finalParameter && attr != null) {
@@ -2912,6 +2929,18 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
   }
 
+  /**
+   * Print a warning if a property with a given name already exists with a
+   * different value
+   */
+  private void checkForOverride(Properties properties, String name, String attr, String value) {
+    String propertyValue = properties.getProperty(attr);
+    if (propertyValue != null && !propertyValue.equals(value)) {
+      LOG.warn(name + ":an attempt to override final parameter: " + attr
+          + ";  Ignoring.");
+    }
+  }
+
   /** 
    * Write out the non-default properties in this configuration to the given
    * {@link OutputStream} using UTF-8 encoding.
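
The hunk above makes getTimeDurationHelper public, so callers that already hold a raw string value can reuse Configuration's unit-suffix parsing. A minimal sketch of that usage follows; the property name is hypothetical and is only used in the warning logged when the value carries no unit suffix.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class TimeDurationParseExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // "30s" carries a unit suffix (ns/us/ms/s/m/h/d); convert it to milliseconds.
    long millis = conf.getTimeDurationHelper(
        "example.rpc.timeout",   // hypothetical property name, used in log output only
        "30s",
        TimeUnit.MILLISECONDS);
    System.out.println(millis);  // prints 30000
  }
}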

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java

@@ -22,9 +22,10 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
-import org.apache.commons.logging.*;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -41,8 +42,8 @@ import java.util.Map;
 public abstract class ReconfigurableBase 
   extends Configured implements Reconfigurable {
   
-  private static final Log LOG =
-    LogFactory.getLog(ReconfigurableBase.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReconfigurableBase.class);
   // Use for testing purpose.
   private ReconfigurationUtil reconfigurationUtil = new ReconfigurationUtil();
 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.conf;
 
-import org.apache.commons.logging.*;
-
 import org.apache.commons.lang.StringEscapeUtils;
 
 import java.util.Collection;
@@ -33,6 +31,8 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A servlet for changing a node's configuration.
@@ -45,8 +45,8 @@ public class ReconfigurationServlet extends HttpServlet {
   
   private static final long serialVersionUID = 1L;
 
-  private static final Log LOG =
-    LogFactory.getLog(ReconfigurationServlet.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReconfigurationServlet.class);
 
   // the prefix used to fing the attribute holding the reconfigurable 
   // for a given request

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java

@@ -26,12 +26,12 @@ import javax.crypto.Cipher;
 import javax.crypto.spec.IvParameterSpec;
 import javax.crypto.spec.SecretKeySpec;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY;
@@ -42,8 +42,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
  */
 @InterfaceAudience.Private
 public class JceAesCtrCryptoCodec extends AesCtrCryptoCodec {
-  private static final Log LOG =
-      LogFactory.getLog(JceAesCtrCryptoCodec.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JceAesCtrCryptoCodec.class.getName());
   
   private Configuration conf;
   private String provider;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java

@@ -26,22 +26,22 @@ import java.security.GeneralSecurityException;
 import java.security.SecureRandom;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.crypto.random.OsSecureRandom;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Implement the AES-CTR crypto codec using JNI into OpenSSL.
  */
 @InterfaceAudience.Private
 public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec {
-  private static final Log LOG =
-      LogFactory.getLog(OpensslAesCtrCryptoCodec.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpensslAesCtrCryptoCodec.class.getName());
 
   private Configuration conf;
   private Random random;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java

@@ -26,13 +26,13 @@ import javax.crypto.IllegalBlockSizeException;
 import javax.crypto.NoSuchPaddingException;
 import javax.crypto.ShortBufferException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.util.PerformanceAdvisory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * OpenSSL cipher using JNI.
@@ -41,8 +41,8 @@ import org.apache.hadoop.util.PerformanceAdvisory;
  */
 @InterfaceAudience.Private
 public final class OpensslCipher {
-  private static final Log LOG =
-      LogFactory.getLog(OpensslCipher.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpensslCipher.class.getName());
   public static final int ENCRYPT_MODE = 1;
   public static final int DECRYPT_MODE = 0;
   

+ 19 - 20
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -309,9 +309,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
      * - HOSTNAME = string
      * - PORT = integer
      *
-     * If multiple hosts are provider, the Factory will create a
-     * {@link LoadBalancingKMSClientProvider} that round-robins requests
-     * across the provided list of hosts.
+     * This will always create a {@link LoadBalancingKMSClientProvider}
+     * if the uri is correct.
      */
     @Override
     public KeyProvider createProvider(URI providerUri, Configuration conf)
@@ -338,30 +337,26 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
           }
           hostsPart = t[0];
         }
-        return createProvider(providerUri, conf, origUrl, port, hostsPart);
+        return createProvider(conf, origUrl, port, hostsPart);
       }
       return null;
     }
 
-    private KeyProvider createProvider(URI providerUri, Configuration conf,
+    private KeyProvider createProvider(Configuration conf,
         URL origUrl, int port, String hostsPart) throws IOException {
       String[] hosts = hostsPart.split(";");
-      if (hosts.length == 1) {
-        return new KMSClientProvider(providerUri, conf);
-      } else {
-        KMSClientProvider[] providers = new KMSClientProvider[hosts.length];
-        for (int i = 0; i < hosts.length; i++) {
-          try {
-            providers[i] =
-                new KMSClientProvider(
-                    new URI("kms", origUrl.getProtocol(), hosts[i], port,
-                        origUrl.getPath(), null, null), conf);
-          } catch (URISyntaxException e) {
-            throw new IOException("Could not instantiate KMSProvider..", e);
-          }
+      KMSClientProvider[] providers = new KMSClientProvider[hosts.length];
+      for (int i = 0; i < hosts.length; i++) {
+        try {
+          providers[i] =
+              new KMSClientProvider(
+                  new URI("kms", origUrl.getProtocol(), hosts[i], port,
+                      origUrl.getPath(), null, null), conf);
+        } catch (URISyntaxException e) {
+          throw new IOException("Could not instantiate KMSProvider.", e);
         }
-        return new LoadBalancingKMSClientProvider(providers, conf);
       }
+      return new LoadBalancingKMSClientProvider(providers, conf);
     }
   }
 
@@ -1078,7 +1073,11 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
       } catch (Exception e) {
-        throw new IOException(e);
+        if (e instanceof IOException) {
+          throw (IOException) e;
+        } else {
+          throw new IOException(e);
+        }
       }
     }
     return tokens;
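
With the factory change above, a kms:// provider URI now always yields a LoadBalancingKMSClientProvider, whether it names one host or several. A hedged sketch of resolving such a URI is below; the host names and port are made up, and the URI form (scheme@host1;host2:port/path) is the standard KMS provider syntax.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class KmsProviderUriExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two illustrative KMS hosts behind a single provider URI.
    URI uri = URI.create("kms://https@kms1.example.com;kms2.example.com:9600/kms");
    KeyProvider provider = KeyProviderFactory.get(uri, conf);
    // After this change this prints LoadBalancingKMSClientProvider even for one host.
    System.out.println(provider.getClass().getSimpleName());
  }
}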

+ 78 - 12
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.crypto.key.kms;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.security.GeneralSecurityException;
 import java.security.NoSuchAlgorithmException;
 import java.util.Arrays;
@@ -31,14 +32,19 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 
 /**
  * A simple LoadBalancing KMSClientProvider that round-robins requests
@@ -68,6 +74,8 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
   private final KMSClientProvider[] providers;
   private final AtomicInteger currentIdx;
 
+  private RetryPolicy retryPolicy = null;
+
   public LoadBalancingKMSClientProvider(KMSClientProvider[] providers,
       Configuration conf) {
     this(shuffle(providers), Time.monotonicNow(), conf);
@@ -79,24 +87,79 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
     super(conf);
     this.providers = providers;
     this.currentIdx = new AtomicInteger((int)(seed % providers.length));
+    int maxNumRetries = conf.getInt(CommonConfigurationKeysPublic.
+        KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, providers.length);
+    int sleepBaseMillis = conf.getInt(CommonConfigurationKeysPublic.
+        KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY,
+        CommonConfigurationKeysPublic.
+            KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT);
+    int sleepMaxMillis = conf.getInt(CommonConfigurationKeysPublic.
+        KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY,
+        CommonConfigurationKeysPublic.
+            KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT);
+    Preconditions.checkState(maxNumRetries >= 0);
+    Preconditions.checkState(sleepBaseMillis >= 0);
+    Preconditions.checkState(sleepMaxMillis >= 0);
+    this.retryPolicy = RetryPolicies.failoverOnNetworkException(
+        RetryPolicies.TRY_ONCE_THEN_FAIL, maxNumRetries, 0, sleepBaseMillis,
+        sleepMaxMillis);
   }
 
   @VisibleForTesting
-  KMSClientProvider[] getProviders() {
+  public KMSClientProvider[] getProviders() {
     return providers;
   }
 
   private <T> T doOp(ProviderCallable<T> op, int currPos)
       throws IOException {
+    if (providers.length == 0) {
+      throw new IOException("No providers configured !");
+    }
     IOException ex = null;
-    for (int i = 0; i < providers.length; i++) {
+    int numFailovers = 0;
+    for (int i = 0;; i++, numFailovers++) {
       KMSClientProvider provider = providers[(currPos + i) % providers.length];
       try {
         return op.call(provider);
+      } catch (AccessControlException ace) {
+        // No need to retry on AccessControlException
+        // and AuthorizationException.
+        // This assumes all the servers are configured with identical
+        // permissions and identical key acls.
+        throw ace;
       } catch (IOException ioe) {
-        LOG.warn("KMS provider at [{}] threw an IOException!! {}",
-            provider.getKMSUrl(), StringUtils.stringifyException(ioe));
+        LOG.warn("KMS provider at [{}] threw an IOException: ",
+            provider.getKMSUrl(), ioe);
         ex = ioe;
+
+        RetryAction action = null;
+        try {
+          action = retryPolicy.shouldRetry(ioe, 0, numFailovers, false);
+        } catch (Exception e) {
+          if (e instanceof IOException) {
+            throw (IOException)e;
+          }
+          throw new IOException(e);
+        }
+        if (action.action == RetryAction.RetryDecision.FAIL) {
+          LOG.warn("Aborting since the Request has failed with all KMS"
+              + " providers(depending on {}={} setting and numProviders={})"
+              + " in the group OR the exception is not recoverable",
+              CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY,
+              getConf().getInt(
+                  CommonConfigurationKeysPublic.
+                  KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, providers.length),
+              providers.length);
+          throw ex;
+        }
+        if (((numFailovers + 1) % providers.length) == 0) {
+          // Sleep only after we try all the providers for every cycle.
+          try {
+            Thread.sleep(action.delayMillis);
+          } catch (InterruptedException e) {
+            throw new InterruptedIOException("Thread Interrupted");
+          }
+        }
       } catch (Exception e) {
         if (e instanceof RuntimeException) {
           throw (RuntimeException)e;
@@ -105,12 +168,6 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
         }
       }
     }
-    if (ex != null) {
-      LOG.warn("Aborting since the Request has failed with all KMS"
-          + " providers in the group. !!");
-      throw ex;
-    }
-    throw new IOException("No providers configured !!");
   }
 
   private int nextIdx() {
@@ -159,15 +216,24 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
   // This request is sent to all providers in the load-balancing group
   @Override
   public void warmUpEncryptedKeys(String... keyNames) throws IOException {
+    Preconditions.checkArgument(providers.length > 0,
+        "No providers are configured");
+    boolean success = false;
+    IOException e = null;
     for (KMSClientProvider provider : providers) {
       try {
         provider.warmUpEncryptedKeys(keyNames);
+        success = true;
       } catch (IOException ioe) {
+        e = ioe;
         LOG.error(
             "Error warming up keys for provider with url"
-            + "[" + provider.getKMSUrl() + "]");
+            + "[" + provider.getKMSUrl() + "]", ioe);
       }
     }
+    if (!success && e != null) {
+      throw e;
+    }
   }
 
   // This request is sent to all providers in the load-balancing group
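
The retry loop above changes what a caller observes when KMS requests fail: AccessControlException is surfaced immediately (never retried, since all providers are assumed to share identical ACLs), while an IOException now propagates only after the retry policy has given up across the provider group. A small caller-side sketch, assuming a KeyProvider obtained elsewhere and a hypothetical key name:

import java.io.IOException;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.security.AccessControlException;

public class KmsRetryBehaviourExample {
  static void fetchMetadata(KeyProvider provider) throws IOException {
    try {
      provider.getMetadata("my-key");  // any KMS operation routed through the provider
    } catch (AccessControlException ace) {
      // Surfaced immediately; permission failures are not retried.
      throw ace;
    } catch (IOException ioe) {
      // Reached only after the request failed against every provider,
      // subject to the failover retry and backoff settings.
      throw ioe;
    }
  }
}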

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java

@@ -19,13 +19,13 @@ package org.apache.hadoop.crypto.random;
 
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.util.PerformanceAdvisory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * OpenSSL secure random using JNI.
@@ -44,8 +44,8 @@ import org.apache.hadoop.util.PerformanceAdvisory;
 @InterfaceAudience.Private
 public class OpensslSecureRandom extends Random {
   private static final long serialVersionUID = -7828193502768789584L;
-  private static final Log LOG =
-      LogFactory.getLog(OpensslSecureRandom.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpensslSecureRandom.class.getName());
   
   /** If native SecureRandom unavailable, use java SecureRandom */
   private java.security.SecureRandom fallback = null;

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java

@@ -23,12 +23,12 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT;
@@ -39,7 +39,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
  */
 @InterfaceAudience.Private
 public class OsSecureRandom extends Random implements Closeable, Configurable {
-  public static final Log LOG = LogFactory.getLog(OsSecureRandom.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(OsSecureRandom.class);
   
   private static final long serialVersionUID = 6391500337172057900L;
 
@@ -112,7 +113,7 @@ public class OsSecureRandom extends Random implements Closeable, Configurable {
   @Override
   synchronized public void close() {
     if (stream != null) {
-      IOUtils.cleanup(LOG, stream);
+      IOUtils.cleanupWithLogger(LOG, stream);
       stream = null;
     }
   }

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -33,8 +33,6 @@ import java.util.NoSuchElementException;
 import java.util.StringTokenizer;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -53,6 +51,8 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class provides an interface for implementors of a Hadoop file system
@@ -67,7 +67,7 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public abstract class AbstractFileSystem {
-  static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);
+  static final Logger LOG = LoggerFactory.getLogger(AbstractFileSystem.class);
 
   /** Recording statistics per a file system class. */
   private static final Map<URI, Statistics> 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java

@@ -27,14 +27,14 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Abstract Checksumed Fs.
@@ -110,8 +110,8 @@ public abstract class ChecksumFs extends FilterFs {
    * It verifies that data matches checksums.
    *******************************************************/
   private static class ChecksumFSInputChecker extends FSInputChecker {
-    public static final Log LOG 
-      = LogFactory.getLog(FSInputChecker.class);
+    public static final Logger LOG =
+        LoggerFactory.getLogger(FSInputChecker.class);
     private static final int HEADER_LENGTH = 8;
     
     private ChecksumFs fs;

+ 31 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -608,6 +608,8 @@ public class CommonConfigurationKeysPublic {
    */
   public static final String HADOOP_TOKEN_FILES =
       "hadoop.token.files";
+  public static final String HADOOP_HTTP_AUTHENTICATION_TYPE =
+    "hadoop.http.authentication.type";
 
   /**
    * @see
@@ -719,6 +721,35 @@ public class CommonConfigurationKeysPublic {
   /** Default value for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY (12 hrs)*/
   public static final int KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
 
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  /** Default value is the number of providers specified. */
+  public static final String KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY =
+      "hadoop.security.kms.client.failover.max.retries";
+
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY =
+      "hadoop.security.kms.client.failover.sleep.base.millis";
+  /**  Default value is 100 ms. */
+  public static final int KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT  = 100;
+
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY =
+      "hadoop.security.kms.client.failover.sleep.max.millis";
+  /** Default value is 2 secs. */
+  public static final int KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT  = 2000;
+
   /**
    * @see
    * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
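
The new failover keys are read through the standard Configuration accessors;
the max-retries default is supplied by the caller because it depends on the
number of configured KMS providers. An illustrative sketch (the class name and
provider count are assumptions, not part of this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

public class KmsFailoverSettings {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    int numProviders = 2;  // illustrative; normally the size of the provider list
    // Max retries falls back to the number of providers when unset.
    int maxRetries = conf.getInt(
        CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY,
        numProviders);
    int sleepBase = conf.getInt(
        CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY,
        CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT);
    int sleepMax = conf.getInt(
        CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY,
        CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT);
    System.out.printf("retries=%d sleepBase=%dms sleepMax=%dms%n",
        maxRetries, sleepBase, sleepMax);
  }
}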

+ 4 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java

@@ -39,9 +39,6 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceStability.Evolving
 public class DF extends Shell {
 
-  /** Default DF refresh interval. */
-  public static final long DF_INTERVAL_DEFAULT = 3 * 1000;
-
   private final String dirPath;
   private final File dirFile;
   private String filesystem;
@@ -50,7 +47,8 @@ public class DF extends Shell {
   private ArrayList<String> output;
 
   public DF(File path, Configuration conf) throws IOException {
-    this(path, conf.getLong(CommonConfigurationKeys.FS_DF_INTERVAL_KEY, DF.DF_INTERVAL_DEFAULT));
+    this(path, conf.getLong(CommonConfigurationKeys.FS_DF_INTERVAL_KEY,
+        CommonConfigurationKeysPublic.FS_DF_INTERVAL_DEFAULT));
   }
 
   public DF(File path, long dfInterval) throws IOException {
@@ -214,6 +212,7 @@ public class DF extends Shell {
     if (args.length > 0)
       path = args[0];
 
-    System.out.println(new DF(new File(path), DF_INTERVAL_DEFAULT).toString());
+    System.out.println(new DF(new File(path),
+        CommonConfigurationKeysPublic.FS_DF_INTERVAL_DEFAULT).toString());
   }
 }

+ 5 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java

@@ -26,12 +26,12 @@ import java.util.concurrent.DelayQueue;
 import java.util.concurrent.Delayed;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A daemon thread that waits for the next file system to renew.
@@ -39,8 +39,8 @@ import org.apache.hadoop.util.Time;
 @InterfaceAudience.Private
 public class DelegationTokenRenewer
     extends Thread {
-  private static final Log LOG = LogFactory
-      .getLog(DelegationTokenRenewer.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(DelegationTokenRenewer.class);
 
   /** The renewable interface used by the renewer. */
   public interface Renewable {
@@ -243,7 +243,7 @@ public class DelegationTokenRenewer
         LOG.error("Interrupted while canceling token for " + fs.getUri()
             + "filesystem");
         if (LOG.isDebugEnabled()) {
-          LOG.debug(ie.getStackTrace());
+          LOG.debug("Exception in removeRenewAction: ", ie);
         }
       }
     }

+ 156 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java

@@ -17,17 +17,21 @@
  */
 package org.apache.hadoop.fs;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
 
 import javax.annotation.Nonnull;
 import java.io.IOException;
+import java.util.Collections;
 import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Set;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -43,9 +47,32 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
  * Progressable)}.
  *
  * To create missing parent directory, use {@link #recursive()}.
+ *
+ * To be more generic, {@link #opt(String, int)} and {@link #must(String, int)}
+ * variants provide an implementation-agnostic way to customize the builder.
+ * Each FS-specific builder implementation can interpret the FS-specific
+ * options accordingly, for example:
+ *
+ * <code>
+ * FSDataOutputStreamBuilder builder = fs.createFile(path);
+ * builder.permission(perm)
+ *   .bufferSize(bufSize)
+ *   .opt("dfs.outputstream.builder.lazy-persist", true)
+ *   .opt("dfs.outputstream.builder.ec.policy-name", "rs-3-2-64k")
+ *   .opt("fs.local.o-direct", true)
+ *   .must("fs.s3a.fast-upload", true)
+ *   .must("fs.azure.buffer-size", 256 * 1024 * 1024);
+ * FSDataOutputStream out = builder.build();
+ * ...
+ * </code>
+ *
+ * If an option is not related to the file system, it will be ignored.
+ * If an option is set via {@code must()} but not supported by the file
+ * system, an {@link IllegalArgumentException} will be thrown.
+ *
  */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public abstract class FSDataOutputStreamBuilder
     <S extends FSDataOutputStream, B extends FSDataOutputStreamBuilder<S, B>> {
   private final FileSystem fs;
@@ -60,6 +87,16 @@ public abstract class FSDataOutputStreamBuilder
   private Progressable progress = null;
   private ChecksumOpt checksumOpt = null;
 
+  /**
+   * Contains optional and mandatory parameters.
+   *
+   * It does not load default configurations from default files.
+   */
+  private final Configuration options = new Configuration(false);
+
+  /** Keep track of the keys for mandatory options. */
+  private final Set<String> mandatoryKeys = new HashSet<>();
+
   /**
    * Return the concrete implementation of the builder instance.
    */
@@ -215,11 +252,125 @@ public abstract class FSDataOutputStreamBuilder
     return getThisBuilder();
   }
 
+  /**
+   * Set optional Builder parameter.
+   */
+  public B opt(@Nonnull final String key, @Nonnull final String value) {
+    mandatoryKeys.remove(key);
+    options.set(key, value);
+    return getThisBuilder();
+  }
+
+  /**
+   * Set optional boolean parameter for the Builder.
+   */
+  public B opt(@Nonnull final String key, boolean value) {
+    mandatoryKeys.remove(key);
+    options.setBoolean(key, value);
+    return getThisBuilder();
+  }
+
+  /**
+   * Set optional int parameter for the Builder.
+   */
+  public B opt(@Nonnull final String key, int value) {
+    mandatoryKeys.remove(key);
+    options.setInt(key, value);
+    return getThisBuilder();
+  }
+
+  /**
+   * Set optional float parameter for the Builder.
+   */
+  public B opt(@Nonnull final String key, float value) {
+    mandatoryKeys.remove(key);
+    options.setFloat(key, value);
+    return getThisBuilder();
+  }
+
+  /**
+   * Set optional double parameter for the Builder.
+   */
+  public B opt(@Nonnull final String key, double value) {
+    mandatoryKeys.remove(key);
+    options.setDouble(key, value);
+    return getThisBuilder();
+  }
+
+  /**
+   * Set an array of string values as optional parameter for the Builder.
+   */
+  public B opt(@Nonnull final String key, @Nonnull final String... values) {
+    mandatoryKeys.remove(key);
+    options.setStrings(key, values);
+    return getThisBuilder();
+  }
+
+  /**
+   * Set mandatory option to the Builder.
+   *
+   * If the option is not supported or unavailable on the {@link FileSystem},
+   * the client should expect {@link #build()} to throw an
+   * {@link IllegalArgumentException}.
+   */
+  public B must(@Nonnull final String key, @Nonnull final String value) {
+    mandatoryKeys.add(key);
+    options.set(key, value);
+    return getThisBuilder();
+  }
+
+  /** Set mandatory boolean option. */
+  public B must(@Nonnull final String key, boolean value) {
+    mandatoryKeys.add(key);
+    options.setBoolean(key, value);
+    return getThisBuilder();
+  }
+
+  /** Set mandatory int option. */
+  public B must(@Nonnull final String key, int value) {
+    mandatoryKeys.add(key);
+    options.setInt(key, value);
+    return getThisBuilder();
+  }
+
+  /** Set mandatory float option. */
+  public B must(@Nonnull final String key, float value) {
+    mandatoryKeys.add(key);
+    options.setFloat(key, value);
+    return getThisBuilder();
+  }
+
+  /** Set mandatory double option. */
+  public B must(@Nonnull final String key, double value) {
+    mandatoryKeys.add(key);
+    options.setDouble(key, value);
+    return getThisBuilder();
+  }
+
+  /** Set a string array as mandatory option. */
+  public B must(@Nonnull final String key, @Nonnull final String... values) {
+    mandatoryKeys.add(key);
+    options.setStrings(key, values);
+    return getThisBuilder();
+  }
+
+  protected Configuration getOptions() {
+    return options;
+  }
+
+  /**
+   * Get all the keys that are set as mandatory keys.
+   */
+  @VisibleForTesting
+  protected Set<String> getMandatoryKeys() {
+    return Collections.unmodifiableSet(mandatoryKeys);
+  }
+
   /**
    * Create the FSDataOutputStream to write on the file system.
    *
-   * @throws HadoopIllegalArgumentException if the parameters are not valid.
+   * @throws IllegalArgumentException if the parameters are not valid.
    * @throws IOException on errors when file system creates or appends the file.
    */
-  public abstract S build() throws IOException;
+  public abstract S build() throws IllegalArgumentException, IOException;
 }
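
opt() records a hint that a file system may ignore, while must() additionally
tracks the key in mandatoryKeys so that an FS-specific build() can reject
options it does not understand. A sketch of that contract as a standalone
helper (hypothetical code, not the actual builder implementation):

import java.util.HashSet;
import java.util.Set;

// Hypothetical helper illustrating the check a concrete build() is expected
// to perform: every key passed to must() has to be recognized, otherwise an
// IllegalArgumentException is raised; keys passed to opt() may be ignored.
public final class MandatoryKeyCheck {
  private MandatoryKeyCheck() {
  }

  public static void rejectUnknownMandatoryKeys(Set<String> mandatoryKeys,
      Set<String> supportedKeys) {
    Set<String> unknown = new HashSet<>(mandatoryKeys);
    unknown.removeAll(supportedKeys);
    if (!unknown.isEmpty()) {
      throw new IllegalArgumentException(
          "Unsupported mandatory option(s): " + unknown);
    }
  }
}

A builder subclass would typically run such a check against getMandatoryKeys()
and its own supported-key set before delegating to the underlying create() or
append() call.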

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java

@@ -22,11 +22,12 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.zip.Checksum;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.nio.ByteBuffer;
 import java.nio.IntBuffer;
 
@@ -37,8 +38,8 @@ import java.nio.IntBuffer;
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 abstract public class FSInputChecker extends FSInputStream {
-  public static final Log LOG 
-  = LogFactory.getLog(FSInputChecker.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(FSInputChecker.class);
   
   /** The file name from which data is read from */
   protected Path file;

+ 5 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -35,8 +35,6 @@ import java.util.Stack;
 import java.util.TreeSet;
 import java.util.Map.Entry;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -63,6 +61,8 @@ import org.apache.hadoop.util.ShutdownHookManager;
 
 import com.google.common.base.Preconditions;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The FileContext class provides an interface for users of the Hadoop
@@ -169,7 +169,7 @@ import org.apache.htrace.core.Tracer;
 @InterfaceStability.Stable
 public class FileContext {
   
-  public static final Log LOG = LogFactory.getLog(FileContext.class);
+  public static final Logger LOG = LoggerFactory.getLogger(FileContext.class);
   /**
    * Default permission for directory and symlink
    * In previous versions, this default permission was also used to
@@ -341,7 +341,7 @@ public class FileContext {
         throw ex;
       }
     } catch (InterruptedException ex) {
-      LOG.error(ex);
+      LOG.error(ex.toString());
       throw new IOException("Failed to get the AbstractFileSystem for path: "
           + uri, ex);
     }
@@ -455,7 +455,7 @@ public class FileContext {
     } catch (UnsupportedFileSystemException ex) {
       throw ex;
     } catch (IOException ex) {
-      LOG.error(ex);
+      LOG.error(ex.toString());
       throw new RuntimeException(ex);
     }
     return getFileContext(defaultAfs, aConf);

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.fs;
 
+import java.io.Serializable;
+
 import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.crypto.CipherSuite;
@@ -30,7 +32,9 @@ import static com.google.common.base.Preconditions.checkNotNull;
  * an encrypted file.
  */
 @InterfaceAudience.Private
-public class FileEncryptionInfo {
+public class FileEncryptionInfo implements Serializable {
+
+  private static final long serialVersionUID = 0x156abe03;
 
   private final CipherSuite cipherSuite;
   private final CryptoProtocolVersion version;

+ 117 - 55
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java

@@ -23,18 +23,22 @@ import java.io.IOException;
 import java.io.InvalidObjectException;
 import java.io.ObjectInputValidation;
 import java.io.Serializable;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Set;
 
+import org.apache.hadoop.fs.FSProtos.FileStatusProto;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.fs.protocolPB.PBHelper;
 import org.apache.hadoop.io.Writable;
 
 /** Interface that represents the client side information for a file.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FileStatus implements Writable, Comparable<FileStatus>,
+public class FileStatus implements Writable, Comparable<Object>,
     Serializable, ObjectInputValidation {
 
   private static final long serialVersionUID = 0x13caeae8;
@@ -50,7 +54,31 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
   private String owner;
   private String group;
   private Path symlink;
-  
+  private Set<AttrFlags> attr;
+
+  private enum AttrFlags {
+    HAS_ACL,
+    HAS_CRYPT,
+    HAS_EC,
+  };
+  private static final Set<AttrFlags> NONE = Collections.<AttrFlags>emptySet();
+  private static Set<AttrFlags> flags(boolean acl, boolean crypt, boolean ec) {
+    if (!(acl || crypt || ec)) {
+      return NONE;
+    }
+    EnumSet<AttrFlags> ret = EnumSet.noneOf(AttrFlags.class);
+    if (acl) {
+      ret.add(AttrFlags.HAS_ACL);
+    }
+    if (crypt) {
+      ret.add(AttrFlags.HAS_CRYPT);
+    }
+    if (ec) {
+      ret.add(AttrFlags.HAS_EC);
+    }
+    return ret;
+  }
+
   public FileStatus() { this(0, false, 0, 0, 0, 0, null, null, null, null); }
   
   //We should deprecate this soon?
@@ -79,6 +107,15 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
                     FsPermission permission, String owner, String group, 
                     Path symlink,
                     Path path) {
+    this(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path,
+        false, false, false);
+  }
+
+  public FileStatus(long length, boolean isdir, int block_replication,
+      long blocksize, long modification_time, long access_time,
+      FsPermission permission, String owner, String group, Path symlink,
+      Path path, boolean hasAcl, boolean isEncrypted, boolean isErasureCoded) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -89,7 +126,7 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
       this.permission = permission;
     } else if (isdir) {
       this.permission = FsPermission.getDirDefault();
-    } else if (symlink!=null) {
+    } else if (symlink != null) {
       this.permission = FsPermission.getDefault();
     } else {
       this.permission = FsPermission.getFileDefault();
@@ -98,6 +135,8 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
     this.group = (group == null) ? "" : group;
     this.symlink = symlink;
     this.path = path;
+    attr = flags(hasAcl, isEncrypted, isErasureCoded);
+
     // The variables isdir and symlink indicate the type:
     // 1. isdir implies directory, in which case symlink must be null.
     // 2. !isdir implies a file or symlink, symlink != null implies a
@@ -213,7 +252,7 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
    * @return true if the underlying file or directory has ACLs set.
    */
   public boolean hasAcl() {
-    return permission.getAclBit();
+    return attr.contains(AttrFlags.HAS_ACL);
   }
 
   /**
@@ -222,7 +261,7 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
    * @return true if the underlying file is encrypted.
    */
   public boolean isEncrypted() {
-    return permission.getEncryptedBit();
+    return attr.contains(AttrFlags.HAS_CRYPT);
   }
 
   /**
@@ -231,7 +270,7 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
    * @return true if the underlying file or directory is erasure coded.
    */
   public boolean isErasureCoded() {
-    return permission.getErasureCodedBit();
+    return attr.contains(AttrFlags.HAS_EC);
   }
 
   /**
@@ -304,47 +343,6 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
   public void setSymlink(final Path p) {
     symlink = p;
   }
-  
-  //////////////////////////////////////////////////
-  // Writable
-  //////////////////////////////////////////////////
-  @Override
-  public void write(DataOutput out) throws IOException {
-    Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN);
-    out.writeLong(getLen());
-    out.writeBoolean(isDirectory());
-    out.writeShort(getReplication());
-    out.writeLong(getBlockSize());
-    out.writeLong(getModificationTime());
-    out.writeLong(getAccessTime());
-    getPermission().write(out);
-    Text.writeString(out, getOwner(), Text.DEFAULT_MAX_LEN);
-    Text.writeString(out, getGroup(), Text.DEFAULT_MAX_LEN);
-    out.writeBoolean(isSymlink());
-    if (isSymlink()) {
-      Text.writeString(out, getSymlink().toString(), Text.DEFAULT_MAX_LEN);
-    }
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    String strPath = Text.readString(in, Text.DEFAULT_MAX_LEN);
-    this.path = new Path(strPath);
-    this.length = in.readLong();
-    this.isdir = in.readBoolean();
-    this.block_replication = in.readShort();
-    blocksize = in.readLong();
-    modification_time = in.readLong();
-    access_time = in.readLong();
-    permission.readFields(in);
-    owner = Text.readString(in, Text.DEFAULT_MAX_LEN);
-    group = Text.readString(in, Text.DEFAULT_MAX_LEN);
-    if (in.readBoolean()) {
-      this.symlink = new Path(Text.readString(in, Text.DEFAULT_MAX_LEN));
-    } else {
-      this.symlink = null;
-    }
-  }
 
   /**
    * Compare this FileStatus to another FileStatus
@@ -352,26 +350,37 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
    * @return  a negative integer, zero, or a positive integer as this object
    *   is less than, equal to, or greater than the specified object.
    */
-  @Override
   public int compareTo(FileStatus o) {
     return this.getPath().compareTo(o.getPath());
   }
-  
+
+  /**
+   * Compare this FileStatus to another FileStatus.
+   * This method was added back by HADOOP-14683 to keep binary compatibility.
+   *
+   * @param   o the FileStatus to be compared.
+   * @return  a negative integer, zero, or a positive integer as this object
+   *   is less than, equal to, or greater than the specified object.
+   * @throws ClassCastException if the specified object is not FileStatus
+   */
+  @Override
+  public int compareTo(Object o) {
+    FileStatus other = (FileStatus) o;
+    return compareTo(other);
+  }
+
   /** Compare if this object is equal to another object
    * @param   o the object to be compared.
    * @return  true if two file status has the same path name; false if not.
    */
   @Override
   public boolean equals(Object o) {
-    if (o == null) {
+    if (!(o instanceof FileStatus)) {
       return false;
     }
     if (this == o) {
       return true;
     }
-    if (!(o instanceof FileStatus)) {
-      return false;
-    }
     FileStatus other = (FileStatus)o;
     return this.getPath().equals(other.getPath());
   }
@@ -406,7 +415,11 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
     sb.append("; permission=" + permission);
     sb.append("; isSymlink=" + isSymlink());
     if(isSymlink()) {
-      sb.append("; symlink=" + symlink);
+      try {
+        sb.append("; symlink=" + getSymlink());
+      } catch (IOException e) {
+        throw new RuntimeException("Unexpected exception", e);
+      }
     }
     sb.append("; hasAcl=" + hasAcl());
     sb.append("; isEncrypted=" + isEncrypted());
@@ -415,6 +428,55 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
     return sb.toString();
   }
 
+  /**
+   * Read instance encoded as protobuf from stream.
+   * @param in Input stream
+   * @see PBHelper#convert(FileStatus)
+   * @deprecated Use the {@link PBHelper} and protobuf serialization directly.
+   */
+  @Override
+  @Deprecated
+  public void readFields(DataInput in) throws IOException {
+    int size = in.readInt();
+    if (size < 0) {
+      throw new IOException("Can't read FileStatusProto with negative " +
+          "size of " + size);
+    }
+    byte[] buf = new byte[size];
+    in.readFully(buf);
+    FileStatusProto proto = FileStatusProto.parseFrom(buf);
+    FileStatus other = PBHelper.convert(proto);
+    length = other.getLen();
+    isdir = other.isDirectory();
+    block_replication = other.getReplication();
+    blocksize = other.getBlockSize();
+    modification_time = other.getModificationTime();
+    access_time = other.getAccessTime();
+    setPermission(other.getPermission());
+    setOwner(other.getOwner());
+    setGroup(other.getGroup());
+    setSymlink((other.isSymlink() ? other.getSymlink() : null));
+    setPath(other.getPath());
+    attr = flags(other.hasAcl(), other.isEncrypted(), other.isErasureCoded());
+    assert (isDirectory() && getSymlink() == null) || !isDirectory();
+  }
+
+  /**
+   * Write instance encoded as protobuf to stream.
+   * @param out Output stream
+   * @see PBHelper#convert(FileStatus)
+   * @deprecated Use the {@link PBHelper} and protobuf serialization directly.
+   */
+  @Override
+  @Deprecated
+  public void write(DataOutput out) throws IOException {
+    FileStatusProto proto = PBHelper.convert(this);
+    int size = proto.getSerializedSize();
+    out.writeInt(size);
+    out.write(proto.toByteArray());
+  }
+
   @Override
   public void validateObject() throws InvalidObjectException {
     if (null == path) {
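
The ACL, encryption and erasure-coding attributes now travel in the FileStatus
itself rather than being derived from FsPermission bits. An illustrative sketch
of the new constructor variant and the corresponding accessors (all values
below are made up):

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class FileStatusFlagsExample {
  public static void main(String[] args) {
    // The last three booleans are the new hasAcl / isEncrypted /
    // isErasureCoded attributes stored in the AttrFlags set.
    FileStatus status = new FileStatus(
        1024L,                        // length
        false,                        // isdir
        3,                            // block replication
        128L * 1024 * 1024,           // block size
        System.currentTimeMillis(),   // modification time
        System.currentTimeMillis(),   // access time
        FsPermission.getFileDefault(),
        "alice", "users",
        null,                         // symlink
        new Path("/data/part-00000"), // illustrative path
        true,                         // hasAcl
        false,                        // isEncrypted
        true);                        // isErasureCoded
    System.out.println(status.hasAcl());          // true
    System.out.println(status.isEncrypted());     // false
    System.out.println(status.isErasureCoded());  // true
  }
}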

+ 17 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -4153,9 +4153,21 @@ public abstract class FileSystem extends Configured implements Closeable {
 
     @Override
     public FSDataOutputStream build() throws IOException {
-      return getFS().create(getPath(), getPermission(), getFlags(),
-          getBufferSize(), getReplication(), getBlockSize(), getProgress(),
-          getChecksumOpt());
+      if (getFlags().contains(CreateFlag.CREATE) ||
+          getFlags().contains(CreateFlag.OVERWRITE)) {
+        if (isRecursive()) {
+          return getFS().create(getPath(), getPermission(), getFlags(),
+              getBufferSize(), getReplication(), getBlockSize(), getProgress(),
+              getChecksumOpt());
+        } else {
+          return getFS().createNonRecursive(getPath(), getPermission(),
+              getFlags(), getBufferSize(), getReplication(), getBlockSize(),
+              getProgress());
+        }
+      } else if (getFlags().contains(CreateFlag.APPEND)) {
+        return getFS().append(getPath(), getBufferSize(), getProgress());
+      }
+      throw new IOException("Must specify either create, overwrite or append");
     }
 
     @Override
@@ -4174,8 +4186,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * HADOOP-14384. Temporarily reduce the visibility of method before the
    * builder interface becomes stable.
    */
-  @InterfaceAudience.Private
-  protected FSDataOutputStreamBuilder createFile(Path path) {
+  public FSDataOutputStreamBuilder createFile(Path path) {
     return new FileSystemDataOutputStreamBuilder(this, path)
         .create().overwrite(true);
   }
@@ -4185,8 +4196,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * @param path file path.
    * @return a {@link FSDataOutputStreamBuilder} to build file append request.
    */
-  @InterfaceAudience.Private
-  protected FSDataOutputStreamBuilder appendFile(Path path) {
+  public FSDataOutputStreamBuilder appendFile(Path path) {
     return new FileSystemDataOutputStreamBuilder(this, path).append();
   }
 }
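
With createFile()/appendFile() now public and build() dispatching to create(),
createNonRecursive() or append() based on the flags, clients can use the
builder directly. An illustrative sketch against the local file system (the
path and options are assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateFileBuilderExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path path = new Path("/tmp/builder-example.txt");  // illustrative path

    // createFile() pre-sets CREATE + overwrite; recursive() makes build()
    // call create(), which creates missing parent directories, instead of
    // createNonRecursive().
    try (FSDataOutputStream out = fs.createFile(path)
        .recursive()
        .bufferSize(4096)
        .build()) {
      out.writeUTF("hello");
    }
    fs.close();
  }
}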

+ 5 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java

@@ -45,8 +45,6 @@ import java.util.zip.ZipFile;
 import org.apache.commons.collections.map.CaseInsensitiveMap;
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -57,6 +55,8 @@ import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A collection of file-processing util methods
@@ -65,7 +65,7 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceStability.Evolving
 public class FileUtil {
 
-  private static final Log LOG = LogFactory.getLog(FileUtil.class);
+  private static final Logger LOG = LoggerFactory.getLogger(FileUtil.class);
 
   /* The error code is defined in winutils to indicate insufficient
    * privilege to create symbolic links. This value need to keep in
@@ -697,7 +697,7 @@ public class FileUtil {
         entry = tis.getNextTarEntry();
       }
     } finally {
-      IOUtils.cleanup(LOG, tis, inputStream);
+      IOUtils.cleanupWithLogger(LOG, tis, inputStream);
     }
   }
 
@@ -1287,7 +1287,7 @@ public class FileUtil {
       bos = new BufferedOutputStream(fos);
       jos = new JarOutputStream(bos, jarManifest);
     } finally {
-      IOUtils.cleanup(LOG, jos, bos, fos);
+      IOUtils.cleanupWithLogger(LOG, jos, bos, fos);
     }
     String[] jarCp = {classPathJar.getCanonicalPath(),
                         unexpandedWildcardClasspath.toString()};
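
Several hunks replace IOUtils.cleanup(Log, ...) with
IOUtils.cleanupWithLogger(Logger, ...), which accepts an SLF4J logger. An
illustrative sketch of the pattern (the input path is an assumption):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CleanupWithLoggerExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(CleanupWithLoggerExample.class);

  public static void main(String[] args) throws IOException {
    InputStream in = null;
    try {
      in = new FileInputStream("/etc/hosts");  // illustrative input
      // ... read from the stream ...
    } finally {
      // Closes the stream quietly, logging any failure from close() to the
      // supplied SLF4J logger instead of propagating it.
      IOUtils.cleanupWithLogger(LOG, in);
    }
  }
}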

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java

@@ -24,8 +24,6 @@ import java.util.Arrays;
 import java.util.LinkedList;
 
 import org.apache.commons.lang.WordUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -39,12 +37,14 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** Provide command line access to a FileSystem. */
 @InterfaceAudience.Private
 public class FsShell extends Configured implements Tool {
   
-  static final Log LOG = LogFactory.getLog(FsShell.class);
+  static final Logger LOG = LoggerFactory.getLogger(FsShell.class);
 
   private static final int MAX_LINE_WIDTH = 80;
 

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java

@@ -22,7 +22,6 @@ import java.util.LinkedList;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.ChmodParser;
@@ -32,6 +31,7 @@ import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.fs.shell.FsCommand;
 import org.apache.hadoop.fs.shell.PathData;
 import org.apache.hadoop.util.Shell;
+import org.slf4j.Logger;
 
 /**
  * This class is the home for file permissions related commands.
@@ -41,7 +41,7 @@ import org.apache.hadoop.util.Shell;
 @InterfaceStability.Unstable
 public class FsShellPermissions extends FsCommand {
 
-  static Log LOG = FsShell.LOG;
+  static final Logger LOG = FsShell.LOG;
   
   /**
    * Register the permission related commands with the factory

+ 10 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java

@@ -23,6 +23,10 @@ import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URLConnection;
 
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +37,8 @@ import org.apache.hadoop.conf.Configuration;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 class FsUrlConnection extends URLConnection {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FsUrlConnection.class);
 
   private Configuration conf;
 
@@ -40,12 +46,16 @@ class FsUrlConnection extends URLConnection {
 
   FsUrlConnection(Configuration conf, URL url) {
     super(url);
+    Preconditions.checkArgument(conf != null, "null conf argument");
+    Preconditions.checkArgument(url != null, "null url argument");
     this.conf = conf;
   }
 
   @Override
   public void connect() throws IOException {
+    Preconditions.checkState(is == null, "Already connected");
     try {
+      LOG.debug("Connecting to {}", url);
       FileSystem fs = FileSystem.get(url.toURI(), conf);
       is = fs.open(new Path(url.getPath()));
     } catch (URISyntaxException e) {

+ 25 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java

@@ -22,6 +22,9 @@ import java.net.URLStreamHandlerFactory;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -41,6 +44,18 @@ import org.apache.hadoop.conf.Configuration;
 public class FsUrlStreamHandlerFactory implements
     URLStreamHandlerFactory {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FsUrlStreamHandlerFactory.class);
+
+  /**
+   * These are the protocols with MUST NOT be exported, as doing so
+   * would conflict with the standard URL handlers registered by
+   * the JVM. Many things will break.
+   */
+  public static final String[] UNEXPORTED_PROTOCOLS = {
+      "http", "https"
+  };
+
   // The configuration holds supported FS implementation class names.
   private Configuration conf;
 
@@ -64,14 +79,20 @@ public class FsUrlStreamHandlerFactory implements
       throw new RuntimeException(io);
     }
     this.handler = new FsUrlStreamHandler(this.conf);
+    for (String protocol : UNEXPORTED_PROTOCOLS) {
+      protocols.put(protocol, false);
+    }
   }
 
   @Override
   public java.net.URLStreamHandler createURLStreamHandler(String protocol) {
+    LOG.debug("Creating handler for protocol {}", protocol);
     if (!protocols.containsKey(protocol)) {
       boolean known = true;
       try {
-        FileSystem.getFileSystemClass(protocol, conf);
+        Class<? extends FileSystem> impl
+            = FileSystem.getFileSystemClass(protocol, conf);
+        LOG.debug("Found implementation of {}: {}", protocol, impl);
       }
       catch (IOException ex) {
         known = false;
@@ -79,9 +100,12 @@ public class FsUrlStreamHandlerFactory implements
       protocols.put(protocol, known);
     }
     if (protocols.get(protocol)) {
+      LOG.debug("Using handler for protocol {}", protocol);
       return handler;
     } else {
       // FileSystem does not know the protocol, let the VM handle this
+      LOG.debug("Unknown protocol {}, delegating to default implementation",
+          protocol);
       return null;
     }
   }
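
Because http and https are now pre-registered as unexported, installing the
factory no longer shadows the JVM's built-in handlers for those schemes. An
illustrative registration sketch (the URL is an assumption; the JVM allows the
stream handler factory to be set only once per process):

import java.io.InputStream;
import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;

public class FsUrlExample {
  public static void main(String[] args) throws Exception {
    // Register Hadoop's handler factory; http/https remain with the JVM
    // because they are listed in UNEXPORTED_PROTOCOLS.
    URL.setURLStreamHandlerFactory(
        new FsUrlStreamHandlerFactory(new Configuration()));

    // Any scheme backed by a registered Hadoop FileSystem can now be opened
    // through java.net.URL; "file" is used here purely for illustration.
    try (InputStream in = new URL("file:///etc/hosts").openStream()) {
      System.out.println(in.available() + " bytes available");
    }
  }
}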

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java

@@ -23,18 +23,19 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 class Globber {
-  public static final Log LOG = LogFactory.getLog(Globber.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(Globber.class.getName());
 
   private final FileSystem fs;
   private final FileContext fc;

+ 6 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.fs;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.LineReader;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.EOFException;
 import java.io.FileNotFoundException;
@@ -50,7 +50,8 @@ import java.util.*;
 
 public class HarFileSystem extends FileSystem {
 
-  private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HarFileSystem.class);
 
   public static final String METADATA_CACHE_ENTRIES_KEY = "fs.har.metadatacache.entries";
   public static final int METADATA_CACHE_ENTRIES_DEFAULT = 10;
@@ -1173,7 +1174,7 @@ public class HarFileSystem extends FileSystem {
         LOG.warn("Encountered exception ", ioe);
         throw ioe;
       } finally {
-        IOUtils.cleanup(LOG, lin, in);
+        IOUtils.cleanupWithLogger(LOG, lin, in);
       }
 
       FSDataInputStream aIn = fs.open(archiveIndexPath);
@@ -1198,7 +1199,7 @@ public class HarFileSystem extends FileSystem {
           }
         }
       } finally {
-        IOUtils.cleanup(LOG, aIn);
+        IOUtils.cleanupWithLogger(LOG, aIn);
       }
     }
   }

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -23,14 +23,15 @@ import java.util.*;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.commons.logging.*;
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration; 
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** An implementation of a round-robin scheme for disk allocation for creating
  * files. The way it works is that it is kept track what disk was last
@@ -245,8 +246,8 @@ public class LocalDirAllocator {
   
   private static class AllocatorPerContext {
 
-    private final Log LOG =
-      LogFactory.getLog(AllocatorPerContext.class);
+    private static final Logger LOG =
+        LoggerFactory.getLogger(AllocatorPerContext.class);
 
     private Random dirIndexRandomizer = new Random();
     private String contextCfgItemName;

+ 51 - 13
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java

@@ -30,6 +30,9 @@ import org.apache.hadoop.fs.permission.FsPermission;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class LocatedFileStatus extends FileStatus {
+
+  private static final long serialVersionUID = 0x17339920;
+
   private BlockLocation[] locations;
 
 
@@ -42,14 +45,18 @@ public class LocatedFileStatus extends FileStatus {
    * @param stat a file status
    * @param locations a file's block locations
    */
-  public LocatedFileStatus(FileStatus stat, BlockLocation[] locations)
-  throws IOException {
+  public LocatedFileStatus(FileStatus stat, BlockLocation[] locations) {
     this(stat.getLen(), stat.isDirectory(), stat.getReplication(),
         stat.getBlockSize(), stat.getModificationTime(),
-        stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
-        stat.getGroup(), null, stat.getPath(), locations);
+        stat.getAccessTime(), stat.getPermission(),
+        stat.getOwner(), stat.getGroup(), null, stat.getPath(),
+        stat.hasAcl(), stat.isEncrypted(), stat.isErasureCoded(), locations);
     if (stat.isSymlink()) {
-      setSymlink(stat.getSymlink());
+      try {
+        setSymlink(stat.getSymlink());
+      } catch (IOException e) {
+        throw new RuntimeException("Unexpected exception", e);
+      }
     }
   }
 
@@ -69,24 +76,55 @@ public class LocatedFileStatus extends FileStatus {
    * @param path the path's qualified name
    * @param locations a file's block locations
    */
+  @Deprecated
   public LocatedFileStatus(long length, boolean isdir,
           int block_replication,
           long blocksize, long modification_time, long access_time,
           FsPermission permission, String owner, String group, 
-          Path symlink,
-          Path path,
-          BlockLocation[] locations) {
-	  super(length, isdir, block_replication, blocksize, modification_time,
-			  access_time, permission, owner, group, symlink, path);
-	  this.locations = locations;
+          Path symlink, Path path, BlockLocation[] locations) {
+    this(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path,
+        permission.getAclBit(), permission.getEncryptedBit(),
+        permission.getErasureCodedBit(), locations);
   }
-  
+
+  /**
+   * Constructor.
+   *
+   * @param length a file's length
+   * @param isdir if the path is a directory
+   * @param block_replication the file's replication factor
+   * @param blocksize a file's block size
+   * @param modification_time a file's modification time
+   * @param access_time a file's access time
+   * @param permission a file's permission
+   * @param owner a file's owner
+   * @param group a file's group
+   * @param symlink symlink if the path is a symbolic link
+   * @param path the path's qualified name
+   * @param hasAcl entity has associated ACLs
+   * @param isEncrypted entity is encrypted
+   * @param isErasureCoded entity is erasure coded
+   * @param locations a file's block locations
+   */
+  public LocatedFileStatus(long length, boolean isdir,
+      int block_replication, long blocksize, long modification_time,
+      long access_time, FsPermission permission, String owner, String group,
+      Path symlink, Path path,
+      boolean hasAcl, boolean isEncrypted, boolean isErasureCoded,
+      BlockLocation[] locations) {
+    super(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path,
+        hasAcl, isEncrypted, isErasureCoded);
+    this.locations = locations;
+  }
+
   /**
    * Get the file's block locations
    * @return the file's block locations
    */
   public BlockLocation[] getBlockLocations() {
-	  return locations;
+    return locations;
   }
   
   /**

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java

@@ -19,11 +19,12 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** 
  * Provides a trash facility which supports pluggable Trash policies. 
@@ -34,8 +35,8 @@ import org.apache.hadoop.conf.Configured;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class Trash extends Configured {
-  private static final org.apache.commons.logging.Log LOG =
-      LogFactory.getLog(Trash.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Trash.class);
 
   private TrashPolicy trashPolicy; // configured trash policy instance
 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java

@@ -30,8 +30,6 @@ import java.text.SimpleDateFormat;
 import java.util.Collection;
 import java.util.Date;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -41,6 +39,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** Provides a <i>trash</i> feature.  Files are moved to a user's trash
  * directory, a subdirectory of their home directory named ".Trash".  Files are
@@ -54,8 +54,8 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class TrashPolicyDefault extends TrashPolicy {
-  private static final Log LOG =
-    LogFactory.getLog(TrashPolicyDefault.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TrashPolicyDefault.class);
 
   private static final Path CURRENT = new Path("Current");
 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java

@@ -25,8 +25,6 @@ import java.net.URI;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.commons.net.ftp.FTP;
 import org.apache.commons.net.ftp.FTPClient;
 import org.apache.commons.net.ftp.FTPFile;
@@ -45,6 +43,8 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>
@@ -56,8 +56,8 @@ import org.apache.hadoop.util.Progressable;
 @InterfaceStability.Stable
 public class FTPFileSystem extends FileSystem {
 
-  public static final Log LOG = LogFactory
-      .getLog(FTPFileSystem.class);
+  public static final Logger LOG = LoggerFactory
+      .getLogger(FTPFileSystem.class);
 
   public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;
 

+ 15 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java

@@ -24,8 +24,6 @@ import java.io.InvalidObjectException;
 import java.io.ObjectInputValidation;
 import java.io.Serializable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +31,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A class for file/directory permissions.
@@ -41,7 +41,7 @@ import org.apache.hadoop.io.WritableFactory;
 @InterfaceStability.Stable
 public class FsPermission implements Writable, Serializable,
     ObjectInputValidation {
-  private static final Log LOG = LogFactory.getLog(FsPermission.class);
+  private static final Logger LOG = LoggerFactory.getLogger(FsPermission.class);
   private static final long serialVersionUID = 0x2fe08564;
 
   static final WritableFactory FACTORY = new WritableFactory() {
@@ -133,11 +133,13 @@ public class FsPermission implements Writable, Serializable,
   }
 
   @Override
+  @Deprecated
   public void write(DataOutput out) throws IOException {
     out.writeShort(toShort());
   }
 
   @Override
+  @Deprecated
   public void readFields(DataInput in) throws IOException {
     fromShort(in.readShort());
   }
@@ -184,6 +186,7 @@ public class FsPermission implements Writable, Serializable,
    *
    * @return short extended short representation of this permission
    */
+  @Deprecated
   public short toExtendedShort() {
     return toShort();
   }
@@ -299,7 +302,10 @@ public class FsPermission implements Writable, Serializable,
    * Returns true if there is also an ACL (access control list).
    *
    * @return boolean true if there is also an ACL (access control list).
+   * @deprecated Get acl bit from the {@link org.apache.hadoop.fs.FileStatus}
+   * object.
    */
+  @Deprecated
   public boolean getAclBit() {
     // File system subclasses that support the ACL bit would override this.
     return false;
@@ -307,14 +313,20 @@ public class FsPermission implements Writable, Serializable,
 
   /**
    * Returns true if the file is encrypted or directory is in an encryption zone
+   * @deprecated Get encryption bit from the
+   * {@link org.apache.hadoop.fs.FileStatus} object.
    */
+  @Deprecated
   public boolean getEncryptedBit() {
     return false;
   }
 
   /**
    * Returns true if the file or directory is erasure coded.
+   * @deprecated Get ec bit from the {@link org.apache.hadoop.fs.FileStatus}
+   * object.
    */
+  @Deprecated
   public boolean getErasureCodedBit() {
     return false;
   }
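
With the permission-bit accessors deprecated, callers are expected to query
the FileStatus directly. An illustrative sketch of the preferred call pattern
(the path is an assumption):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AttrQueryExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    FileStatus st = fs.getFileStatus(new Path("/tmp"));  // illustrative path

    // Preferred after this change: ask the FileStatus...
    boolean hasAcl = st.hasAcl();
    boolean encrypted = st.isEncrypted();
    boolean erasureCoded = st.isErasureCoded();

    // ...rather than the deprecated FsPermission accessors:
    // st.getPermission().getAclBit() / getEncryptedBit() / getErasureCodedBit()
    System.out.printf("acl=%b encrypted=%b ec=%b%n",
        hasAcl, encrypted, erasureCoded);
  }
}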

+ 131 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java

@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.protocolPB;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.fs.FSProtos.*;
+
+/**
+ * Utility methods aiding conversion of fs data structures.
+ */
+public final class PBHelper {
+
+  private PBHelper() {
+    // prevent construction
+  }
+
+  public static FsPermission convert(FsPermissionProto proto)
+      throws IOException {
+    return new FsPermission((short)proto.getPerm());
+  }
+
+  public static FsPermissionProto convert(FsPermission p) throws IOException {
+    FsPermissionProto.Builder bld = FsPermissionProto.newBuilder();
+    bld.setPerm(p.toShort());
+    return bld.build();
+  }
+
+  public static FileStatus convert(FileStatusProto proto) throws IOException {
+    final Path path;
+    final long length;
+    final boolean isdir;
+    final short blockReplication;
+    final long blocksize;
+    final long mtime;
+    final long atime;
+    final String owner;
+    final String group;
+    final FsPermission permission;
+    final Path symlink;
+    switch (proto.getFileType()) {
+    case FT_DIR:
+      isdir = true;
+      symlink = null;
+      blocksize = 0;
+      length = 0;
+      blockReplication = 0;
+      break;
+    case FT_SYMLINK:
+      isdir = false;
+      symlink = new Path(proto.getSymlink());
+      blocksize = 0;
+      length = 0;
+      blockReplication = 0;
+      break;
+    case FT_FILE:
+      isdir = false;
+      symlink = null;
+      blocksize = proto.getBlockSize();
+      length = proto.getLength();
+      int brep = proto.getBlockReplication();
+      if ((brep & 0xffff0000) != 0) {
+        throw new IOException(String.format("Block replication 0x%08x " +
+            "doesn't fit in 16 bits.", brep));
+      }
+      blockReplication = (short)brep;
+      break;
+    default:
+      throw new IllegalStateException("Unknown type: " + proto.getFileType());
+    }
+    path = new Path(proto.getPath());
+    mtime = proto.getModificationTime();
+    atime = proto.getAccessTime();
+    permission = convert(proto.getPermission());
+    owner = proto.getOwner();
+    group = proto.getGroup();
+    int flags = proto.getFlags();
+    return new FileStatus(length, isdir, blockReplication, blocksize,
+        mtime, atime, permission, owner, group, symlink, path,
+        (flags & FileStatusProto.Flags.HAS_ACL_VALUE)   != 0,
+        (flags & FileStatusProto.Flags.HAS_CRYPT_VALUE) != 0,
+        (flags & FileStatusProto.Flags.HAS_EC_VALUE)    != 0);
+  }
+
+  public static FileStatusProto convert(FileStatus stat) throws IOException {
+    FileStatusProto.Builder bld = FileStatusProto.newBuilder();
+    bld.setPath(stat.getPath().toString());
+    if (stat.isDirectory()) {
+      bld.setFileType(FileStatusProto.FileType.FT_DIR);
+    } else if (stat.isSymlink()) {
+      bld.setFileType(FileStatusProto.FileType.FT_SYMLINK)
+         .setSymlink(stat.getSymlink().toString());
+    } else {
+      bld.setFileType(FileStatusProto.FileType.FT_FILE)
+         .setLength(stat.getLen())
+         .setBlockReplication(stat.getReplication())
+         .setBlockSize(stat.getBlockSize());
+    }
+    bld.setAccessTime(stat.getAccessTime())
+       .setModificationTime(stat.getModificationTime())
+       .setOwner(stat.getOwner())
+       .setGroup(stat.getGroup())
+       .setPermission(convert(stat.getPermission()));
+    int flags = 0;
+    flags |= stat.hasAcl()         ? FileStatusProto.Flags.HAS_ACL_VALUE   : 0;
+    flags |= stat.isEncrypted()    ? FileStatusProto.Flags.HAS_CRYPT_VALUE : 0;
+    flags |= stat.isErasureCoded() ? FileStatusProto.Flags.HAS_EC_VALUE    : 0;
+    bld.setFlags(flags);
+    return bld.build();
+  }
+
+}
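
PBHelper gives FileStatus an explicit protobuf codec, which the deprecated
Writable methods on FileStatus now delegate to. An illustrative round-trip
sketch (the status values are made up):

import org.apache.hadoop.fs.FSProtos.FileStatusProto;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.protocolPB.PBHelper;

public class PBHelperRoundTrip {
  public static void main(String[] args) throws Exception {
    // Illustrative status: a plain file with default permissions.
    FileStatus original = new FileStatus(42L, false, 1, 4096L, 0L, 0L,
        null, "alice", "users", null, new Path("/tmp/example"));

    // FileStatus -> protobuf -> bytes, and back again.
    FileStatusProto proto = PBHelper.convert(original);
    byte[] wire = proto.toByteArray();
    FileStatus copy = PBHelper.convert(FileStatusProto.parseFrom(wire));

    System.out.println(original.getPath().equals(copy.getPath()));  // true
  }
}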

+ 18 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/package-info.java

@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.protocolPB;

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java

@@ -23,19 +23,20 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.StringUtils;
 
 import com.jcraft.jsch.ChannelSftp;
 import com.jcraft.jsch.JSch;
 import com.jcraft.jsch.JSchException;
 import com.jcraft.jsch.Session;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** Concurrent/Multiple Connections. */
 class SFTPConnectionPool {
 
-  public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SFTPFileSystem.class);
   // Maximum number of allowed live connections. This doesn't mean we cannot
   // have more live connections. It means that when we have more
   // live connections than this threshold, any unused connection will be

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java

@@ -26,8 +26,6 @@ import java.net.URLDecoder;
 import java.util.ArrayList;
 import java.util.Vector;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -41,11 +39,14 @@ import com.jcraft.jsch.ChannelSftp;
 import com.jcraft.jsch.ChannelSftp.LsEntry;
 import com.jcraft.jsch.SftpATTRS;
 import com.jcraft.jsch.SftpException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** SFTP FileSystem. */
 public class SFTPFileSystem extends FileSystem {
 
-  public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SFTPFileSystem.class);
 
   private SFTPConnectionPool connectionPool;
   private URI uri;

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java

@@ -27,8 +27,6 @@ import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -36,6 +34,8 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An abstract class for the execution of a file system command
@@ -59,7 +59,7 @@ abstract public class Command extends Configured {
   private int depth = 0;
   protected ArrayList<Exception> exceptions = new ArrayList<Exception>();
 
-  private static final Log LOG = LogFactory.getLog(Command.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Command.class);
 
   /** allows stdout to be captured if necessary */
   public PrintStream out = System.out;

+ 110 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java

@@ -26,7 +26,11 @@ import java.net.URISyntaxException;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.TimeUnit;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -288,9 +292,113 @@ class CopyCommands {
   }
 
   public static class CopyFromLocal extends Put {
+    private ThreadPoolExecutor executor = null;
+    private int numThreads = 1;
+
+    private static final int MAX_THREADS =
+        Runtime.getRuntime().availableProcessors() * 2;
     public static final String NAME = "copyFromLocal";
-    public static final String USAGE = Put.USAGE;
-    public static final String DESCRIPTION = "Identical to the -put command.";
+    public static final String USAGE =
+        "[-f] [-p] [-l] [-d] [-t <thread count>] <localsrc> ... <dst>";
+    public static final String DESCRIPTION =
+        "Copy files from the local file system " +
+        "into fs. Copying fails if the file already " +
+        "exists, unless the -f flag is given.\n" +
+        "Flags:\n" +
+        "  -p : Preserves access and modification times, ownership and the" +
+        " mode.\n" +
+        "  -f : Overwrites the destination if it already exists.\n" +
+        "  -t <thread count> : Number of threads to be used, default is 1.\n" +
+        "  -l : Allow DataNode to lazily persist the file to disk. Forces" +
+        " replication factor of 1. This flag will result in reduced" +
+        " durability. Use with care.\n" +
+        "  -d : Skip creation of temporary file(<dst>._COPYING_).\n";
+
+    private void setNumberThreads(String numberThreadsString) {
+      if (numberThreadsString == null) {
+        numThreads = 1;
+      } else {
+        int parsedValue = Integer.parseInt(numberThreadsString);
+        if (parsedValue <= 1) {
+          numThreads = 1;
+        } else if (parsedValue > MAX_THREADS) {
+          numThreads = MAX_THREADS;
+        } else {
+          numThreads = parsedValue;
+        }
+      }
+    }
+
+    @Override
+    protected void processOptions(LinkedList<String> args) throws IOException {
+      CommandFormat cf =
+          new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d");
+      cf.addOptionWithValue("t");
+      cf.parse(args);
+      setNumberThreads(cf.getOptValue("t"));
+      setOverwrite(cf.getOpt("f"));
+      setPreserve(cf.getOpt("p"));
+      setLazyPersist(cf.getOpt("l"));
+      setDirectWrite(cf.getOpt("d"));
+      getRemoteDestination(args);
+      // should have a -r option
+      setRecursive(true);
+    }
+
+    private void copyFile(PathData src, PathData target) throws IOException {
+      if (isPathRecursable(src)) {
+        throw new PathIsDirectoryException(src.toString());
+      }
+      super.copyFileToTarget(src, target);
+    }
+
+    @Override
+    protected void copyFileToTarget(PathData src, PathData target)
+        throws IOException {
+      // if the number of threads is 1, mimic put and avoid threading overhead
+      if (numThreads == 1) {
+        copyFile(src, target);
+        return;
+      }
+
+      Runnable task = () -> {
+        try {
+          copyFile(src, target);
+        } catch (IOException e) {
+          displayError(e);
+        }
+      };
+      executor.submit(task);
+    }
+
+    @Override
+    protected void processArguments(LinkedList<PathData> args)
+        throws IOException {
+      executor = new ThreadPoolExecutor(numThreads, numThreads, 1,
+          TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024),
+          new ThreadPoolExecutor.CallerRunsPolicy());
+      super.processArguments(args);
+
+      // issue the command and then wait for it to finish
+      executor.shutdown();
+      try {
+        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES);
+      } catch (InterruptedException e) {
+        executor.shutdownNow();
+        displayError(e);
+        Thread.currentThread().interrupt();
+      }
+    }
+
+    @VisibleForTesting
+    public int getNumThreads() {
+      return numThreads;
+    }
+
+    @VisibleForTesting
+    public ThreadPoolExecutor getExecutor() {
+      return executor;
+    }
   }
  
   public static class CopyToLocal extends Get {
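
The CopyFromLocal change above adds a -t <thread count> option (clamped to twice the number of available processors) and fans copies out over a bounded thread pool, so an invocation looks like hadoop fs -copyFromLocal -t 4 <localsrc> ... <dst>. A minimal, self-contained sketch of the executor pattern it relies on, with a class name and loop of my own; when the queue fills, the submitting thread runs the copy itself instead of queueing without bound:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedCopyPoolSketch {
  public static void main(String[] args) throws InterruptedException {
    int numThreads = 4;
    ThreadPoolExecutor executor = new ThreadPoolExecutor(
        numThreads, numThreads, 1, TimeUnit.SECONDS,
        new ArrayBlockingQueue<>(1024),
        // when the 1024-slot queue is full, the caller runs the task itself,
        // throttling submission instead of growing memory without limit
        new ThreadPoolExecutor.CallerRunsPolicy());
    for (int i = 0; i < 10; i++) {
      final int n = i;
      executor.submit(() -> System.out.println("copying file " + n));
    }
    executor.shutdown();
    executor.awaitTermination(1, TimeUnit.MINUTES);
  }
}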

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java

@@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.PathExistsException;
-import org.apache.hadoop.fs.shell.CopyCommands.CopyFromLocal;
+import org.apache.hadoop.fs.shell.CopyCommands.Put;
 
 /** Various commands for moving files */
 @InterfaceAudience.Private
@@ -41,7 +41,7 @@ class MoveCommands {
   /**
    *  Move local files to a remote filesystem
    */
-  public static class MoveFromLocal extends CopyFromLocal {
+  public static class MoveFromLocal extends Put {
     public static final String NAME = "moveFromLocal";
     public static final String USAGE = "<localsrc> ... <dst>";
     public static final String DESCRIPTION = 

+ 14 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java

@@ -40,8 +40,10 @@ import org.apache.hadoop.fs.FileStatus;
  *   %o: Block size<br>
  *   %r: replication<br>
  *   %u: User name of owner<br>
- *   %y: UTC date as &quot;yyyy-MM-dd HH:mm:ss&quot;<br>
- *   %Y: Milliseconds since January 1, 1970 UTC<br>
+ *   %x: atime UTC date as &quot;yyyy-MM-dd HH:mm:ss&quot;<br>
+ *   %X: atime Milliseconds since January 1, 1970 UTC<br>
+ *   %y: mtime UTC date as &quot;yyyy-MM-dd HH:mm:ss&quot;<br>
+ *   %Y: mtime Milliseconds since January 1, 1970 UTC<br>
  * If the format is not specified, %y is used by default.
  */
 @InterfaceAudience.Private
@@ -62,9 +64,10 @@ class Stat extends FsCommand {
     "octal (%a) and symbolic (%A), filesize in" + NEWLINE +
     "bytes (%b), type (%F), group name of owner (%g)," + NEWLINE +
     "name (%n), block size (%o), replication (%r), user name" + NEWLINE +
-    "of owner (%u), modification date (%y, %Y)." + NEWLINE +
-    "%y shows UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE +
-    "%Y shows milliseconds since January 1, 1970 UTC." + NEWLINE +
+    "of owner (%u), access date (%x, %X)." + NEWLINE +
+    "modification date (%y, %Y)." + NEWLINE +
+    "%x and %y show UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE +
+    "%X and %Y show milliseconds since January 1, 1970 UTC." + NEWLINE +
     "If the format is not specified, %y is used by default." + NEWLINE;
 
   protected final SimpleDateFormat timeFmt;
@@ -127,6 +130,12 @@ class Stat extends FsCommand {
           case 'u':
             buf.append(stat.getOwner());
             break;
+          case 'x':
+            buf.append(timeFmt.format(new Date(stat.getAccessTime())));
+            break;
+          case 'X':
+            buf.append(stat.getAccessTime());
+            break;
           case 'y':
             buf.append(timeFmt.format(new Date(stat.getModificationTime())));
             break;
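
The new %x and %X specifiers above expose access time the same way %y and %Y already expose modification time, so hadoop fs -stat "%x %y" <path> now prints both. A rough standalone sketch of the formatting (the class name and sample value are mine; the command itself reuses its existing UTC timeFmt field):

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

public class StatTimeFormatSketch {
  public static void main(String[] args) {
    long accessTimeMs = 1500000000000L; // illustrative epoch-millisecond value
    SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    fmt.setTimeZone(TimeZone.getTimeZone("UTC"));
    System.out.println(fmt.format(new Date(accessTimeMs))); // what %x prints
    System.out.println(accessTimeMs);                       // what %X prints
  }
}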

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java

@@ -66,4 +66,6 @@ public interface Constants {
 
   static public final FsPermission PERMISSION_555 =
       new FsPermission((short) 0555);
+
+  String CONFIG_VIEWFS_RENAME_STRATEGY = "fs.viewfs.rename.strategy";
 }

+ 58 - 21
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -126,7 +126,8 @@ public class ViewFileSystem extends FileSystem {
   Configuration config;
   InodeTree<FileSystem> fsState;  // the fs state; ie the mount table
   Path homeDir = null;
-  
+  // Default to rename within same mountpoint
+  private RenameStrategy renameStrategy = RenameStrategy.SAME_MOUNTPOINT;
   /**
    * Make the path Absolute and get the path-part of a pathname.
    * Checks that URI matches this file system 
@@ -207,6 +208,9 @@ public class ViewFileSystem extends FileSystem {
         }
       };
       workingDir = this.getHomeDirectory();
+      renameStrategy = RenameStrategy.valueOf(
+          conf.get(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
+              RenameStrategy.SAME_MOUNTPOINT.toString()));
     } catch (URISyntaxException e) {
       throw new IOException("URISyntax exception: " + theUri);
     }
@@ -490,27 +494,55 @@ public class ViewFileSystem extends FileSystem {
     if (resDst.isInternalDir()) {
           throw readOnlyMountTable("rename", dst);
     }
-    /**
-    // Alternate 1: renames within same file system - valid but we disallow
-    // Alternate 2: (as described in next para - valid but we have disallowed it
-    //
-    // Note we compare the URIs. the URIs include the link targets. 
-    // hence we allow renames across mount links as long as the mount links
-    // point to the same target.
-    if (!resSrc.targetFileSystem.getUri().equals(
-              resDst.targetFileSystem.getUri())) {
-      throw new IOException("Renames across Mount points not supported");
-    }
-    */
-    
-    //
-    // Alternate 3 : renames ONLY within the the same mount links.
-    //
-    if (resSrc.targetFileSystem !=resDst.targetFileSystem) {
-      throw new IOException("Renames across Mount points not supported");
+
+    URI srcUri = resSrc.targetFileSystem.getUri();
+    URI dstUri = resDst.targetFileSystem.getUri();
+
+    verifyRenameStrategy(srcUri, dstUri,
+        resSrc.targetFileSystem == resDst.targetFileSystem, renameStrategy);
+
+    ChRootedFileSystem srcFS = (ChRootedFileSystem) resSrc.targetFileSystem;
+    ChRootedFileSystem dstFS = (ChRootedFileSystem) resDst.targetFileSystem;
+    return srcFS.getMyFs().rename(srcFS.fullPath(resSrc.remainingPath),
+        dstFS.fullPath(resDst.remainingPath));
+  }
+
+  static void verifyRenameStrategy(URI srcUri, URI dstUri,
+      boolean isSrcDestSame, ViewFileSystem.RenameStrategy renameStrategy)
+      throws IOException {
+    switch (renameStrategy) {
+    case SAME_FILESYSTEM_ACROSS_MOUNTPOINT:
+      if (srcUri.getAuthority() != null) {
+        if (!(srcUri.getScheme().equals(dstUri.getScheme()) && srcUri
+            .getAuthority().equals(dstUri.getAuthority()))) {
+          throw new IOException("Renames across Mount points not supported");
+        }
+      }
+
+      break;
+    case SAME_TARGET_URI_ACROSS_MOUNTPOINT:
+      // Alternate 2: Rename across mountpoints with same target.
+      // i.e. Rename across alias mountpoints.
+      //
+      // Note we compare the URIs. the URIs include the link targets.
+      // hence we allow renames across mount links as long as the mount links
+      // point to the same target.
+      if (!srcUri.equals(dstUri)) {
+        throw new IOException("Renames across Mount points not supported");
+      }
+
+      break;
+    case SAME_MOUNTPOINT:
+      //
+      // Alternate 3 : renames ONLY within the same mount links.
+      //
+      if (!isSrcDestSame) {
+        throw new IOException("Renames across Mount points not supported");
+      }
+      break;
+    default:
+      throw new IllegalArgumentException ("Unexpected rename strategy");
     }
-    return resSrc.targetFileSystem.rename(resSrc.remainingPath,
-        resDst.remainingPath);
   }
 
   @Override
@@ -1241,4 +1273,9 @@ public class ViewFileSystem extends FileSystem {
       return allPolicies;
     }
   }
+
+  enum RenameStrategy {
+    SAME_MOUNTPOINT, SAME_TARGET_URI_ACROSS_MOUNTPOINT,
+    SAME_FILESYSTEM_ACROSS_MOUNTPOINT
+  }
 }
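
The rename path above now consults the fs.viewfs.rename.strategy key (Constants.CONFIG_VIEWFS_RENAME_STRATEGY) and the new RenameStrategy enum: SAME_MOUNTPOINT keeps the old behaviour, SAME_TARGET_URI_ACROSS_MOUNTPOINT allows renames between mount links whose resolved target URIs are equal, and SAME_FILESYSTEM_ACROSS_MOUNTPOINT allows them whenever scheme and authority match, as verifyRenameStrategy enforces. A hedged sketch of opting in from client configuration; only the key and the enum value names come from the hunks above:

import org.apache.hadoop.conf.Configuration;

public class ViewFsRenameStrategySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // default is SAME_MOUNTPOINT; the viewfs client parses this value with
    // RenameStrategy.valueOf(), so it must match an enum constant exactly
    conf.set("fs.viewfs.rename.strategy", "SAME_FILESYSTEM_ACROSS_MOUNTPOINT");
    System.out.println(conf.get("fs.viewfs.rename.strategy"));
  }
}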

+ 17 - 26
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -157,7 +157,9 @@ public class ViewFs extends AbstractFileSystem {
   final Configuration config;
   InodeTree<AbstractFileSystem> fsState;  // the fs state; ie the mount table
   Path homeDir = null;
-  
+  private ViewFileSystem.RenameStrategy renameStrategy =
+      ViewFileSystem.RenameStrategy.SAME_MOUNTPOINT;
+
   static AccessControlException readOnlyMountTable(final String operation,
       final String p) {
     return new AccessControlException( 
@@ -237,6 +239,9 @@ public class ViewFs extends AbstractFileSystem {
         // return MergeFs.createMergeFs(mergeFsURIList, config);
       }
     };
+    renameStrategy = ViewFileSystem.RenameStrategy.valueOf(
+        conf.get(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
+            ViewFileSystem.RenameStrategy.SAME_MOUNTPOINT.toString()));
   }
 
   @Override
@@ -495,37 +500,23 @@ public class ViewFs extends AbstractFileSystem {
               + " is readOnly");
     }
 
-    InodeTree.ResolveResult<AbstractFileSystem> resDst = 
+    InodeTree.ResolveResult<AbstractFileSystem> resDst =
                                 fsState.resolve(getUriPath(dst), false);
     if (resDst.isInternalDir()) {
       throw new AccessControlException(
           "Cannot Rename within internal dirs of mount table: dest=" + dst
               + " is readOnly");
     }
-    
-    /**
-    // Alternate 1: renames within same file system - valid but we disallow
-    // Alternate 2: (as described in next para - valid but we have disallowed it
-    //
-    // Note we compare the URIs. the URIs include the link targets. 
-    // hence we allow renames across mount links as long as the mount links
-    // point to the same target.
-    if (!resSrc.targetFileSystem.getUri().equals(
-              resDst.targetFileSystem.getUri())) {
-      throw new IOException("Renames across Mount points not supported");
-    }
-    */
-    
-    //
-    // Alternate 3 : renames ONLY within the the same mount links.
-    //
-
-    if (resSrc.targetFileSystem !=resDst.targetFileSystem) {
-      throw new IOException("Renames across Mount points not supported");
-    }
-    
-    resSrc.targetFileSystem.renameInternal(resSrc.remainingPath,
-      resDst.remainingPath, overwrite);
+    //Alternate 1: renames within same file system
+    URI srcUri = resSrc.targetFileSystem.getUri();
+    URI dstUri = resDst.targetFileSystem.getUri();
+    ViewFileSystem.verifyRenameStrategy(srcUri, dstUri,
+        resSrc.targetFileSystem == resDst.targetFileSystem, renameStrategy);
+
+    ChRootedFs srcFS = (ChRootedFs) resSrc.targetFileSystem;
+    ChRootedFs dstFS = (ChRootedFs) resDst.targetFileSystem;
+    srcFS.getMyFs().renameInternal(srcFS.fullPath(resSrc.remainingPath),
+        dstFS.fullPath(resDst.remainingPath), overwrite);
   }
 
   @Override

+ 8 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -26,8 +26,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -47,6 +45,8 @@ import org.apache.zookeeper.KeeperException.Code;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * 
@@ -141,7 +141,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   @VisibleForTesting
   protected static final String BREADCRUMB_FILENAME = "ActiveBreadCrumb";
 
-  public static final Log LOG = LogFactory.getLog(ActiveStandbyElector.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(ActiveStandbyElector.class);
 
   private static final int SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE = 1000;
 
@@ -712,7 +713,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   }
 
   private void fatalError(String errorMessage) {
-    LOG.fatal(errorMessage);
+    LOG.error(errorMessage);
     reset();
     appClient.notifyFatalError(errorMessage);
   }
@@ -824,10 +825,10 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         createConnection();
         success = true;
       } catch(IOException e) {
-        LOG.warn(e);
+        LOG.warn(e.toString());
         sleepFor(5000);
       } catch(KeeperException e) {
-        LOG.warn(e);
+        LOG.warn(e.toString());
         sleepFor(5000);
       }
       ++connectionRetryCount;
@@ -866,7 +867,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     try {
       tempZk.close();
     } catch(InterruptedException e) {
-      LOG.warn(e);
+      LOG.warn(e.toString());
     }
     zkConnectionState = ConnectionState.TERMINATED;
     wantToBeInElection = false;
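
The fatal() and warn(e) rewrites above, repeated in the HA and ZKFC classes that follow, fall out of the SLF4J API itself: org.slf4j.Logger defines no FATAL level and no warn(Throwable) overload. A minimal sketch of the equivalents, using a class name of my own:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jLevelSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jLevelSketch.class);

  public static void main(String[] args) {
    Exception e = new Exception("simulated failure");
    LOG.error("Fatal error occurred: " + e.getMessage()); // replaces LOG.fatal(...)
    LOG.warn(e.toString());            // replaces LOG.warn(e); message only
    LOG.warn("operation failed", e);   // variant that also keeps the stack trace
  }
}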

+ 5 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java

@@ -19,9 +19,6 @@ package org.apache.hadoop.ha;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -32,6 +29,8 @@ import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ipc.RPC;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The FailOverController is responsible for electing an active service
@@ -43,7 +42,8 @@ import com.google.common.base.Preconditions;
 @InterfaceStability.Evolving
 public class FailoverController {
 
-  private static final Log LOG = LogFactory.getLog(FailoverController.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FailoverController.class);
 
   private final int gracefulFenceTimeout;
   private final int rpcTimeoutToNewActive;
@@ -252,7 +252,7 @@ public class FailoverController {
         } catch (FailoverFailedException ffe) {
           msg += ". Failback to " + fromSvc +
             " failed (" + ffe.getMessage() + ")";
-          LOG.fatal(msg);
+          LOG.error(msg);
         }
       }
       throw new FailoverFailedException(msg, cause);

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java

@@ -28,8 +28,6 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -43,6 +41,8 @@ import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A command-line tool for making calls in the HAServiceProtocol.
@@ -62,7 +62,7 @@ public abstract class HAAdmin extends Configured implements Tool {
    * operation, which is why it is not documented in the usage below.
    */
   private static final String FORCEMANUAL = "forcemanual";
-  private static final Log LOG = LogFactory.getLog(HAAdmin.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HAAdmin.class);
 
   private int rpcTimeoutForChecks = -1;
   
@@ -449,7 +449,7 @@ public abstract class HAAdmin extends Configured implements Tool {
     
     if (cmdLine.hasOption(FORCEMANUAL)) {
       if (!confirmForceManual()) {
-        LOG.fatal("Aborted");
+        LOG.error("Aborted");
         return -1;
       }
       // Instruct the NNs to honor this request even if they're

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java

@@ -23,8 +23,6 @@ import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -35,6 +33,8 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.util.Daemon;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is a daemon which runs in a loop, periodically heartbeating
@@ -47,7 +47,7 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public class HealthMonitor {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       HealthMonitor.class);
 
   private Daemon daemon;
@@ -283,7 +283,7 @@ public class HealthMonitor {
       setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
         @Override
         public void uncaughtException(Thread t, Throwable e) {
-          LOG.fatal("Health monitor failed", e);
+          LOG.error("Health monitor failed", e);
           enterState(HealthMonitor.State.HEALTH_MONITOR_FAILED);
         }
       });

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java

@@ -22,8 +22,6 @@ import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -31,6 +29,8 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class parses the configured list of fencing methods, and
@@ -61,7 +61,7 @@ public class NodeFencer {
   private static final Pattern HASH_COMMENT_RE =
     Pattern.compile("#.*$");
 
-  private static final Log LOG = LogFactory.getLog(NodeFencer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(NodeFencer.class);
 
   /**
    * Standard fencing methods included with Hadoop.

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/PowerShellFencer.java

@@ -25,10 +25,10 @@ import java.io.OutputStreamWriter;
 import java.net.InetSocketAddress;
 import java.nio.charset.StandardCharsets;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Fencer method that uses PowerShell to remotely connect to a machine and kill
@@ -41,7 +41,8 @@ import org.apache.hadoop.util.StringUtils;
  */
 public class PowerShellFencer extends Configured implements FenceMethod {
 
-  private static final Log LOG = LogFactory.getLog(PowerShellFencer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(PowerShellFencer
+      .class);
 
 
   @Override

+ 3 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java

@@ -21,12 +21,12 @@ import java.io.IOException;
 import java.lang.reflect.Field;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.util.Shell;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Fencing method that runs a shell command. It should be specified
@@ -61,8 +61,7 @@ public class ShellCommandFencer
   private static final String TARGET_PREFIX = "target_";
 
   @VisibleForTesting
-  static Log LOG = LogFactory.getLog(
-      ShellCommandFencer.class);
+  static Logger LOG = LoggerFactory.getLogger(ShellCommandFencer.class);
 
   @Override
   public void checkArgs(String args) throws BadFencingConfigurationException {

+ 7 - 11
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java

@@ -23,8 +23,6 @@ import java.util.Collection;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -32,6 +30,8 @@ import com.jcraft.jsch.ChannelExec;
 import com.jcraft.jsch.JSch;
 import com.jcraft.jsch.JSchException;
 import com.jcraft.jsch.Session;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This fencing implementation sshes to the target node and uses 
@@ -58,9 +58,8 @@ import com.jcraft.jsch.Session;
 public class SshFenceByTcpPort extends Configured
   implements FenceMethod {
 
-  static final Log LOG = LogFactory.getLog(
-      SshFenceByTcpPort.class);
-  
+  static final Logger LOG = LoggerFactory.getLogger(SshFenceByTcpPort.class);
+
   static final String CONF_CONNECT_TIMEOUT_KEY =
     "dfs.ha.fencing.ssh.connect-timeout";
   private static final int CONF_CONNECT_TIMEOUT_DEFAULT =
@@ -271,7 +270,7 @@ public class SshFenceByTcpPort extends Configured
    * Adapter from JSch's logger interface to our log4j
    */
   private static class LogAdapter implements com.jcraft.jsch.Logger {
-    static final Log LOG = LogFactory.getLog(
+    static final Logger LOG = LoggerFactory.getLogger(
         SshFenceByTcpPort.class.getName() + ".jsch");
 
     @Override
@@ -284,9 +283,8 @@ public class SshFenceByTcpPort extends Configured
       case com.jcraft.jsch.Logger.WARN:
         return LOG.isWarnEnabled();
       case com.jcraft.jsch.Logger.ERROR:
-        return LOG.isErrorEnabled();
       case com.jcraft.jsch.Logger.FATAL:
-        return LOG.isFatalEnabled();
+        return LOG.isErrorEnabled();
       default:
         return false;
       }
@@ -305,10 +303,8 @@ public class SshFenceByTcpPort extends Configured
         LOG.warn(message);
         break;
       case com.jcraft.jsch.Logger.ERROR:
-        LOG.error(message);
-        break;
       case com.jcraft.jsch.Logger.FATAL:
-        LOG.fatal(message);
+        LOG.error(message);
         break;
       default:
         break;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java

@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.ha;
 
+import org.slf4j.Logger;
+
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
 
-import org.apache.commons.logging.Log;
-
 /**
  * Class responsible for pumping the streams of the subprocess
  * out to log4j. stderr is pumped to WARN level and stdout is
@@ -35,7 +35,7 @@ class StreamPumper {
     STDOUT, STDERR;
   }
 
-  private final Log log;
+  private final Logger log;
   
   final Thread thread;
   final String logPrefix;
@@ -43,7 +43,7 @@ class StreamPumper {
   private final InputStream stream;
   private boolean started = false;
   
-  StreamPumper(final Log log, final String logPrefix,
+  StreamPumper(final Logger log, final String logPrefix,
       final InputStream stream, final StreamType type) {
     this.log = log;
     this.logPrefix = logPrefix;

+ 10 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java

@@ -28,8 +28,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -56,11 +54,13 @@ import org.apache.zookeeper.data.ACL;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.LimitedPrivate("HDFS")
 public abstract class ZKFailoverController {
 
-  static final Log LOG = LogFactory.getLog(ZKFailoverController.class);
+  static final Logger LOG = LoggerFactory.getLogger(ZKFailoverController.class);
   
   public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
   private static final String ZK_SESSION_TIMEOUT_KEY = "ha.zookeeper.session-timeout.ms";
@@ -162,7 +162,7 @@ public abstract class ZKFailoverController {
 
   public int run(final String[] args) throws Exception {
     if (!localTarget.isAutoFailoverEnabled()) {
-      LOG.fatal("Automatic failover is not enabled for " + localTarget + "." +
+      LOG.error("Automatic failover is not enabled for " + localTarget + "." +
           " Please ensure that automatic failover is enabled in the " +
           "configuration before running the ZK failover controller.");
       return ERR_CODE_AUTO_FAILOVER_NOT_ENABLED;
@@ -184,7 +184,7 @@ public abstract class ZKFailoverController {
         }
       });
     } catch (RuntimeException rte) {
-      LOG.fatal("The failover controller encounters runtime error: " + rte);
+      LOG.error("The failover controller encounters runtime error: " + rte);
       throw (Exception)rte.getCause();
     }
   }
@@ -195,7 +195,7 @@ public abstract class ZKFailoverController {
     try {
       initZK();
     } catch (KeeperException ke) {
-      LOG.fatal("Unable to start failover controller. Unable to connect "
+      LOG.error("Unable to start failover controller. Unable to connect "
           + "to ZooKeeper quorum at " + zkQuorum + ". Please check the "
           + "configured value for " + ZK_QUORUM_KEY + " and ensure that "
           + "ZooKeeper is running.");
@@ -221,7 +221,7 @@ public abstract class ZKFailoverController {
     }
 
     if (!elector.parentZNodeExists()) {
-      LOG.fatal("Unable to start failover controller. "
+      LOG.error("Unable to start failover controller. "
           + "Parent znode does not exist.\n"
           + "Run with -formatZK flag to initialize ZooKeeper.");
       return ERR_CODE_NO_PARENT_ZNODE;
@@ -230,7 +230,7 @@ public abstract class ZKFailoverController {
     try {
       localTarget.checkFencingConfigured();
     } catch (BadFencingConfigurationException e) {
-      LOG.fatal("Fencing is not configured for " + localTarget + ".\n" +
+      LOG.error("Fencing is not configured for " + localTarget + ".\n" +
           "You must configure a fencing method before using automatic " +
           "failover.", e);
       return ERR_CODE_NO_FENCER;
@@ -376,7 +376,7 @@ public abstract class ZKFailoverController {
   }
   
   private synchronized void fatalError(String err) {
-    LOG.fatal("Fatal error occurred:" + err);
+    LOG.error("Fatal error occurred:" + err);
     fatalError = err;
     notifyAll();
   }
@@ -395,7 +395,7 @@ public abstract class ZKFailoverController {
 
     } catch (Throwable t) {
       String msg = "Couldn't make " + localTarget + " active";
-      LOG.fatal(msg, t);
+      LOG.error(msg, t);
       
       recordActiveAttempt(new ActiveAttemptRecord(false, msg + "\n" +
           StringUtils.stringifyException(t)));

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java

@@ -19,8 +19,6 @@ package org.apache.hadoop.ha.protocolPB;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -42,6 +40,8 @@ import org.apache.hadoop.ipc.RPC;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is used on the server side. Calls come across the wire for the
@@ -61,7 +61,7 @@ public class HAServiceProtocolServerSideTranslatorPB implements
       TransitionToActiveResponseProto.newBuilder().build();
   private static final TransitionToStandbyResponseProto TRANSITION_TO_STANDBY_RESP = 
       TransitionToStandbyResponseProto.newBuilder().build();
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       HAServiceProtocolServerSideTranslatorPB.class);
   
   public HAServiceProtocolServerSideTranslatorPB(HAServiceProtocol server) {

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

@@ -53,8 +53,6 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.sun.jersey.spi.container.servlet.ServletContainer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -103,6 +101,8 @@ import org.eclipse.jetty.util.MultiException;
 import org.eclipse.jetty.util.ssl.SslContextFactory;
 import org.eclipse.jetty.util.thread.QueuedThreadPool;
 import org.eclipse.jetty.webapp.WebAppContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Create a Jetty embedded server to answer http requests. The primary goal is
@@ -117,7 +117,7 @@ import org.eclipse.jetty.webapp.WebAppContext;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public final class HttpServer2 implements FilterContainer {
-  public static final Log LOG = LogFactory.getLog(HttpServer2.class);
+  public static final Logger LOG = LoggerFactory.getLogger(HttpServer2.class);
 
   public static final String HTTP_SCHEME = "http";
   public static final String HTTPS_SCHEME = "https";
@@ -431,6 +431,7 @@ public final class HttpServer2 implements FilterContainer {
       HttpConfiguration httpConfig = new HttpConfiguration();
       httpConfig.setRequestHeaderSize(requestHeaderSize);
       httpConfig.setResponseHeaderSize(responseHeaderSize);
+      httpConfig.setSendServerVersion(false);
 
       for (URI ep : endpoints) {
         final ServerConnector connector;
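
Besides the logger swap, the HttpServer2 hunk above disables Jetty's server-version advertisement. A standalone illustration of that one setting (not Hadoop code); with it off, responses stop carrying a Server: Jetty(<version>) header:

import org.eclipse.jetty.server.HttpConfiguration;

public class JettyServerHeaderSketch {
  public static void main(String[] args) {
    HttpConfiguration httpConfig = new HttpConfiguration();
    // suppress the "Server: Jetty(x.y.z)" response header
    httpConfig.setSendServerVersion(false);
    System.out.println(httpConfig.getSendServerVersion()); // prints false
  }
}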

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java

@@ -29,11 +29,11 @@ import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.servlet.Filter;
 
@@ -47,7 +47,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_S
 public class StaticUserWebFilter extends FilterInitializer {
   static final String DEPRECATED_UGI_KEY = "dfs.web.ugi";
 
-  private static final Log LOG = LogFactory.getLog(StaticUserWebFilter.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(StaticUserWebFilter.class);
 
   static class User implements Principal {
     private final String name;

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java

@@ -22,8 +22,6 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -36,6 +34,8 @@ import org.apache.hadoop.util.bloom.DynamicBloomFilter;
 import org.apache.hadoop.util.bloom.Filter;
 import org.apache.hadoop.util.bloom.Key;
 import org.apache.hadoop.util.hash.Hash;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_KEY;
@@ -52,7 +52,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOO
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class BloomMapFile {
-  private static final Log LOG = LogFactory.getLog(BloomMapFile.class);
+  private static final Logger LOG = LoggerFactory.getLogger(BloomMapFile.class);
   public static final String BLOOM_FILE_NAME = "bloom";
   public static final int HASH_COUNT = 5;
   

+ 3 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java

@@ -22,11 +22,10 @@ import java.nio.ByteOrder;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import sun.misc.Unsafe;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import com.google.common.primitives.Longs;
 import com.google.common.primitives.UnsignedBytes;
 
@@ -36,7 +35,7 @@ import com.google.common.primitives.UnsignedBytes;
  * class to be able to compare arrays that start at non-zero offsets.
  */
 abstract class FastByteComparisons {
-  static final Log LOG = LogFactory.getLog(FastByteComparisons.class);
+  static final Logger LOG = LoggerFactory.getLogger(FastByteComparisons.class);
 
   /**
    * Lexicographically compare two byte arrays.

+ 6 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java

@@ -32,13 +32,12 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell;
 import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -49,7 +48,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class IOUtils {
-  public static final Log LOG = LogFactory.getLog(IOUtils.class);
+  public static final Logger LOG = LoggerFactory.getLogger(IOUtils.class);
 
   /**
    * Copies from one stream to another.
@@ -246,7 +245,10 @@ public class IOUtils {
    *
    * @param log the log to record problems to at debug level. Can be null.
    * @param closeables the objects to close
+   * @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)}
+   * instead
    */
+  @Deprecated
   public static void cleanup(Log log, java.io.Closeable... closeables) {
     for (java.io.Closeable c : closeables) {
       if (c != null) {
@@ -291,7 +293,7 @@ public class IOUtils {
    */
   public static void closeStream(java.io.Closeable stream) {
     if (stream != null) {
-      cleanup(null, stream);
+      cleanupWithLogger(null, stream);
     }
   }
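
IOUtils.cleanup(Log, Closeable...) is deprecated above in favour of cleanupWithLogger, which takes an org.slf4j.Logger; the MapFile and SequenceFile hunks further down switch their callers accordingly. A small sketch of a caller (the class name and file path are illustrative only):

import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CleanupWithLoggerSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(CleanupWithLoggerSketch.class);

  public static void main(String[] args) throws IOException {
    FileInputStream in = new FileInputStream("/etc/hosts");
    try {
      System.out.println(in.read());
    } finally {
      // closes the stream, logging rather than propagating any close failure
      IOUtils.cleanupWithLogger(LOG, in);
    }
  }
}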
   

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java

@@ -23,8 +23,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -37,6 +35,8 @@ import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.util.Options;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_KEY;
@@ -60,7 +60,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SK
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class MapFile {
-  private static final Log LOG = LogFactory.getLog(MapFile.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MapFile.class);
 
   /** The name of the index file. */
   public static final String INDEX_FILE_NAME = "index";
@@ -1002,7 +1002,7 @@ public class MapFile {
       while (reader.next(key, value))               // copy all entries
         writer.append(key, value);
     } finally {
-      IOUtils.cleanup(LOG, writer, reader);
+      IOUtils.cleanupWithLogger(LOG, writer, reader);
     }
   }
 }

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java

@@ -23,8 +23,6 @@ import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -33,6 +31,8 @@ import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_WILLNEED;
 
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manages a pool of threads which can issue readahead requests on file descriptors.
@@ -40,7 +40,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class ReadaheadPool {
-  static final Log LOG = LogFactory.getLog(ReadaheadPool.class);
+  static final Logger LOG = LoggerFactory.getLogger(ReadaheadPool.class);
   private static final int POOL_SIZE = 4;
   private static final int MAX_POOL_SIZE = 16;
   private static final int CAPACITY = 1024;

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -25,7 +25,6 @@ import java.rmi.server.UID;
 import java.security.MessageDigest;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.*;
 import org.apache.hadoop.util.Options;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.Options.CreateOpts;
@@ -51,6 +50,8 @@ import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.MergeSort;
 import org.apache.hadoop.util.PriorityQueue;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -203,7 +204,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class SequenceFile {
-  private static final Log LOG = LogFactory.getLog(SequenceFile.class);
+  private static final Logger LOG = LoggerFactory.getLogger(SequenceFile.class);
 
   private SequenceFile() {}                         // no public ctor
 
@@ -1923,7 +1924,7 @@ public class SequenceFile {
         succeeded = true;
       } finally {
         if (!succeeded) {
-          IOUtils.cleanup(LOG, this.in);
+          IOUtils.cleanupWithLogger(LOG, this.in);
         }
       }
     }

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java

@@ -25,9 +25,10 @@ import java.io.UTFDataFormatException;
 
 import org.apache.hadoop.util.StringUtils;
 
-import org.apache.commons.logging.*;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** A WritableComparable for strings that uses the UTF8 encoding.
  * 
@@ -42,7 +43,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Stable
 public class UTF8 implements WritableComparable<UTF8> {
-  private static final Log LOG= LogFactory.getLog(UTF8.class);
+  private static final Logger LOG= LoggerFactory.getLogger(UTF8.class);
   private static final DataInputBuffer IBUF = new DataInputBuffer();
 
   private static final ThreadLocal<DataOutputBuffer> OBUF_FACTORY =

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java

@@ -23,8 +23,6 @@ import java.util.Set;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +31,8 @@ import org.apache.hadoop.util.ReflectionUtils;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A global compressor/decompressor pool used to save and reuse 
@@ -41,7 +41,7 @@ import com.google.common.cache.LoadingCache;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class CodecPool {
-  private static final Log LOG = LogFactory.getLog(CodecPool.class);
+  private static final Logger LOG = LoggerFactory.getLogger(CodecPool.class);
   
   /**
    * A global compressor pool used to save the expensive 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java

@@ -19,8 +19,6 @@ package org.apache.hadoop.io.compress;
 
 import java.util.*;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -28,6 +26,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A factory that will find the correct codec for a given filename.
@@ -36,8 +36,8 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceStability.Evolving
 public class CompressionCodecFactory {
 
-  public static final Log LOG =
-    LogFactory.getLog(CompressionCodecFactory.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(CompressionCodecFactory.class.getName());
   
   private static final ServiceLoader<CompressionCodec> CODEC_PROVIDERS =
     ServiceLoader.load(CompressionCodec.class);

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java

@@ -22,14 +22,14 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -37,7 +37,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class DefaultCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
-  private static final Log LOG = LogFactory.getLog(DefaultCodec.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DefaultCodec.class);
   
   Configuration conf;
 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java

@@ -24,9 +24,8 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Compressor} based on the popular 
@@ -42,7 +41,8 @@ public class Bzip2Compressor implements Compressor {
   static final int DEFAULT_BLOCK_SIZE = 9;
   static final int DEFAULT_WORK_FACTOR = 30;
 
-  private static final Log LOG = LogFactory.getLog(Bzip2Compressor.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Bzip2Compressor.class);
 
   private long stream;
   private int blockSize;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java

@@ -23,9 +23,8 @@ import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.io.compress.Decompressor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Decompressor} based on the popular 
@@ -36,7 +35,8 @@ import org.apache.commons.logging.LogFactory;
 public class Bzip2Decompressor implements Decompressor {
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
   
-  private static final Log LOG = LogFactory.getLog(Bzip2Decompressor.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Bzip2Decompressor.class);
 
   private long stream;
   private boolean conserveMemory;

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java

@@ -18,12 +18,12 @@
 
 package org.apache.hadoop.io.compress.bzip2;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A collection of factories to create the right 
@@ -31,7 +31,7 @@ import org.apache.hadoop.util.NativeCodeLoader;
  * 
  */
 public class Bzip2Factory {
-  private static final Log LOG = LogFactory.getLog(Bzip2Factory.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Bzip2Factory.class);
 
   private static String bzip2LibraryName = "";
   private static boolean nativeBzip2Loaded;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java

@@ -22,19 +22,19 @@ import java.io.IOException;
 import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Compressor} based on the lz4 compression algorithm.
  * http://code.google.com/p/lz4/
  */
 public class Lz4Compressor implements Compressor {
-  private static final Log LOG =
-      LogFactory.getLog(Lz4Compressor.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Lz4Compressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
   private int directBufferSize;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java

@@ -22,18 +22,18 @@ import java.io.IOException;
 import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Decompressor} based on the lz4 compression algorithm.
  * http://code.google.com/p/lz4/
  */
 public class Lz4Decompressor implements Decompressor {
-  private static final Log LOG =
-      LogFactory.getLog(Lz4Compressor.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Lz4Compressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
   private int directBufferSize;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java

@@ -22,19 +22,19 @@ import java.io.IOException;
 import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Compressor} based on the snappy compression algorithm.
  * http://code.google.com/p/snappy/
  */
 public class SnappyCompressor implements Compressor {
-  private static final Log LOG =
-      LogFactory.getLog(SnappyCompressor.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SnappyCompressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
   private int directBufferSize;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java

@@ -22,19 +22,19 @@ import java.io.IOException;
 import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.io.compress.DirectDecompressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Decompressor} based on the snappy compression algorithm.
  * http://code.google.com/p/snappy/
  */
 public class SnappyDecompressor implements Decompressor {
-  private static final Log LOG =
-      LogFactory.getLog(SnappyDecompressor.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SnappyDecompressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
   private int directBufferSize;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java

@@ -23,9 +23,8 @@ import java.util.zip.Deflater;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A wrapper around java.util.zip.Deflater to make it conform 
@@ -34,7 +33,8 @@ import org.apache.commons.logging.LogFactory;
  */
 public class BuiltInZlibDeflater extends Deflater implements Compressor {
 
-  private static final Log LOG = LogFactory.getLog(BuiltInZlibDeflater.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BuiltInZlibDeflater.class);
 
   public BuiltInZlibDeflater(int level, boolean nowrap) {
     super(level, nowrap);

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java

@@ -25,9 +25,8 @@ import java.nio.ByteBuffer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.util.NativeCodeLoader;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Compressor} based on the popular 
@@ -37,7 +36,8 @@ import org.apache.commons.logging.LogFactory;
  */
 public class ZlibCompressor implements Compressor {
 
-  private static final Log LOG = LogFactory.getLog(ZlibCompressor.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ZlibCompressor.class);
 
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.io.compress.zlib;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.Decompressor;
@@ -29,6 +27,8 @@ import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A collection of factories to create the right 
@@ -36,8 +36,8 @@ import com.google.common.annotations.VisibleForTesting;
  * 
  */
 public class ZlibFactory {
-  private static final Log LOG =
-    LogFactory.getLog(ZlibFactory.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ZlibFactory.class);
 
   private static boolean nativeZlibLoaded = false;
   

+ 12 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java

@@ -18,8 +18,6 @@
 package org.apache.hadoop.io.erasurecode;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.erasurecode.codec.ErasureCodec;
@@ -31,6 +29,8 @@ import org.apache.hadoop.io.erasurecode.coder.ErasureEncoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
@@ -48,7 +48,7 @@ import java.lang.reflect.InvocationTargetException;
 @InterfaceAudience.Private
 public final class CodecUtil {
 
-  private static final Log LOG = LogFactory.getLog(CodecUtil.class);
+  private static final Logger LOG = LoggerFactory.getLogger(CodecUtil.class);
 
   public static final String IO_ERASURECODE_CODEC = "io.erasurecode.codec.";
 
@@ -178,8 +178,10 @@ public final class CodecUtil {
         }
       } catch (LinkageError | Exception e) {
         // Fallback to next coder if possible
-        LOG.warn("Failed to create raw erasure encoder " + rawCoderName +
-            ", fallback to next codec if possible", e);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Failed to create raw erasure encoder " + rawCoderName +
+              ", fallback to next codec if possible", e);
+        }
       }
     }
     throw new IllegalArgumentException("Fail to create raw erasure " +
@@ -198,12 +200,14 @@ public final class CodecUtil {
         }
       } catch (LinkageError | Exception e) {
         // Fallback to next coder if possible
-        LOG.warn("Failed to create raw erasure decoder " + rawCoderName +
-            ", fallback to next codec if possible", e);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Failed to create raw erasure decoder " + rawCoderName +
+                  ", fallback to next codec if possible", e);
+        }
       }
     }
     throw new IllegalArgumentException("Fail to create raw erasure " +
-        "encoder with given codec: " + codecName);
+        "decoder with given codec: " + codecName);
   }
 
   private static ErasureCodec createCodec(Configuration conf,
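
Besides the logging swap, the CodecUtil hunks downgrade the coder-fallback warnings to debug messages behind an isDebugEnabled() guard and fix the copy-pasted "encoder" in the decoder error message. As a hedged aside rather than a description of the patch: SLF4J's parameterized form avoids the concatenation cost even without an explicit guard, and a trailing Throwable still gets its stack trace. A small sketch with an invented coder name:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CoderFallbackSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(CoderFallbackSketch.class);

  static void tryCreateCoder(String rawCoderName) {
    try {
      // Stand-in for the reflective coder construction that can fail.
      throw new UnsupportedOperationException("coder not on classpath");
    } catch (LinkageError | Exception e) {
      // The {} argument is only formatted when DEBUG is enabled, and a final
      // Throwable with no matching placeholder is logged as a stack trace.
      LOG.debug("Failed to create raw erasure coder {}, trying next codec",
          rawCoderName, e);
    }
  }

  public static void main(String[] args) {
    tryCreateCoder("rs_hypothetical");
  }
}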

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.io.erasurecode;
 
+import java.io.Serializable;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -31,7 +32,10 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public final class ECSchema {
+public final class ECSchema implements Serializable {
+
+  private static final long serialVersionUID = 0x10953aa0;
+
   public static final String NUM_DATA_UNITS_KEY = "numDataUnits";
   public static final String NUM_PARITY_UNITS_KEY = "numParityUnits";
   public static final String CODEC_NAME_KEY = "codec";
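
ECSchema now implements Serializable with a fixed serialVersionUID, so schema objects can travel through plain Java serialization. A small stand-alone sketch of what the explicit UID buys, using a stand-in class rather than ECSchema itself:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

public class SchemaRoundTrip {
  // Stand-in for a schema-like value class; not the real ECSchema.
  static class MiniSchema implements Serializable {
    // A fixed UID keeps previously serialized bytes readable after
    // compatible changes to the class, instead of relying on the
    // compiler-generated default, which can shift with minor edits.
    private static final long serialVersionUID = 0x10953aa0L;
    final int dataUnits;
    final int parityUnits;
    MiniSchema(int dataUnits, int parityUnits) {
      this.dataUnits = dataUnits;
      this.parityUnits = parityUnits;
    }
  }

  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
      out.writeObject(new MiniSchema(6, 3));
    }
    try (ObjectInputStream in = new ObjectInputStream(
        new ByteArrayInputStream(bytes.toByteArray()))) {
      MiniSchema back = (MiniSchema) in.readObject();
      System.out.println(back.dataUnits + "+" + back.parityUnits);
    }
  }
}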

+ 9 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java

@@ -18,17 +18,17 @@
 
 package org.apache.hadoop.io.erasurecode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Erasure code native libraries (for now, Intel ISA-L) related utilities.
  */
 public final class ErasureCodeNative {
 
-  private static final Log LOG =
-      LogFactory.getLog(ErasureCodeNative.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ErasureCodeNative.class.getName());
 
   /**
    * The reason why ISA-L library is not available, or null if it is available.
@@ -50,6 +50,11 @@ public final class ErasureCodeNative {
       }
       LOADING_FAILURE_REASON = problem;
     }
+
+    if (LOADING_FAILURE_REASON != null) {
+      LOG.warn("ISA-L support is not available in your platform... " +
+              "using builtin-java codec where applicable");
+    }
   }
 
   private ErasureCodeNative() {}
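
Beyond the logger swap, ErasureCodeNative now warns once at class-initialization time when the ISA-L library cannot be loaded. A rough sketch of that load-and-record pattern, with a fake probe standing in for the real JNI check:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class NativeProbeSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(NativeProbeSketch.class);

  // Reason the native library is unavailable, or null if it loaded.
  private static final String LOADING_FAILURE_REASON;

  static {
    String problem = null;
    try {
      // Stand-in for the real native check; the actual class probes JNI.
      throw new UnsatisfiedLinkError("libisal.so not found");
    } catch (UnsatisfiedLinkError | Exception t) {
      problem = t.getMessage();
    }
    LOADING_FAILURE_REASON = problem;
    if (LOADING_FAILURE_REASON != null) {
      // Warn once at load time so individual callers do not repeat it.
      LOG.warn("native acceleration unavailable: {}", LOADING_FAILURE_REASON);
    }
  }

  private NativeProbeSketch() {}

  public static void main(String[] args) {
    System.out.println(LOADING_FAILURE_REASON);
  }
}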

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java

@@ -30,8 +30,6 @@ import java.util.Arrays;
 import java.util.Map;
 import java.util.TreeMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -43,6 +41,8 @@ import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarComparator;
 import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarLong;
 import org.apache.hadoop.io.file.tfile.Compression.Algorithm;
 import org.apache.hadoop.io.file.tfile.Utils.Version;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Block Compressed file, the underlying physical storage layer for TFile.
@@ -54,7 +54,7 @@ final class BCFile {
   // the current version of BCFile impl, increment them (major or minor) made
   // enough changes
   static final Version API_VERSION = new Version((short) 1, (short) 0);
-  static final Log LOG = LogFactory.getLog(BCFile.class);
+  static final Logger LOG = LoggerFactory.getLogger(BCFile.class);
 
   /**
    * Prevent the instantiation of BCFile objects.

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java

@@ -24,8 +24,6 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.CodecPool;
 import org.apache.hadoop.io.compress.CompressionCodec;
@@ -35,6 +33,8 @@ import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY;
@@ -44,7 +44,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
  * Compression related stuff.
  */
 final class Compression {
-  static final Log LOG = LogFactory.getLog(Compression.class);
+  static final Logger LOG = LoggerFactory.getLogger(Compression.class);
 
   /**
    * Prevent the instantiation of class.

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java

@@ -29,8 +29,6 @@ import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Comparator;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -51,6 +49,8 @@ import org.apache.hadoop.io.file.tfile.CompareUtils.BytesComparator;
 import org.apache.hadoop.io.file.tfile.CompareUtils.MemcmpRawComparator;
 import org.apache.hadoop.io.file.tfile.Utils.Version;
 import org.apache.hadoop.io.serializer.JavaSerializationComparator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A TFile is a container of key-value pairs. Both keys and values are type-less
@@ -131,7 +131,7 @@ import org.apache.hadoop.io.serializer.JavaSerializationComparator;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class TFile {
-  static final Log LOG = LogFactory.getLog(TFile.class);
+  static final Logger LOG = LoggerFactory.getLogger(TFile.class);
 
   private static final String CHUNK_BUF_SIZE_ATTR = "tfile.io.chunk.size";
   private static final String FS_INPUT_BUF_SIZE_ATTR =
@@ -335,7 +335,7 @@ public class TFile {
           writerBCF.close();
         }
       } finally {
-        IOUtils.cleanup(LOG, blkAppender, writerBCF);
+        IOUtils.cleanupWithLogger(LOG, blkAppender, writerBCF);
         blkAppender = null;
         writerBCF = null;
         state = State.CLOSED;
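
Here, and in TFileDumper just below, IOUtils.cleanup(Log, ...) becomes IOUtils.cleanupWithLogger, which takes an SLF4J Logger but is otherwise the same best-effort close helper. A hedged usage sketch, assuming hadoop-common and an SLF4J binding are on the classpath:

import java.io.ByteArrayOutputStream;
import java.io.OutputStream;

import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CleanupWithLoggerSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(CleanupWithLoggerSketch.class);

  public static void main(String[] args) throws Exception {
    OutputStream a = new ByteArrayOutputStream();
    OutputStream b = null; // null entries are simply skipped
    try {
      a.write(new byte[] {1, 2, 3});
    } finally {
      // Close everything best-effort; close failures are swallowed and
      // reported on the supplied SLF4J logger rather than rethrown.
      IOUtils.cleanupWithLogger(LOG, a, b);
    }
  }
}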

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java

@@ -25,8 +25,6 @@ import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -36,12 +34,14 @@ import org.apache.hadoop.io.file.tfile.BCFile.BlockRegion;
 import org.apache.hadoop.io.file.tfile.BCFile.MetaIndexEntry;
 import org.apache.hadoop.io.file.tfile.TFile.TFileIndexEntry;
 import org.apache.hadoop.io.file.tfile.Utils.Version;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Dumping the information of a TFile.
  */
 class TFileDumper {
-  static final Log LOG = LogFactory.getLog(TFileDumper.class);
+  static final Logger LOG = LoggerFactory.getLogger(TFileDumper.class);
 
   private TFileDumper() {
     // namespace object not constructable.
@@ -290,7 +290,7 @@ class TFileDumper {
         }
       }
     } finally {
-      IOUtils.cleanup(LOG, reader, fsdis);
+      IOUtils.cleanupWithLogger(LOG, reader, fsdis);
     }
   }
 }

+ 8 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java

@@ -40,9 +40,9 @@ import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.PerformanceAdvisory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import sun.misc.Unsafe;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -98,7 +98,7 @@ public class NativeIO {
        write.  */
     public static int SYNC_FILE_RANGE_WAIT_AFTER = 4;
 
-    private static final Log LOG = LogFactory.getLog(NativeIO.class);
+    private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class);
 
     // Set to true via JNI if possible
     public static boolean fadvisePossible = false;
@@ -634,7 +634,7 @@ public class NativeIO {
     }
   }
 
-  private static final Log LOG = LogFactory.getLog(NativeIO.class);
+  private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class);
 
   private static boolean nativeLoaded = false;
 
@@ -940,10 +940,10 @@ public class NativeIO {
           position += transferred;
         }
       } finally {
-        IOUtils.cleanup(LOG, output);
-        IOUtils.cleanup(LOG, fos);
-        IOUtils.cleanup(LOG, input);
-        IOUtils.cleanup(LOG, fis);
+        IOUtils.cleanupWithLogger(LOG, output);
+        IOUtils.cleanupWithLogger(LOG, fos);
+        IOUtils.cleanupWithLogger(LOG, input);
+        IOUtils.cleanupWithLogger(LOG, fis);
       }
     }
   }
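
The NativeIO copy routine keeps its explicit finally-block cleanup, only rerouted through cleanupWithLogger. As a hedged alternative for new code rather than a description of this patch, try-with-resources gives the same guarantee, closing in reverse declaration order and recording secondary failures as suppressed exceptions:

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.nio.channels.FileChannel;

public class CopyWithResourcesSketch {
  // Copies src to dst through NIO channels; the four handles of the original
  // pattern are all declared in a single try-with-resources header.
  static void copy(String src, String dst) throws Exception {
    try (FileInputStream fis = new FileInputStream(src);
         FileOutputStream fos = new FileOutputStream(dst);
         FileChannel in = fis.getChannel();
         FileChannel out = fos.getChannel()) {
      long position = 0;
      long size = in.size();
      while (position < size) {
        position += in.transferTo(position, size - position, out);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    copy(args[0], args[1]);
  }
}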

Some files were not shown because too many files changed in this diff