
Merge r1334158 through r1335790 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3092@1335791 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 13 years ago
parent
commit
06f1a815dc
100 changed files with 1240 additions and 931 deletions
  1. +13 -2  hadoop-common-project/hadoop-common/CHANGES.txt
  2. +2 -2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  3. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
  4. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
  5. +11 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
  6. +7 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
  7. +3 -2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
  8. +4 -2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
  9. +8 -32  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  10. +0 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
  11. +0 -232  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Krb5AndCertsSslSocketConnector.java
  12. +27 -83  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
  13. +0 -7  hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml
  14. +15 -0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
  15. +23 -7  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
  16. +31 -8  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
  17. +0 -10  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
  18. +18 -0  hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  19. +2 -4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  20. +7 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
  21. +0 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
  22. +6 -6  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
  23. +13 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java
  24. +63 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
  25. +4 -5  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
  26. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
  27. +32 -33  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  28. +3 -4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
  29. +1 -2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
  30. +2 -3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
  31. +18 -12  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java
  32. +16 -3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  33. +4 -8  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  34. +8 -36  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
  35. +1 -2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
  36. +10 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
  37. +3 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
  38. +3 -4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  39. +88 -112  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
  40. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
  41. +41 -55  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  42. +7 -9  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
  43. +1 -2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
  44. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
  45. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
  46. +4 -11  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
  47. +4 -11  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
  48. +11 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
  49. +93 -0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
  50. +3 -3  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
  51. +93 -0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
  52. +5 -23  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
  53. +29 -0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
  54. +40 -0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
  55. +104 -0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
  56. +5 -0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
  57. +6 -0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
  58. +2 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
  59. +17 -0  hadoop-mapreduce-project/CHANGES.txt
  60. +5 -3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
  61. +5 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
  62. +7 -7  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
  63. +9 -8  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
  64. +7 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java
  65. +5 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
  66. +5 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java
  67. +2 -17  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
  68. +3 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
  69. +6 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
  70. +20 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
  71. +20 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
  72. +5 -2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
  73. +9 -11  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
  74. +7 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
  75. +11 -13  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
  76. +4 -6  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
  77. +5 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
  78. +7 -6  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
  79. +3 -6  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
  80. +3 -4  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
  81. +1 -1  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java
  82. +25 -0  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java
  83. +4 -3  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
  84. +1 -2  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
  85. +15 -18  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
  86. +10 -2  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
  87. +4 -2  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
  88. +4 -2  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java
  89. +2 -2  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
  90. +0 -1  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
  91. +6 -6  hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java
  92. +5 -6  hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java
  93. +4 -3  hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java
  94. +1 -1  hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
  95. +6 -3  hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
  96. +7 -7  hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
  97. +3 -3  hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
  98. +36 -36  hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java
  99. +31 -0  hadoop-tools/hadoop-rumen/dev-support/findbugs-exclude.xml
  100. +10 -0  hadoop-tools/hadoop-rumen/pom.xml

+ 13 - 2
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -326,8 +326,6 @@ Release 2.0.0 - UNRELEASED
 
     HADOOP-8104. Inconsistent Jackson versions (tucu)
 
-    HADOOP-7940. The Text.clear() method does not clear the bytes as intended. (Csaba Miklos via harsh)
-
     HADOOP-8119. Fix javac warnings in TestAuthenticationFilter in hadoop-auth.
     (szetszwo)
 
@@ -423,6 +421,14 @@ Release 2.0.0 - UNRELEASED
 
     HADOOP-8355. SPNEGO filter throws/logs exception when authentication fails (tucu)
 
+    HADOOP-8349. ViewFS doesn't work when the root of a file system is mounted. (atm)
+
+    HADOOP-8328. Duplicate FileSystem Statistics object for 'file' scheme.
+    (tomwhite)
+
+    HADOOP-8359. Fix javadoc warnings in Configuration.  (Anupam Seth via
+    szetszwo)
+
   BREAKDOWN OF HADOOP-7454 SUBTASKS
 
     HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
@@ -536,6 +542,11 @@ Release 0.23.3 - UNRELEASED
     HADOOP-8335. Improve Configuration's address handling (Daryn Sharp via
     bobby)
 
+    HADOOP-8327. distcpv2 and distcpv1 jars should not coexist (Dave Thompson
+    via bobby)
+
+    HADOOP-8341. Fix or filter findbugs issues in hadoop-tools (bobby)
+
 Release 0.23.2 - UNRELEASED 
 
   INCOMPATIBLE CHANGES

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -278,7 +278,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @param key
    * @param newKeys
    * @param customMessage
-   * @deprecated use {@link addDeprecation(String key, String newKey,
+   * @deprecated use {@link #addDeprecation(String key, String newKey,
       String customMessage)} instead
    */
   @Deprecated
@@ -328,7 +328,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * 
    * @param key Key that is to be deprecated
    * @param newKeys list of keys that take up the values of deprecated key
-   * @deprecated use {@link addDeprecation(String key, String newKey)} instead
+   * @deprecated use {@link #addDeprecation(String key, String newKey)} instead
    */
   @Deprecated
   public synchronized static void addDeprecation(String key, String[] newKeys) {

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -346,7 +346,7 @@ public abstract class AbstractFileSystem {
             path);
       } else {
         throw new InvalidPathException(
-            "Path without scheme with non-null autorhrity:" + path);
+            "Path without scheme with non-null authority:" + path);
       }
     }
     String thisScheme = this.getUri().getScheme();

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -53,7 +53,7 @@ import org.apache.hadoop.util.Progressable;
 public class FilterFileSystem extends FileSystem {
   
   protected FileSystem fs;
-  private String swapScheme;
+  protected String swapScheme;
   
   /*
    * so that extending classes can define it

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -39,6 +39,17 @@ public class LocalFileSystem extends ChecksumFileSystem {
   public LocalFileSystem() {
     this(new RawLocalFileSystem());
   }
+  
+  @Override
+  public void initialize(URI name, Configuration conf) throws IOException {
+    if (fs.getConf() == null) {
+      fs.initialize(name, conf);
+    }
+    String scheme = name.getScheme();
+    if (!scheme.equals(fs.getUri().getScheme())) {
+      swapScheme = scheme;
+    }
+  }
 
   /**
    * Return the protocol scheme for the FileSystem.

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java

@@ -223,6 +223,13 @@ public class Path implements Comparable {
      return isUriPathAbsolute();
   }
 
+  /**
+   * @return true if and only if this path represents the root of a file system
+   */
+  public boolean isRoot() {
+    return getParent() == null;
+  }
+
   /** Returns the final component of this path.*/
   public String getName() {
     String path = uri.getPath();

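The new Path.isRoot() is defined purely in terms of getParent(), so a bare "/" and a fully qualified URI root behave identically. A minimal sketch of the expected behavior, assuming only the method added above (the example values are illustrative, not from the test suite):

import org.apache.hadoop.fs.Path;

public class PathRootDemo {
  public static void main(String[] args) {
    System.out.println(new Path("/").isRoot());               // true: no parent
    System.out.println(new Path("hdfs://nn:8020/").isRoot()); // true: qualified root
    System.out.println(new Path("/user/foo").isRoot());       // false: has a parent
  }
}
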
+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java

@@ -75,7 +75,8 @@ class ChRootedFileSystem extends FilterFileSystem {
   protected Path fullPath(final Path path) {
     super.checkPath(path);
     return path.isAbsolute() ? 
-        new Path(chRootPathPartString + path.toUri().getPath()) :
+        new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString)
+            + path.toUri().getPath()) :
         new Path(chRootPathPartString + workingDir.toUri().getPath(), path);
   }
   
@@ -127,7 +128,7 @@ class ChRootedFileSystem extends FilterFileSystem {
     }
     String pathPart = p.toUri().getPath();
     return (pathPart.length() == chRootPathPartString.length()) ? "" : pathPart
-        .substring(chRootPathPartString.length() + 1);   
+        .substring(chRootPathPartString.length() + (chRootPathPart.isRoot() ? 0 : 1));
   }
   
   @Override

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java

@@ -79,7 +79,8 @@ class ChRootedFs extends AbstractFileSystem {
    */
   protected Path fullPath(final Path path) {
     super.checkPath(path);
-    return new Path(chRootPathPartString + path.toUri().getPath());
+    return new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString)
+        + path.toUri().getPath());
   }
   
   public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
@@ -127,7 +128,8 @@ class ChRootedFs extends AbstractFileSystem {
     }
     String pathPart = p.toUri().getPath();
     return  (pathPart.length() == chRootPathPartString.length()) ?
-        "" : pathPart.substring(chRootPathPartString.length() + 1);   
+        "" : pathPart.substring(chRootPathPartString.length() +
+            (chRootPathPart.isRoot() ? 0 : 1));
   }
   
 

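Both ChRootedFileSystem and ChRootedFs guard the same corner case: when the chroot is the file system root, chRootPathPartString is "/", so naive concatenation produces a double slash and naive prefix-stripping drops a character. A worked illustration of the arithmetic, using hypothetical values in plain JDK code (not code from the patch):

public class ChRootPathDemo {
  public static void main(String[] args) {
    String chRoot = "/";    // chroot mounted at the file system root
    String child  = "/foo"; // absolute path inside the mount

    // Old fullPath: "/" + "/foo" yields the malformed "//foo".
    System.out.println(chRoot + child);
    // New fullPath: a root chroot contributes nothing, yielding "/foo".
    System.out.println(("/".equals(chRoot) ? "" : chRoot) + child);

    // Old strip: substring(1 + 1) yields "oo", dropping a character.
    System.out.println(child.substring(chRoot.length() + 1));
    // New strip: substring(1 + 0) yields "foo".
    System.out.println(child.substring(chRoot.length()));
  }
}
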
+ 8 - 32
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -52,8 +52,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.metrics.MetricsServlet;
-import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
-import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector.MODE;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -99,6 +97,7 @@ public class HttpServer implements FilterContainer {
   // gets stored.
   public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf";
   static final String ADMINS_ACL = "admins.acl";
+  public static final String SPNEGO_FILTER = "SpnegoFilter";
 
   public static final String BIND_ADDRESS = "bind.address";
 
@@ -237,11 +236,7 @@ public class HttpServer implements FilterContainer {
     webServer.addHandler(webAppContext);
 
     addDefaultApps(contexts, appDir, conf);
-    
-    defineFilter(webAppContext, "krb5Filter", 
-        Krb5AndCertsSslSocketConnector.Krb5SslFilter.class.getName(), 
-        null, null);
-    
+        
     addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
     final FilterInitializer[] initializers = getFilterInitializers(conf); 
     if (initializers != null) {
@@ -424,12 +419,13 @@ public class HttpServer implements FilterContainer {
    * protect with Kerberos authentication. 
    * Note: This method is to be used for adding servlets that facilitate
    * internal communication and not for user facing functionality. For
-   * servlets added using this method, filters (except internal Kerberized
+   * servlets added using this method, filters (except internal Kerberos
    * filters) are not enabled. 
    * 
    * @param name The name of the servlet (can be passed as null)
    * @param pathSpec The path spec for the servlet
    * @param clazz The servlet class
+   * @param requireAuth Require Kerberos authenticate to access servlet
    */
   public void addInternalServlet(String name, String pathSpec, 
       Class<? extends HttpServlet> clazz, boolean requireAuth) {
@@ -440,11 +436,11 @@ public class HttpServer implements FilterContainer {
     webAppContext.addServlet(holder, pathSpec);
     
     if(requireAuth && UserGroupInformation.isSecurityEnabled()) {
-       LOG.info("Adding Kerberos filter to " + name);
+       LOG.info("Adding Kerberos (SPNEGO) filter to " + name);
        ServletHandler handler = webAppContext.getServletHandler();
        FilterMapping fmap = new FilterMapping();
        fmap.setPathSpec(pathSpec);
-       fmap.setFilterName("krb5Filter");
+       fmap.setFilterName(SPNEGO_FILTER);
        fmap.setDispatches(Handler.ALL);
        handler.addFilterMapping(fmap);
     }
@@ -580,26 +576,14 @@ public class HttpServer implements FilterContainer {
     webServer.addConnector(sslListener);
   }
 
-  /**
-   * Configure an ssl listener on the server.
-   * @param addr address to listen on
-   * @param sslConf conf to retrieve ssl options
-   * @param needClientAuth whether client authentication is required
-   */
-  public void addSslListener(InetSocketAddress addr, Configuration sslConf,
-      boolean needClientAuth) throws IOException {
-    addSslListener(addr, sslConf, needClientAuth, false);
-  }
-
   /**
    * Configure an ssl listener on the server.
    * @param addr address to listen on
    * @param sslConf conf to retrieve ssl options
    * @param needCertsAuth whether x509 certificate authentication is required
-   * @param needKrbAuth whether to allow kerberos auth
    */
   public void addSslListener(InetSocketAddress addr, Configuration sslConf,
-      boolean needCertsAuth, boolean needKrbAuth) throws IOException {
+      boolean needCertsAuth) throws IOException {
     if (webServer.isStarted()) {
       throw new IOException("Failed to add ssl listener");
     }
@@ -612,15 +596,7 @@ public class HttpServer implements FilterContainer {
       System.setProperty("javax.net.ssl.trustStoreType", sslConf.get(
           "ssl.server.truststore.type", "jks"));
     }
-    Krb5AndCertsSslSocketConnector.MODE mode;
-    if(needCertsAuth && needKrbAuth)
-      mode = MODE.BOTH;
-    else if (!needCertsAuth && needKrbAuth)
-      mode = MODE.KRB;
-    else // Default to certificates
-      mode = MODE.CERTS;
-
-    SslSocketConnector sslListener = new Krb5AndCertsSslSocketConnector(mode);
+    SslSocketConnector sslListener = new SslSocketConnector();
     sslListener.setHost(addr.getHostName());
     sslListener.setPort(addr.getPort());
     sslListener.setKeystore(sslConf.get("ssl.server.keystore.location"));

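With the Krb5 connector removed, internal servlets opt into Kerberos protection by passing requireAuth=true, and HttpServer maps the filter registered under SPNEGO_FILTER onto the servlet's path. A hedged sketch of the calling pattern (the servlet name and path spec are made up for illustration):

import javax.servlet.http.HttpServlet;
import org.apache.hadoop.http.HttpServer;

public class InternalServletSetup {
  // Register an internal servlet and request SPNEGO protection for it.
  // When security is disabled, requireAuth has no effect and the servlet
  // is served without the Kerberos filter.
  static void addProtectedServlet(HttpServer server,
                                  Class<? extends HttpServlet> servlet) {
    server.addInternalServlet("imageServlet", "/imageServlet", servlet, true);
  }
}
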
+ 0 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java

@@ -239,7 +239,6 @@ public class Text extends BinaryComparable
    */
   public void clear() {
     length = 0;
-    bytes = EMPTY_BYTES;
   }
 
   /*

+ 0 - 232
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Krb5AndCertsSslSocketConnector.java

@@ -1,232 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.security;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.ServerSocket;
-import java.security.Principal;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
-
-import javax.net.ssl.SSLContext;
-import javax.net.ssl.SSLServerSocket;
-import javax.net.ssl.SSLServerSocketFactory;
-import javax.net.ssl.SSLSocket;
-import javax.security.auth.kerberos.KerberosPrincipal;
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletRequestWrapper;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.mortbay.io.EndPoint;
-import org.mortbay.jetty.HttpSchemes;
-import org.mortbay.jetty.Request;
-import org.mortbay.jetty.security.ServletSSL;
-import org.mortbay.jetty.security.SslSocketConnector;
-
-/**
- * Extend Jetty's {@link SslSocketConnector} to optionally also provide 
- * Kerberos5ized SSL sockets.  The only change in behavior from superclass
- * is that we no longer honor requests to turn off NeedAuthentication when
- * running with Kerberos support.
- */
-public class Krb5AndCertsSslSocketConnector extends SslSocketConnector {
-  public static final List<String> KRB5_CIPHER_SUITES = 
-    Collections.unmodifiableList(Collections.singletonList(
-          "TLS_KRB5_WITH_3DES_EDE_CBC_SHA"));
-  static {
-    SecurityUtil.initKrb5CipherSuites();
-  }
-  
-  private static final Log LOG = LogFactory
-      .getLog(Krb5AndCertsSslSocketConnector.class);
-
-  private static final String REMOTE_PRINCIPAL = "remote_principal";
-
-  public enum MODE {KRB, CERTS, BOTH} // Support Kerberos, certificates or both?
-
-  private final boolean useKrb;
-  private final boolean useCerts;
-
-  public Krb5AndCertsSslSocketConnector() {
-    super();
-    useKrb = true;
-    useCerts = false;
-    
-    setPasswords();
-  }
-  
-  public Krb5AndCertsSslSocketConnector(MODE mode) {
-    super();
-    useKrb = mode == MODE.KRB || mode == MODE.BOTH;
-    useCerts = mode == MODE.CERTS || mode == MODE.BOTH;
-    setPasswords();
-    logIfDebug("useKerb = " + useKrb + ", useCerts = " + useCerts);
-  }
-
-  // If not using Certs, set passwords to random gibberish or else
-  // Jetty will actually prompt the user for some.
-  private void setPasswords() {
-   if(!useCerts) {
-     Random r = new Random();
-     System.setProperty("jetty.ssl.password", String.valueOf(r.nextLong()));
-     System.setProperty("jetty.ssl.keypassword", String.valueOf(r.nextLong()));
-   }
-  }
-  
-  @Override
-  protected SSLServerSocketFactory createFactory() throws Exception {
-    if(useCerts)
-      return super.createFactory();
-    
-    SSLContext context = super.getProvider()==null
-       ? SSLContext.getInstance(super.getProtocol())
-        :SSLContext.getInstance(super.getProtocol(), super.getProvider());
-    context.init(null, null, null);
-    
-    return context.getServerSocketFactory();
-  }
-  
-  /* (non-Javadoc)
-   * @see org.mortbay.jetty.security.SslSocketConnector#newServerSocket(java.lang.String, int, int)
-   */
-  @Override
-  protected ServerSocket newServerSocket(String host, int port, int backlog)
-      throws IOException {
-    logIfDebug("Creating new KrbServerSocket for: " + host);
-    SSLServerSocket ss = null;
-    
-    if(useCerts) // Get the server socket from the SSL super impl
-      ss = (SSLServerSocket)super.newServerSocket(host, port, backlog);
-    else { // Create a default server socket
-      try {
-        ss = (SSLServerSocket)(host == null 
-         ? createFactory().createServerSocket(port, backlog) :
-           createFactory().createServerSocket(port, backlog, InetAddress.getByName(host)));
-      } catch (Exception e)
-      {
-        LOG.warn("Could not create KRB5 Listener", e);
-        throw new IOException("Could not create KRB5 Listener: " + e.toString());
-      }
-    }
-    
-    // Add Kerberos ciphers to this socket server if needed.
-    if(useKrb) {
-      ss.setNeedClientAuth(true);
-      String [] combined;
-      if(useCerts) { // combine the cipher suites
-        String[] certs = ss.getEnabledCipherSuites();
-        combined = new String[certs.length + KRB5_CIPHER_SUITES.size()];
-        System.arraycopy(certs, 0, combined, 0, certs.length);
-        System.arraycopy(KRB5_CIPHER_SUITES.toArray(new String[0]), 0, combined,
-              certs.length, KRB5_CIPHER_SUITES.size());
-      } else { // Just enable Kerberos auth
-        combined = KRB5_CIPHER_SUITES.toArray(new String[0]);
-      }
-      
-      ss.setEnabledCipherSuites(combined);
-    }
-    
-    return ss;
-  };
-
-  @Override
-  public void customize(EndPoint endpoint, Request request) throws IOException {
-    if(useKrb) { // Add Kerberos-specific info
-      SSLSocket sslSocket = (SSLSocket)endpoint.getTransport();
-      Principal remotePrincipal = sslSocket.getSession().getPeerPrincipal();
-      logIfDebug("Remote principal = " + remotePrincipal);
-      request.setScheme(HttpSchemes.HTTPS);
-      request.setAttribute(REMOTE_PRINCIPAL, remotePrincipal);
-      
-      if(!useCerts) { // Add extra info that would have been added by super
-        String cipherSuite = sslSocket.getSession().getCipherSuite();
-        Integer keySize = Integer.valueOf(ServletSSL.deduceKeyLength(cipherSuite));;
-        
-        request.setAttribute("javax.servlet.request.cipher_suite", cipherSuite);
-        request.setAttribute("javax.servlet.request.key_size", keySize);
-      } 
-    }
-    
-    if(useCerts) super.customize(endpoint, request);
-  }
-  
-  private void logIfDebug(String s) {
-    if(LOG.isDebugEnabled())
-      LOG.debug(s);
-  }
-  
-  /**
-   * Filter that takes the Kerberos principal identified in the 
-   * {@link Krb5AndCertsSslSocketConnector} and provides it the to the servlet
-   * at runtime, setting the principal and short name.
-   */
-  public static class Krb5SslFilter implements Filter {
-    @Override
-    public void doFilter(ServletRequest req, ServletResponse resp,
-        FilterChain chain) throws IOException, ServletException {
-      final Principal princ = 
-        (Principal)req.getAttribute(Krb5AndCertsSslSocketConnector.REMOTE_PRINCIPAL);
-      
-      if(princ == null || !(princ instanceof KerberosPrincipal)) {
-        // Should never actually get here, since should be rejected at socket
-        // level.
-        LOG.warn("User not authenticated via kerberos from " + req.getRemoteAddr());
-        ((HttpServletResponse)resp).sendError(HttpServletResponse.SC_FORBIDDEN, 
-            "User not authenticated via Kerberos");
-        return;
-      }
-      
-      // Provide principal information for servlet at runtime
-      ServletRequest wrapper = 
-            new HttpServletRequestWrapper((HttpServletRequest) req) {
-        @Override
-        public Principal getUserPrincipal() {
-          return princ;
-        }
-        
-        /* 
-         * Return the full name of this remote user.
-         * @see javax.servlet.http.HttpServletRequestWrapper#getRemoteUser()
-         */
-        @Override 
-        public String getRemoteUser() {
-          return princ.getName();
-        }
-      };
-      
-      chain.doFilter(wrapper, resp);
-    }
-
-    @Override
-    public void init(FilterConfig arg0) throws ServletException {
-      /* Nothing to do here */
-    }
-
-    @Override
-    public void destroy() { /* Nothing to do here */ }
-  }
-}

+ 27 - 83
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java

@@ -17,14 +17,11 @@
 package org.apache.hadoop.security;
 
 import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URL;
+import java.net.URLConnection;
 import java.net.UnknownHostException;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
@@ -45,6 +42,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenInfo;
 
@@ -134,79 +133,6 @@ public class SecurityUtil {
     return isTGSPrincipal(ticket.getServer());
   }
 
-  /**
-   * Explicitly pull the service ticket for the specified host.  This solves a
-   * problem with Java's Kerberos SSL problem where the client cannot 
-   * authenticate against a cross-realm service.  It is necessary for clients
-   * making kerberized https requests to call this method on the target URL
-   * to ensure that in a cross-realm environment the remote host will be 
-   * successfully authenticated.  
-   * 
-   * This method is internal to Hadoop and should not be used by other 
-   * applications.  This method should not be considered stable or open: 
-   * it will be removed when the Java behavior is changed.
-   * 
-   * @param remoteHost Target URL the krb-https client will access
-   * @throws IOException if the service ticket cannot be retrieved
-   */
-  public static void fetchServiceTicket(URL remoteHost) throws IOException {
-    if(!UserGroupInformation.isSecurityEnabled())
-      return;
-    
-    String serviceName = "host/" + remoteHost.getHost();
-    if (LOG.isDebugEnabled())
-      LOG.debug("Fetching service ticket for host at: " + serviceName);
-    Object serviceCred = null;
-    Method credsToTicketMeth;
-    Class<?> krb5utilClass;
-    try {
-      Class<?> principalClass;
-      Class<?> credentialsClass;
-      
-      if (System.getProperty("java.vendor").contains("IBM")) {
-        principalClass = Class.forName("com.ibm.security.krb5.PrincipalName");
-        
-        credentialsClass = Class.forName("com.ibm.security.krb5.Credentials");
-        krb5utilClass = Class.forName("com.ibm.security.jgss.mech.krb5");
-      } else {
-        principalClass = Class.forName("sun.security.krb5.PrincipalName");
-        credentialsClass = Class.forName("sun.security.krb5.Credentials");
-        krb5utilClass = Class.forName("sun.security.jgss.krb5.Krb5Util");
-      }
-      @SuppressWarnings("rawtypes")
-      Constructor principalConstructor = principalClass.getConstructor(String.class, 
-          int.class);
-      Field KRB_NT_SRV_HST = principalClass.getDeclaredField("KRB_NT_SRV_HST");
-      Method acquireServiceCredsMeth = 
-          credentialsClass.getDeclaredMethod("acquireServiceCreds", 
-              String.class, credentialsClass);
-      Method ticketToCredsMeth = krb5utilClass.getDeclaredMethod("ticketToCreds", 
-          KerberosTicket.class);
-      credsToTicketMeth = krb5utilClass.getDeclaredMethod("credsToTicket", 
-          credentialsClass);
-      
-      Object principal = principalConstructor.newInstance(serviceName,
-          KRB_NT_SRV_HST.get(principalClass));
-      
-      serviceCred = acquireServiceCredsMeth.invoke(credentialsClass, 
-          principal.toString(), 
-          ticketToCredsMeth.invoke(krb5utilClass, getTgtFromSubject()));
-    } catch (Exception e) {
-      throw new IOException("Can't get service ticket for: "
-          + serviceName, e);
-    }
-    if (serviceCred == null) {
-      throw new IOException("Can't get service ticket for " + serviceName);
-    }
-    try {
-      Subject.getSubject(AccessController.getContext()).getPrivateCredentials()
-          .add(credsToTicketMeth.invoke(krb5utilClass, serviceCred));
-    } catch (Exception e) {
-      throw new IOException("Can't get service ticket for: "
-          + serviceName, e);
-    }
-  }
-  
   /**
    * Convert Kerberos principal name pattern to valid Kerberos principal
    * names. It replaces hostname pattern with hostname, which should be
@@ -513,6 +439,30 @@ public class SecurityUtil {
     }
   }
 
+  /**
+   * Open a (if need be) secure connection to a URL in a secure environment
+   * that is using SPNEGO to authenticate its URLs. All Namenode and Secondary
+   * Namenode URLs that are protected via SPNEGO should be accessed via this
+   * method.
+   *
+   * @param url to authenticate via SPNEGO.
+   * @return A connection that has been authenticated via SPNEGO
+   * @throws IOException If unable to authenticate via SPNEGO
+   */
+  public static URLConnection openSecureHttpConnection(URL url) throws IOException {
+    if(!UserGroupInformation.isSecurityEnabled()) {
+      return url.openConnection();
+    }
+
+    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+    try {
+      return new AuthenticatedURL().openConnection(url, token);
+    } catch (AuthenticationException e) {
+      throw new IOException("Exception trying to open authenticated connection to "
+              + url, e);
+    }
+  }
+
   /**
    * Resolves a host subject to the security requirements determined by
    * hadoop.security.token.service.use_ip.
@@ -664,10 +614,4 @@ public class SecurityUtil {
     }
   }
 
-  public static void initKrb5CipherSuites() {
-    if (UserGroupInformation.isSecurityEnabled()) {
-      System.setProperty("https.cipherSuites",
-          Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.get(0));
-    }
-  }
 }

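Callers that previously had to pull a Kerberos service ticket by hand (the deleted fetchServiceTicket above) now simply open the URL through the new helper, which falls back to a plain openConnection() when security is off. A minimal sketch, assuming a SPNEGO-protected NameNode URL (host and path are placeholders):

import java.io.InputStream;
import java.net.URL;
import java.net.URLConnection;
import org.apache.hadoop.security.SecurityUtil;

public class SpnegoFetch {
  public static void main(String[] args) throws Exception {
    // Placeholder URL; in practice this would be an image-transfer or
    // fsck URL served by a NameNode or SecondaryNameNode.
    URL url = new URL("http://nn.example.com:50070/getimage");
    URLConnection conn = SecurityUtil.openSecureHttpConnection(url);
    InputStream in = conn.getInputStream();
    try {
      System.out.println("first byte: " + in.read());
    } finally {
      in.close();
    }
  }
}
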
+ 0 - 7
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml

@@ -128,13 +128,6 @@
     </description>
   </property>
 
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-
-  </property>
-
   <property>
     <name>dfs.datanode.kerberos.principal</name>
     <value>dn/_HOST@${local.realm}</value>

+ 15 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java

@@ -18,11 +18,14 @@
 package org.apache.hadoop.fs;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem.Statistics;
+
 import static org.apache.hadoop.fs.FileSystemTestHelper.*;
 
 import java.io.*;
 
 import static org.junit.Assert.*;
+
 import org.junit.Before;
 import org.junit.Test;
 
@@ -233,4 +236,16 @@ public class TestLocalFileSystem {
     assertTrue("Did not delete file", fs.delete(file1));
     assertTrue("Did not delete non-empty dir", fs.delete(dir1));
   }
+  
+  @Test
+  public void testStatistics() throws Exception {
+    FileSystem.getLocal(new Configuration());
+    int fileSchemeCount = 0;
+    for (Statistics stats : FileSystem.getAllStatistics()) {
+      if (stats.getScheme().equals("file")) {
+        fileSchemeCount++;
+      }
+    }
+    assertEquals(1, fileSchemeCount);
+  }
 }

+ 23 - 7
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java

@@ -71,11 +71,8 @@ public class ViewFileSystemBaseTest {
 
   @Before
   public void setUp() throws Exception {
-    targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
-    // In case previous test was killed before cleanup
-    fsTarget.delete(targetTestRoot, true);
+    initializeTargetTestRoot();
     
-    fsTarget.mkdirs(targetTestRoot);
     // Make  user and data dirs - we creates links to them in the mount table
     fsTarget.mkdirs(new Path(targetTestRoot,"user"));
     fsTarget.mkdirs(new Path(targetTestRoot,"data"));
@@ -99,7 +96,16 @@ public class ViewFileSystemBaseTest {
     fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true);
   }
   
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
+    // In case previous test was killed before cleanup
+    fsTarget.delete(targetTestRoot, true);
+    
+    fsTarget.mkdirs(targetTestRoot);
+  }
+  
   void setupMountPoints() {
+    ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
     ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri());
     ConfigUtil.addLink(conf, "/user2", new Path(targetTestRoot,"user").toUri());
     ConfigUtil.addLink(conf, "/data", new Path(targetTestRoot,"data").toUri());
@@ -121,7 +127,7 @@ public class ViewFileSystemBaseTest {
   }
   
   int getExpectedMountPoints() {
-    return 7;
+    return 8;
   }
   
   /**
@@ -166,7 +172,7 @@ public class ViewFileSystemBaseTest {
         }
       }
     }
-    Assert.assertEquals(expectedTokenCount / 2, delTokens.size());
+    Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens.size());
   }
 
   int getExpectedDelegationTokenCountWithCredentials() {
@@ -309,6 +315,16 @@ public class ViewFileSystemBaseTest {
     Assert.assertTrue("Renamed dest should  exist as dir in target",
         fsTarget.isDirectory(new Path(targetTestRoot,"user/dirFooBar")));
     
+    // Make a directory under a directory that's mounted from the root of another FS
+    fsView.mkdirs(new Path("/targetRoot/dirFoo"));
+    Assert.assertTrue(fsView.exists(new Path("/targetRoot/dirFoo")));
+    boolean dirFooPresent = false;
+    for (FileStatus fileStatus : fsView.listStatus(new Path("/targetRoot/"))) {
+      if (fileStatus.getPath().getName().equals("dirFoo")) {
+        dirFooPresent = true;
+      }
+    }
+    Assert.assertTrue(dirFooPresent);
   }
   
   // rename across mount points that point to same target also fail 
@@ -418,7 +434,7 @@ public class ViewFileSystemBaseTest {
   }
   
   int getExpectedDirPaths() {
-    return 6;
+    return 7;
   }
   
   @Test

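The new /targetRoot link is the regression test for HADOOP-8349: a mount whose target is the root of another file system. The same pattern works in client code; a hedged sketch using the ConfigUtil.addLink signature shown above (the HDFS URI is a placeholder):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class RootMountDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Mount the root of an HDFS namespace, the case HADOOP-8349 fixes.
    ConfigUtil.addLink(conf, "/targetRoot",
        URI.create("hdfs://nn.example.com:8020/"));
    FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    fsView.mkdirs(new Path("/targetRoot/dirFoo"));
  }
}
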
+ 31 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.FileContextTestHelper.fileType;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsConstants;
@@ -77,12 +78,8 @@ public class ViewFsBaseTest {
 
   @Before
   public void setUp() throws Exception {
-
-    targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
-    // In case previous test was killed before cleanup
-    fcTarget.delete(targetTestRoot, true);
+    initializeTargetTestRoot();
     
-    fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
     // Make  user and data dirs - we creates links to them in the mount table
     fcTarget.mkdir(new Path(targetTestRoot,"user"),
         FileContext.DEFAULT_PERM, true);
@@ -100,6 +97,7 @@ public class ViewFsBaseTest {
     
     // Set up the defaultMT in the config with our mount point links
     conf = new Configuration();
+    ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
     ConfigUtil.addLink(conf, "/user",
         new Path(targetTestRoot,"user").toUri());
     ConfigUtil.addLink(conf, "/user2",
@@ -118,6 +116,14 @@ public class ViewFsBaseTest {
     fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
     // Also try viewfs://default/    - note authority is name of mount table
   }
+  
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
+    // In case previous test was killed before cleanup
+    fcTarget.delete(targetTestRoot, true);
+    
+    fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
+  }
 
   @After
   public void tearDown() throws Exception {
@@ -128,7 +134,11 @@ public class ViewFsBaseTest {
   public void testGetMountPoints() {
     ViewFs viewfs = (ViewFs) fcView.getDefaultFileSystem();
     MountPoint[] mountPoints = viewfs.getMountPoints();
-    Assert.assertEquals(7, mountPoints.length); 
+    Assert.assertEquals(8, mountPoints.length);
+  }
+  
+  int getExpectedDelegationTokenCount() {
+    return 0;
   }
   
   /**
@@ -140,7 +150,7 @@ public class ViewFsBaseTest {
   public void testGetDelegationTokens() throws IOException {
     List<Token<?>> delTokens = 
         fcView.getDelegationTokens(new Path("/"), "sanjay");
-    Assert.assertEquals(0, delTokens.size()); 
+    Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size());
   }
 
   
@@ -281,6 +291,19 @@ public class ViewFsBaseTest {
     Assert.assertTrue("Renamed dest should  exist as dir in target",
         isDir(fcTarget,new Path(targetTestRoot,"user/dirFooBar")));
     
+    // Make a directory under a directory that's mounted from the root of another FS
+    fcView.mkdir(new Path("/targetRoot/dirFoo"), FileContext.DEFAULT_PERM, false);
+    Assert.assertTrue(exists(fcView, new Path("/targetRoot/dirFoo")));
+    boolean dirFooPresent = false;
+    RemoteIterator<FileStatus> dirContents = fcView.listStatus(new Path(
+        "/targetRoot/"));
+    while (dirContents.hasNext()) {
+      FileStatus fileStatus = dirContents.next();
+      if (fileStatus.getPath().getName().equals("dirFoo")) {
+        dirFooPresent = true;
+      }
+    }
+    Assert.assertTrue(dirFooPresent);
   }
   
   // rename across mount points that point to same target also fail 
@@ -358,7 +381,7 @@ public class ViewFsBaseTest {
     
     FileStatus[] dirPaths = fcView.util().listStatus(new Path("/"));
     FileStatus fs;
-    Assert.assertEquals(6, dirPaths.length);
+    Assert.assertEquals(7, dirPaths.length);
     fs = FileContextTestHelper.containsPath(fcView, "/user", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());

+ 0 - 10
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java

@@ -192,16 +192,6 @@ public class TestText extends TestCase {
     assertTrue(text.find("\u20ac", 5)==11);
   }
 
-  public void testClear() {
-	Text text = new Text();
-	assertEquals("", text.toString());
-	assertEquals(0, text.getBytes().length);
-	text = new Text("abcd\u20acbdcd\u20ac");
-	text.clear();
-	assertEquals("", text.toString());
-	assertEquals(0, text.getBytes().length);
-  }
-
   public void testFindAfterUpdatingContents() throws Exception {
     Text text = new Text("abcd");
     text.set("a".getBytes());

+ 18 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -422,6 +422,19 @@ Release 2.0.0 - UNRELEASED
     HDFS-3303. Remove Writable implementation from RemoteEditLogManifest.
     (Brandon Li via szetszwo)
 
+    HDFS-2617. Replaced Kerberized SSL for image transfer and fsck
+    with SPNEGO-based solution. (jghoman, tucu, and atm via eli)
+
+    HDFS-3365. Enable users to disable socket caching in DFS client
+    configuration (todd)
+
+    HDFS-3375. Put client name in DataXceiver thread name for readBlock
+    and keepalive (todd)
+
+    HDFS-3363. Define BlockCollection and MutableBlockCollection interfaces
+    so that INodeFile and INodeFileUnderConstruction do not have to be used in
+    block management.  (John George via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -435,6 +448,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-2476. More CPU efficient data structure for under-replicated,
     over-replicated, and invalidated blocks. (Tomasz Nykiel via todd)
 
+    HDFS-3378. Remove DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY and DEFAULT. (eli)
+
   BUG FIXES
 
     HDFS-2481. Unknown protocol: org.apache.hadoop.hdfs.protocol.ClientProtocol.
@@ -603,6 +618,9 @@ Release 2.0.0 - UNRELEASED
     HDFS-3357. DataXceiver reads from client socket with incorrect/no timeout
     (todd)
 
+    HDFS-3376. DFSClient fails to make connection to DN if there are many
+    unusable cached sockets (todd)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

+ 2 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -99,8 +99,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
   public static final String  DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address";
   public static final String  DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
-  public static final String  DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY = "dfs.namenode.secondary.https-port";
-  public static final int     DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT = 50490;
   public static final String  DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period";
   public static final long    DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT = 60;
   public static final String  DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";
@@ -329,10 +327,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_USER_NAME_KEY = "dfs.datanode.kerberos.principal";
   public static final String  DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";
   public static final String  DFS_NAMENODE_USER_NAME_KEY = "dfs.namenode.kerberos.principal";
-  public static final String  DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.namenode.kerberos.https.principal";
+  public static final String  DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.namenode.kerberos.internal.spnego.principal";
   public static final String  DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY = "dfs.secondary.namenode.keytab.file";
   public static final String  DFS_SECONDARY_NAMENODE_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.principal";
-  public static final String  DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.https.principal";
+  public static final String  DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.internal.spnego.principal";
   public static final String  DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
   public static final int     DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
   

+ 7 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -864,7 +864,13 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
     // Allow retry since there is no way of knowing whether the cached socket
     // is good until we actually use it.
     for (int retries = 0; retries <= nCachedConnRetry && fromCache; ++retries) {
-      Socket sock = socketCache.get(dnAddr);
+      Socket sock = null;
+      // Don't use the cache on the last attempt - it's possible that there
+      // are arbitrarily many unusable sockets in the cache, but we don't
+      // want to fail the read.
+      if (retries < nCachedConnRetry) {
+        sock = socketCache.get(dnAddr);
+      }
       if (sock == null) {
         fromCache = false;
 

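The loop now reserves its final iteration for a freshly opened socket, so a cache full of stale connections can no longer exhaust every retry. A generic sketch of the pattern with illustrative names (not DFSClient API):

import java.util.ArrayDeque;
import java.util.Deque;

public class LastAttemptSkipsCache {
  // Stand-in for SocketCache: a queue of possibly stale resources.
  static final Deque<String> CACHE = new ArrayDeque<String>();

  static String connect(int maxCachedRetries) {
    for (int retries = 0; retries <= maxCachedRetries; ++retries) {
      String conn = null;
      // Never consult the cache on the last attempt: there may be
      // arbitrarily many unusable entries, but the read must not fail.
      if (retries < maxCachedRetries) {
        conn = CACHE.poll();
      }
      if (conn == null) {
        return "fresh-connection"; // always succeeds in this sketch
      }
      // ... otherwise try conn; on failure, loop and try the next one ...
    }
    throw new IllegalStateException("unreachable in this sketch");
  }

  public static void main(String[] args) {
    CACHE.add("stale-1");
    CACHE.add("stale-2");
    System.out.println(connect(3));
  }
}
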
+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java

@@ -81,7 +81,6 @@ public class HdfsConfiguration extends Configuration {
     deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY);
     deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY);
     deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
-    deprecate("dfs.secondary.https.port", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY);
     deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY);
     deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
     deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java

@@ -144,7 +144,7 @@ public class HftpFileSystem extends FileSystem
   }
 
   protected URI getNamenodeSecureUri(URI uri) {
-    return DFSUtil.createUri("https", getNamenodeSecureAddr(uri));
+    return DFSUtil.createUri("http", getNamenodeSecureAddr(uri));
   }
 
   @Override
@@ -247,7 +247,7 @@ public class HftpFileSystem extends FileSystem
             c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer);
           } catch (Exception e) {
             LOG.info("Couldn't get a delegation token from " + nnHttpUrl + 
-            " using https.");
+            " using http.");
             if(LOG.isDebugEnabled()) {
               LOG.debug("error was ", e);
             }
@@ -686,11 +686,11 @@ public class HftpFileSystem extends FileSystem
                       Configuration conf) throws IOException {
       // update the kerberos credentials, if they are coming from a keytab
       UserGroupInformation.getLoginUser().reloginFromKeytab();
-      // use https to renew the token
+      // use http to renew the token
       InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
       return 
         DelegationTokenFetcher.renewDelegationToken
-        (DFSUtil.createUri("https", serviceAddr).toString(), 
+        (DFSUtil.createUri("http", serviceAddr).toString(),
          (Token<DelegationTokenIdentifier>) token);
     }
 
@@ -700,10 +700,10 @@ public class HftpFileSystem extends FileSystem
                        Configuration conf) throws IOException {
       // update the kerberos credentials, if they are coming from a keytab
       UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
-      // use https to cancel the token
+      // use http to cancel the token
       InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
       DelegationTokenFetcher.cancelDelegationToken
-        (DFSUtil.createUri("https", serviceAddr).toString(), 
+        (DFSUtil.createUri("http", serviceAddr).toString(),
          (Token<DelegationTokenIdentifier>) token);
     }    
   }

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java

@@ -47,6 +47,9 @@ class SocketCache {
   public SocketCache(int capacity) {
     multimap = LinkedListMultimap.create();
     this.capacity = capacity;
+    if (capacity <= 0) {
+      LOG.debug("SocketCache disabled in configuration.");
+    }
   }
 
   /**
@@ -55,6 +58,10 @@ class SocketCache {
    * @return  A socket with unknown state, possibly closed underneath. Or null.
    */
   public synchronized Socket get(SocketAddress remote) {
+    if (capacity <= 0) { // disabled
+      return null;
+    }
+    
     List<Socket> socklist = multimap.get(remote);
     if (socklist == null) {
       return null;
@@ -76,6 +83,12 @@ class SocketCache {
    * @param sock socket not used by anyone.
    */
   public synchronized void put(Socket sock) {
+    if (capacity <= 0) {
+      // Cache disabled.
+      IOUtils.closeSocket(sock);
+      return;
+    }
+    
     Preconditions.checkNotNull(sock);
 
     SocketAddress remoteAddr = sock.getRemoteSocketAddress();

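With these guards, a capacity of zero disables caching outright: get() always misses and put() closes the socket immediately. A sketch of how a client would turn the cache off; the key name dfs.client.socketcache.capacity is an assumption based on this era's DFSConfigKeys, so verify it against the hdfs-default.xml change in this commit:

import org.apache.hadoop.conf.Configuration;

public class DisableSocketCache {
  public static Configuration withCacheDisabled() {
    Configuration conf = new Configuration();
    // Assumed key name for the DFSClient socket cache capacity; setting it
    // to 0 makes SocketCache.get() return null and put() close the socket.
    conf.setInt("dfs.client.socketcache.capacity", 0);
    return conf;
  }
}
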
+ 63 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java

@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.fs.ContentSummary;
+
+/** 
+ * This interface is used by the block manager to expose a
+ * few characteristics of a collection of Block/BlockUnderConstruction.
+ */
+public interface BlockCollection {
+  /**
+   * Get the last block of the collection.
+   * Make sure it has the right type.
+   */
+  public <T extends BlockInfo> T getLastBlock() throws IOException;
+
+  /** 
+   * Get content summary.
+   */
+  public ContentSummary computeContentSummary();
+
+  /** @return the number of blocks */ 
+  public int numBlocks();
+
+  public BlockInfo[] getBlocks();
+  /**
+   * Get the preferred block size for the collection.
+   * @return preferred block size in bytes
+   */
+  public long getPreferredBlockSize();
+
+  /**
+   * Get the block replication for the collection.
+   * @return block replication value
+   */
+  public short getReplication();
+
+  /**
+   *  Get the name of the collection.
+   */
+  public String getName();
+}

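The point of the new interface is that block management code no longer needs the namenode's INodeFile type; anything satisfying BlockCollection will do. A hypothetical consumer, compiled against the interface alone:

    // Hypothetical block-manager-side helper; it depends only on
    // BlockCollection, not on any namenode INode class.
    class BlockReportPrinter {
      static String describe(BlockCollection bc) {
        return bc.getName() + ": " + bc.numBlocks() + " block(s), replication "
            + bc.getReplication() + ", preferred block size "
            + bc.getPreferredBlockSize() + " bytes";
      }
    }
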
+ 4 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java

@@ -22,18 +22,17 @@ import java.util.LinkedList;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.util.LightWeightGSet;
 
 /**
  * BlockInfo class maintains for a given block
- * the {@link INodeFile} it is part of and datanodes where the replicas of 
+ * the {@link BlockCollection} it is part of and datanodes where the replicas of 
  * the block are stored.
  */
 @InterfaceAudience.Private
 public class BlockInfo extends Block implements
     LightWeightGSet.LinkedElement {
-  private INodeFile inode;
+  private BlockCollection inode;
 
   /** For implementing {@link LightWeightGSet.LinkedElement} interface */
   private LightWeightGSet.LinkedElement nextLinkedElement;
@@ -77,11 +76,11 @@ public class BlockInfo extends Block implements
     this.inode = from.inode;
   }
 
-  public INodeFile getINode() {
+  public BlockCollection getINode() {
     return inode;
   }
 
-  public void setINode(INodeFile inode) {
+  public void setINode(BlockCollection inode) {
     this.inode = inode;
   }
 

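With the field retyped, any code holding a BlockInfo can reach the owning collection through the abstraction. A hypothetical helper illustrating the back-pointer:

    class BlockOwner {
      // Reach the owning collection from a block; the back-pointer may be
      // null for blocks that no longer belong to any file.
      static String ownerOf(BlockInfo blk) {
        BlockCollection bc = blk.getINode();
        return bc == null ? "(no owner)" : bc.getName();
      }
    }
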
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java

@@ -234,7 +234,7 @@ public class BlockInfoUnderConstruction extends BlockInfo {
     blockRecoveryId = recoveryId;
     if (replicas.size() == 0) {
       NameNode.stateChangeLog.warn("BLOCK*"
-        + " INodeFileUnderConstruction.initLeaseRecovery:"
+        + " BlockInfoUnderConstruction.initLeaseRecovery:"
         + " No blocks found, lease removed.");
     }
 

+ 32 - 33
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -55,8 +55,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
@@ -384,7 +382,7 @@ public class BlockManager {
                          numReplicas.decommissionedReplicas();
     
     if (block instanceof BlockInfo) {
-      String fileName = ((BlockInfo)block).getINode().getFullPathName();
+      String fileName = ((BlockInfo)block).getINode().getName();
       out.print(fileName + ": ");
     }
     // l: == live:, d: == decommissioned c: == corrupt e: == excess
@@ -460,7 +458,7 @@ public class BlockManager {
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  public boolean commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode, 
+  public boolean commitOrCompleteLastBlock(MutableBlockCollection fileINode, 
       Block commitBlock) throws IOException {
     if(commitBlock == null)
       return false; // not committing, this is a block allocation retry
@@ -472,7 +470,7 @@ public class BlockManager {
     
     final boolean b = commitBlock((BlockInfoUnderConstruction)lastBlock, commitBlock);
     if(countNodes(lastBlock).liveReplicas() >= minReplication)
-      completeBlock(fileINode,fileINode.numBlocks()-1, false);
+      completeBlock(fileINode, fileINode.numBlocks()-1, false);
     return b;
   }
 
@@ -483,7 +481,7 @@ public class BlockManager {
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  private BlockInfo completeBlock(final INodeFile fileINode,
+  private BlockInfo completeBlock(final MutableBlockCollection fileINode,
       final int blkIndex, boolean force) throws IOException {
     if(blkIndex < 0)
       return null;
@@ -516,7 +514,7 @@ public class BlockManager {
     return blocksMap.replaceBlock(completeBlock);
   }
 
-  private BlockInfo completeBlock(final INodeFile fileINode,
+  private BlockInfo completeBlock(final MutableBlockCollection fileINode,
       final BlockInfo block, boolean force) throws IOException {
     BlockInfo[] fileBlocks = fileINode.getBlocks();
     for(int idx = 0; idx < fileBlocks.length; idx++)
@@ -531,7 +529,7 @@ public class BlockManager {
    * regardless of whether enough replicas are present. This is necessary
    * when tailing edit logs as a Standby.
    */
-  public BlockInfo forceCompleteBlock(final INodeFile fileINode,
+  public BlockInfo forceCompleteBlock(final MutableBlockCollection fileINode,
       final BlockInfoUnderConstruction block) throws IOException {
     block.commitBlock(block);
     return completeBlock(fileINode, block, true);
@@ -552,7 +550,7 @@ public class BlockManager {
    * @return the last block locations if the block is partial or null otherwise
    */
   public LocatedBlock convertLastBlockToUnderConstruction(
-      INodeFileUnderConstruction fileINode) throws IOException {
+      MutableBlockCollection fileINode) throws IOException {
     BlockInfo oldBlock = fileINode.getLastBlock();
     if(oldBlock == null ||
         fileINode.getPreferredBlockSize() == oldBlock.getNumBytes())
@@ -923,7 +921,7 @@ public class BlockManager {
                             " does not exist. ");
     }
 
-    INodeFile inode = storedBlock.getINode();
+    BlockCollection inode = storedBlock.getINode();
     if (inode == null) {
       NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " +
                                    "block " + storedBlock +
@@ -1051,7 +1049,7 @@ public class BlockManager {
     int requiredReplication, numEffectiveReplicas;
     List<DatanodeDescriptor> containingNodes, liveReplicaNodes;
     DatanodeDescriptor srcNode;
-    INodeFile fileINode = null;
+    BlockCollection fileINode = null;
     int additionalReplRequired;
 
     int scheduledWork = 0;
@@ -1065,7 +1063,7 @@ public class BlockManager {
             // block should belong to a file
             fileINode = blocksMap.getINode(block);
             // abandoned block or block reopened for append
-            if(fileINode == null || fileINode.isUnderConstruction()) {
+            if(fileINode == null || fileINode instanceof MutableBlockCollection) {
               neededReplications.remove(block, priority); // remove from neededReplications
               neededReplications.decrementReplicationIndex(priority);
               continue;
@@ -1151,7 +1149,7 @@ public class BlockManager {
           // block should belong to a file
           fileINode = blocksMap.getINode(block);
           // abandoned block or block reopened for append
-          if(fileINode == null || fileINode.isUnderConstruction()) {
+          if(fileINode == null || fileINode instanceof MutableBlockCollection) {
             neededReplications.remove(block, priority); // remove from neededReplications
             rw.targets = null;
             neededReplications.decrementReplicationIndex(priority);
@@ -1804,7 +1802,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       case COMPLETE:
       case COMMITTED:
         if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) {
-          return new BlockToMarkCorrupt(storedBlock,
+          return new BlockToMarkCorrupt(new BlockInfo(iblk, storedBlock
+              .getINode().getReplication()),
               "block is " + ucState + " and reported genstamp " +
               iblk.getGenerationStamp() + " does not match " +
               "genstamp in block map " + storedBlock.getGenerationStamp());
@@ -1824,7 +1823,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       if (!storedBlock.isComplete()) {
         return null; // not corrupt
       } else if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) {
-        return new BlockToMarkCorrupt(storedBlock,
+        return new BlockToMarkCorrupt(new BlockInfo(iblk, storedBlock
+            .getINode().getReplication()),
             "reported " + reportedState + " replica with genstamp " +
             iblk.getGenerationStamp() + " does not match COMPLETE block's " +
             "genstamp in block map " + storedBlock.getGenerationStamp());
@@ -1916,7 +1916,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     int numCurrentReplica = countLiveNodes(storedBlock);
     if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
         && numCurrentReplica >= minReplication) {
-      completeBlock(storedBlock.getINode(), storedBlock, false);
+      completeBlock((MutableBlockCollection)storedBlock.getINode(), storedBlock, false);
     } else if (storedBlock.isComplete()) {
       // check whether safe replication is reached for the block
       // only complete blocks are counted towards that.
@@ -1954,7 +1954,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       return block;
     }
     assert storedBlock != null : "Block must be stored by now";
-    INodeFile fileINode = storedBlock.getINode();
+    BlockCollection fileINode = storedBlock.getINode();
     assert fileINode != null : "Block must belong to a file";
 
     // add block to the datanode
@@ -1981,7 +1981,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
 
     if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
         numLiveReplicas >= minReplication) {
-      storedBlock = completeBlock(fileINode, storedBlock, false);
+      storedBlock = completeBlock((MutableBlockCollection)fileINode, storedBlock, false);
     } else if (storedBlock.isComplete()) {
       // check whether safe replication is reached for the block
       // only complete blocks are counted towards that
@@ -1992,7 +1992,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     }
     
     // if file is under construction, then done for now
-    if (fileINode.isUnderConstruction()) {
+    if (fileINode instanceof MutableBlockCollection) {
       return storedBlock;
     }
 
@@ -2129,7 +2129,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
    * what happened with it.
    */
   private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
-    INodeFile fileINode = block.getINode();
+    BlockCollection fileINode = block.getINode();
     if (fileINode == null) {
       // block does not belong to any file
       addToInvalidates(block);
@@ -2258,7 +2258,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
                               BlockPlacementPolicy replicator) {
     assert namesystem.hasWriteLock();
     // first form a rack to datanodes map and
-    INodeFile inode = getINode(b);
+    BlockCollection inode = getINode(b);
     final Map<String, List<DatanodeDescriptor>> rackMap
         = new HashMap<String, List<DatanodeDescriptor>>();
     for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator();
@@ -2379,7 +2379,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       // necessary. In that case, put block on a possibly-will-
       // be-replicated list.
       //
-      INodeFile fileINode = blocksMap.getINode(block);
+      BlockCollection fileINode = blocksMap.getINode(block);
       if (fileINode != null) {
         namesystem.decrementSafeBlockCount(block);
         updateNeededReplications(block, -1, 0);
@@ -2611,7 +2611,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       NumberReplicas num) {
     int curReplicas = num.liveReplicas();
     int curExpectedReplicas = getReplication(block);
-    INodeFile fileINode = blocksMap.getINode(block);
+    BlockCollection fileINode = blocksMap.getINode(block);
     Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
     StringBuilder nodeList = new StringBuilder();
     while (nodeIter.hasNext()) {
@@ -2624,7 +2624,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
         + ", corrupt replicas: " + num.corruptReplicas()
         + ", decommissioned replicas: " + num.decommissionedReplicas()
         + ", excess replicas: " + num.excessReplicas()
-        + ", Is Open File: " + fileINode.isUnderConstruction()
+        + ", Is Open File: " + (fileINode instanceof MutableBlockCollection)
         + ", Datanodes having this block: " + nodeList + ", Current Datanode: "
         + srcNode + ", Is current datanode decommissioning: "
         + srcNode.isDecommissionInProgress());
@@ -2639,7 +2639,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     final Iterator<? extends Block> it = srcNode.getBlockIterator();
     while(it.hasNext()) {
       final Block block = it.next();
-      INodeFile fileINode = blocksMap.getINode(block);
+      BlockCollection fileINode = blocksMap.getINode(block);
       short expectedReplication = fileINode.getReplication();
       NumberReplicas num = countNodes(block);
       int numCurrentReplica = num.liveReplicas();
@@ -2662,7 +2662,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     final Iterator<? extends Block> it = srcNode.getBlockIterator();
     while(it.hasNext()) {
       final Block block = it.next();
-      INodeFile fileINode = blocksMap.getINode(block);
+      BlockCollection fileINode = blocksMap.getINode(block);
 
       if (fileINode != null) {
         NumberReplicas num = countNodes(block);
@@ -2679,7 +2679,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
             if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
               decommissionOnlyReplicas++;
             }
-            if (fileINode.isUnderConstruction()) {
+            if (fileINode instanceof MutableBlockCollection) {
               underReplicatedInOpenFiles++;
             }
           }
@@ -2782,11 +2782,10 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
 
   /* get replication factor of a block */
   private int getReplication(Block block) {
-    INodeFile fileINode = blocksMap.getINode(block);
+    BlockCollection fileINode = blocksMap.getINode(block);
     if (fileINode == null) { // block does not belong to any file
       return 0;
     }
-    assert !fileINode.isDirectory() : "Block cannot belong to a directory.";
     return fileINode.getReplication();
   }
 
@@ -2859,11 +2858,11 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     return this.neededReplications.getCorruptBlockSize();
   }
 
-  public BlockInfo addINode(BlockInfo block, INodeFile iNode) {
+  public BlockInfo addINode(BlockInfo block, BlockCollection iNode) {
     return blocksMap.addINode(block, iNode);
   }
 
-  public INodeFile getINode(Block b) {
+  public BlockCollection getINode(Block b) {
     return blocksMap.getINode(b);
   }
 
@@ -3003,7 +3002,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
   private static class ReplicationWork {
 
     private Block block;
-    private INodeFile fileINode;
+    private BlockCollection fileINode;
 
     private DatanodeDescriptor srcNode;
     private List<DatanodeDescriptor> containingNodes;
@@ -3014,7 +3013,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     private int priority;
 
     public ReplicationWork(Block block,
-        INodeFile fileINode,
+        BlockCollection fileINode,
         DatanodeDescriptor srcNode,
         List<DatanodeDescriptor> containingNodes,
         List<DatanodeDescriptor> liveReplicaNodes,

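Note the substitution that recurs through this file: fileINode.isUnderConstruction() becomes fileINode instanceof MutableBlockCollection, so "open for write" is encoded in the type rather than in a flag on INodeFile. A sketch of the idiom, with hypothetical helper names:

    class OpenFileCheck {
      // The type test replaces the old isUnderConstruction() flag.
      static boolean isOpenForWrite(BlockCollection bc) {
        return bc instanceof MutableBlockCollection;
      }

      // Mutation then requires a checked downcast, as completeBlock(...) does
      // above; the caller must first establish the file is under construction.
      static MutableBlockCollection asMutable(BlockCollection bc) {
        return (MutableBlockCollection) bc;
      }
    }
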
+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java

@@ -29,7 +29,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -123,13 +122,13 @@ public abstract class BlockPlacementPolicy {
    * @return array of DatanodeDescriptor instances chosen as target 
    * and sorted as a pipeline.
    */
-  DatanodeDescriptor[] chooseTarget(FSInodeInfo srcInode,
+  DatanodeDescriptor[] chooseTarget(BlockCollection srcInode,
                                     int numOfReplicas,
                                     DatanodeDescriptor writer,
                                     List<DatanodeDescriptor> chosenNodes,
                                     HashMap<Node, Node> excludedNodes,
                                     long blocksize) {
-    return chooseTarget(srcInode.getFullPathName(), numOfReplicas, writer,
+    return chooseTarget(srcInode.getName(), numOfReplicas, writer,
                         chosenNodes, excludedNodes, blocksize);
   }
 
@@ -159,7 +158,7 @@ public abstract class BlockPlacementPolicy {
                    listed in the previous parameter.
    * @return the replica that is the best candidate for deletion
    */
-  abstract public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo srcInode,
+  abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcInode,
                                       Block block, 
                                       short replicationFactor,
                                       Collection<DatanodeDescriptor> existingReplicas,

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
@@ -547,7 +546,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   }
 
   @Override
-  public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode,
+  public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
                                                  Block block,
                                                  short replicationFactor,
                                                  Collection<DatanodeDescriptor> first, 

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.Iterator;
 
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.util.GSet;
 import org.apache.hadoop.hdfs.util.LightWeightGSet;
 
@@ -93,7 +92,7 @@ class BlocksMap {
     blocks = null;
   }
 
-  INodeFile getINode(Block b) {
+  BlockCollection getINode(Block b) {
     BlockInfo info = blocks.get(b);
     return (info != null) ? info.getINode() : null;
   }
@@ -101,7 +100,7 @@ class BlocksMap {
   /**
    * Add block b belonging to the specified file inode to the map.
    */
-  BlockInfo addINode(BlockInfo b, INodeFile iNode) {
+  BlockInfo addINode(BlockInfo b, BlockCollection iNode) {
     BlockInfo info = blocks.get(b);
     if (info != b) {
       info = b;

+ 18 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java → hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java

@@ -15,24 +15,30 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.fs.ContentSummary;
 
 /** 
- * This interface is used used the pluggable block placement policy
- * to expose a few characteristics of an Inode.
+ * This interface is used by the block manager to expose a
+ * few characteristics of a mutable collection of Block/BlockUnderConstruction.
  */
-@InterfaceAudience.Private
-public interface FSInodeInfo {
-
+public interface MutableBlockCollection extends BlockCollection {
   /**
-   * a string representation of an inode
-   * 
-   * @return the full pathname (from root) that this inode represents
+   * Set the block at the given index.
    */
+  public void setBlock(int idx, BlockInfo blk);
 
-  public String getFullPathName() ;
+  /**
+   * Convert the last block of the collection to an under-construction block.
+   * Set its locations.
+   */
+  public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
+                       DatanodeDescriptor[] targets) throws IOException;
 }
-    
-    

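The split mirrors a common read/write interface pattern: code that only inspects a file takes BlockCollection, while code that mutates its block list demands the stronger type, as commitOrCompleteLastBlock now does. A hypothetical pair of signatures:

    class BlockOps {
      // Read-only paths accept any collection...
      static int blockCount(BlockCollection bc) {
        return bc.numBlocks();
      }

      // ...while mutating paths take the sub-interface, so a closed file
      // can never be handed to them by accident.
      static void swapBlock(MutableBlockCollection bc, int idx, BlockInfo blk) {
        bc.setBlock(idx, blk);
      }
    }
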
+ 16 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -85,6 +85,12 @@ class DataXceiver extends Receiver implements Runnable {
 
   private long opStartTime; //the start time of receiving an Op
   private final SocketInputWrapper socketInputWrapper;
+
+  /**
+   * Client Name used in previous operation. Not available on first request
+   * on the socket.
+   */
+  private String previousOpClientName;
   
   public static DataXceiver create(Socket s, DataNode dn,
       DataXceiverServer dataXceiverServer) throws IOException {
@@ -122,7 +128,11 @@ class DataXceiver extends Receiver implements Runnable {
    */
   private void updateCurrentThreadName(String status) {
     StringBuilder sb = new StringBuilder();
-    sb.append("DataXceiver for client ").append(remoteAddress);
+    sb.append("DataXceiver for client ");
+    if (previousOpClientName != null) {
+      sb.append(previousOpClientName).append(" at ");
+    }
+    sb.append(remoteAddress);
     if (status != null) {
       sb.append(" [").append(status).append("]");
     }
@@ -202,6 +212,8 @@ class DataXceiver extends Receiver implements Runnable {
       final String clientName,
       final long blockOffset,
       final long length) throws IOException {
+    previousOpClientName = clientName;
+
     OutputStream baseStream = NetUtils.getOutputStream(s, 
         dnConf.socketWriteTimeout);
     DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
@@ -295,7 +307,8 @@ class DataXceiver extends Receiver implements Runnable {
       final long maxBytesRcvd,
       final long latestGenerationStamp,
       DataChecksum requestedChecksum) throws IOException {
-    updateCurrentThreadName("Receiving block " + block + " client=" + clientname);
+    previousOpClientName = clientname;
+    updateCurrentThreadName("Receiving block " + block);
     final boolean isDatanode = clientname.length() == 0;
     final boolean isClient = !isDatanode;
     final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW
@@ -502,7 +515,7 @@ class DataXceiver extends Receiver implements Runnable {
       final DatanodeInfo[] targets) throws IOException {
     checkAccess(null, true, blk, blockToken,
         Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
-
+    previousOpClientName = clientName;
     updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
 
     final DataOutputStream out = new DataOutputStream(

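updateCurrentThreadName exists so that a jstack of a busy datanode names the client each xceiver thread is serving; remembering previousOpClientName keeps that label accurate across operations on the same socket. A self-contained sketch of the technique, with hypothetical values:

    public class ThreadNameSketch {
      private static String previousOpClientName;  // set once an op reveals it

      static void updateCurrentThreadName(String remoteAddress, String status) {
        StringBuilder sb = new StringBuilder("DataXceiver for client ");
        if (previousOpClientName != null) {
          sb.append(previousOpClientName).append(" at ");
        }
        sb.append(remoteAddress);
        if (status != null) {
          sb.append(" [").append(status).append("]");
        }
        Thread.currentThread().setName(sb.toString());
      }

      public static void main(String[] args) {
        previousOpClientName = "DFSClient_123";    // hypothetical client name
        updateCurrentThreadName("10.0.0.7:50010", "Receiving block");
        System.out.println(Thread.currentThread().getName());
        // DataXceiver for client DFSClient_123 at 10.0.0.7:50010 [Receiving block]
      }
    }
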
+ 4 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -2840,7 +2840,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       if (storedBlock == null) {
         throw new IOException("Block (=" + lastblock + ") not found");
       }
-      INodeFile iFile = storedBlock.getINode();
+      INodeFile iFile = (INodeFile) storedBlock.getINode();
       if (!iFile.isUnderConstruction() || storedBlock.isComplete()) {
         throw new IOException("Unexpected block (=" + lastblock
                               + ") since the file (=" + iFile.getLocalName()
@@ -4394,7 +4394,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
     
     // check file inode
-    INodeFile file = storedBlock.getINode();
+    INodeFile file = (INodeFile) storedBlock.getINode();
     if (file==null || !file.isUnderConstruction()) {
       throw new IOException("The file " + storedBlock + 
           " belonged to does not exist or it is not under construction.");
@@ -4556,7 +4556,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (destinationExisted && dinfo.isDir()) {
       Path spath = new Path(src);
       Path parent = spath.getParent();
-      if (isRoot(parent)) {
+      if (parent.isRoot()) {
         overwrite = parent.toString();
       } else {
         overwrite = parent.toString() + Path.SEPARATOR;
@@ -4569,10 +4569,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
     leaseManager.changeLease(src, dst, overwrite, replaceBy);
   }
-           
-  private boolean isRoot(Path path) {
-    return path.getParent() == null;
-  }
 
   /**
    * Serializes leases. 
@@ -4710,7 +4706,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
       while (blkIterator.hasNext()) {
         Block blk = blkIterator.next();
-        INode inode = blockManager.getINode(blk);
+        INode inode = (INodeFile) blockManager.getINode(blk);
         skip++;
         if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
           String src = FSDirectory.getFullPathName(inode);

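The private isRoot helper defined the root as the one path with no parent; the new Path.isRoot() used above presumably encapsulates the same test. A sketch of the expected semantics, under that assumption:

    import org.apache.hadoop.fs.Path;

    public class IsRootSketch {
      public static void main(String[] args) {
        Path root = new Path("/");
        Path child = new Path("/user/alice");
        System.out.println(root.isRoot());                           // true
        System.out.println(child.isRoot());                          // false
        System.out.println(child.getParent().getParent().isRoot()); // "/" -> true
      }
    }
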
+ 8 - 36
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java

@@ -27,6 +27,8 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.security.SecurityUtil;
 
 import org.apache.commons.logging.Log;
@@ -34,7 +36,6 @@ import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
@@ -83,11 +84,11 @@ public class GetImageServlet extends HttpServlet {
         (Configuration)getServletContext().getAttribute(JspHelper.CURRENT_CONF);
       
       if(UserGroupInformation.isSecurityEnabled() && 
-          !isValidRequestor(request.getRemoteUser(), conf)) {
+          !isValidRequestor(request.getUserPrincipal().getName(), conf)) {
         response.sendError(HttpServletResponse.SC_FORBIDDEN, 
             "Only Namenode and Secondary Namenode may access this servlet");
         LOG.warn("Received non-NN/SNN request for image or edits from " 
-            + request.getRemoteHost());
+            + request.getUserPrincipal().getName() + " at " + request.getRemoteHost());
         return;
       }
       
@@ -156,15 +157,10 @@ public class GetImageServlet extends HttpServlet {
               }
               
               // issue a HTTP get request to download the new fsimage 
-              MD5Hash downloadImageDigest = reloginIfNecessary().doAs(
-                  new PrivilegedExceptionAction<MD5Hash>() {
-                  @Override
-                  public MD5Hash run() throws Exception {
-                    return TransferFsImage.downloadImageToStorage(
+              MD5Hash downloadImageDigest =
+                TransferFsImage.downloadImageToStorage(
                         parsedParams.getInfoServer(), txid,
                         nnImage.getStorage(), true);
-                    }
-              });
               nnImage.saveDigestAndRenameCheckpointImage(txid, downloadImageDigest);
               
               // Now that we have a new checkpoint, we might be able to
@@ -176,18 +172,6 @@ public class GetImageServlet extends HttpServlet {
           }
           return null;
         }
-        
-        // We may have lost our ticket since the last time we tried to open
-        // an http connection, so log in just in case.
-        private UserGroupInformation reloginIfNecessary() throws IOException {
-          // This method is only called on the NN, therefore it is safe to
-          // use these key values.
-          return UserGroupInformation.loginUserFromKeytabAndReturnUGI(
-                  SecurityUtil.getServerPrincipal(conf
-                      .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
-                      NameNode.getAddress(conf).getHostName()),
-              conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
-        }       
       });
       
     } catch (Throwable t) {
@@ -232,18 +216,10 @@ public class GetImageServlet extends HttpServlet {
     
     Set<String> validRequestors = new HashSet<String>();
 
-    validRequestors.add(
-        SecurityUtil.getServerPrincipal(conf
-            .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), NameNode
-            .getAddress(conf).getHostName()));
     validRequestors.add(
         SecurityUtil.getServerPrincipal(conf
             .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), NameNode
             .getAddress(conf).getHostName()));
-    validRequestors.add(
-        SecurityUtil.getServerPrincipal(conf
-            .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
-            SecondaryNameNode.getHttpAddress(conf).getHostName()));
     validRequestors.add(
         SecurityUtil.getServerPrincipal(conf
             .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY),
@@ -251,10 +227,6 @@ public class GetImageServlet extends HttpServlet {
 
     if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
       Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
-      validRequestors.add(
-          SecurityUtil.getServerPrincipal(otherNnConf
-              .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
-              NameNode.getAddress(otherNnConf).getHostName()));
       validRequestors.add(
           SecurityUtil.getServerPrincipal(otherNnConf
               .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
@@ -263,11 +235,11 @@ public class GetImageServlet extends HttpServlet {
 
     for(String v : validRequestors) {
       if(v != null && v.equals(remoteUser)) {
-        if(LOG.isDebugEnabled()) LOG.debug("isValidRequestor is allowing: " + remoteUser);
+        if(LOG.isInfoEnabled()) LOG.info("GetImageServlet allowing: " + remoteUser);
         return true;
       }
     }
-    if(LOG.isDebugEnabled()) LOG.debug("isValidRequestor is rejecting: " + remoteUser);
+    if(LOG.isInfoEnabled()) LOG.info("GetImageServlet rejecting: " + remoteUser);
     return false;
   }
   

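isValidRequestor now matches the Kerberos principal taken from request.getUserPrincipal() against the configured NN and SNN principals. A stripped-down sketch of the gate, with hypothetical principal strings:

    import java.util.HashSet;
    import java.util.Set;

    public class RequestorCheckSketch {
      static boolean isValidRequestor(String remoteUser, Set<String> valid) {
        for (String v : valid) {
          if (v != null && v.equals(remoteUser)) {
            return true;               // a known NN/SNN principal
          }
        }
        return false;                  // everyone else is rejected
      }

      public static void main(String[] args) {
        Set<String> valid = new HashSet<String>();
        valid.add("nn/nn.example.com@EXAMPLE.COM");   // hypothetical principals
        valid.add("sn/snn.example.com@EXAMPLE.COM");
        System.out.println(
            isValidRequestor("nn/nn.example.com@EXAMPLE.COM", valid)); // true
        System.out.println(
            isValidRequestor("joe@EXAMPLE.COM", valid));               // false
      }
    }
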
+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -38,7 +38,7 @@ import com.google.common.primitives.SignedBytes;
  * directory inodes.
  */
 @InterfaceAudience.Private
-abstract class INode implements Comparable<byte[]>, FSInodeInfo {
+abstract class INode implements Comparable<byte[]> {
   /*
    *  The inode name is in java UTF8 encoding; 
    *  The name in HdfsFileStatus should keep the same encoding as this.
@@ -264,7 +264,6 @@ abstract class INode implements Comparable<byte[]>, FSInodeInfo {
     this.name = name;
   }
 
-  @Override
   public String getFullPathName() {
     // Get the full path name of this inode.
     return FSDirectory.getFullPathName(this);

+ 10 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -20,15 +20,18 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 
 /** I-node for closed file. */
-public class INodeFile extends INode {
+@InterfaceAudience.Private
+public class INodeFile extends INode implements BlockCollection {
   static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
 
   //Number of bits for Block size
@@ -167,6 +170,12 @@ public class INodeFile extends INode {
     blocks = null;
     return 1;
   }
+  
+  public String getName() {
+    // Get the full path name of this inode.
+    return getFullPathName();
+  }
+
 
   @Override
   long[] computeContentSummary(long[] summary) {

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java

@@ -25,13 +25,15 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
 
 import com.google.common.base.Joiner;
 
 /**
  * I-node for file being written.
  */
-public class INodeFileUnderConstruction extends INodeFile {
+public class INodeFileUnderConstruction extends INodeFile 
+                                        implements MutableBlockCollection {
   private  String clientName;         // lease holder
   private final String clientMachine;
   private final DatanodeDescriptor clientNode; // if client is a cluster node too.

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -164,10 +164,8 @@ public class NameNode {
     DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
     DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
     DFS_NAMENODE_HTTP_ADDRESS_KEY,
-    DFS_NAMENODE_HTTPS_ADDRESS_KEY,
     DFS_NAMENODE_KEYTAB_FILE_KEY,
     DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-    DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
     DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
     DFS_NAMENODE_BACKUP_ADDRESS_KEY,
     DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
@@ -361,8 +359,9 @@ public class NameNode {
   }
   
   protected void setHttpServerAddress(Configuration conf) {
-    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,
-        NetUtils.getHostPortString(getHttpAddress()));
+    String hostPort = NetUtils.getHostPortString(getHttpAddress());
+    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, hostPort);
+    LOG.info("Web-server up at: " + hostPort);
   }
 
   protected void loadNamesystem(Configuration conf) throws IOException {

+ 88 - 112
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT;
@@ -43,6 +44,7 @@ import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
 
 /**
@@ -78,127 +80,101 @@ public class NameNodeHttpServer {
         conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
         nn.getNameNodeAddress().getHostName());
   }
-  
+
   public void start() throws IOException {
     final String infoHost = bindAddress.getHostName();
-    
-    if(UserGroupInformation.isSecurityEnabled()) {
-      String httpsUser = SecurityUtil.getServerPrincipal(conf
-          .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), infoHost);
-      if (httpsUser == null) {
-        LOG.warn(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY
-            + " not defined in config. Starting http server as "
-            + getDefaultServerPrincipal()
-            + ": Kerberized SSL may be not function correctly.");
-      } else {
-        // Kerberized SSL servers must be run from the host principal...
-        LOG.info("Logging in as " + httpsUser + " to start http server.");
-        SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
-            DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY, infoHost);
-      }
-    }
-
-    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
-    try {
-      this.httpServer = ugi.doAs(new PrivilegedExceptionAction<HttpServer>() {
-        @Override
-        public HttpServer run() throws IOException, InterruptedException {
-          int infoPort = bindAddress.getPort();
-          httpServer = new HttpServer("hdfs", infoHost, infoPort,
-              infoPort == 0, conf, 
-              new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " "))) {
-            {
-              if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
-                //add SPNEGO authentication filter for webhdfs
-                final String name = "SPNEGO";
-                final String classname =  AuthFilter.class.getName();
-                final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
-                Map<String, String> params = getAuthFilterParams(conf);
-                defineFilter(webAppContext, name, classname, params,
-                    new String[]{pathSpec});
-                LOG.info("Added filter '" + name + "' (class=" + classname + ")");
-
-                // add webhdfs packages
-                addJerseyResourcePackage(
-                    NamenodeWebHdfsMethods.class.getPackage().getName()
-                    + ";" + Param.class.getPackage().getName(), pathSpec);
-              }
+    int infoPort = bindAddress.getPort();
+
+    httpServer = new HttpServer("hdfs", infoHost, infoPort,
+                                infoPort == 0, conf,
+                                new AccessControlList(conf.get(DFS_ADMIN, " "))) {
+      {
+        // Add SPNEGO support to NameNode
+        if (UserGroupInformation.isSecurityEnabled()) {
+          Map<String, String> params = new HashMap<String, String>();
+          String principalInConf = conf.get(
+            DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY);
+          if (principalInConf != null && !principalInConf.isEmpty()) {
+            params.put("kerberos.principal",
+                       SecurityUtil.getServerPrincipal(principalInConf, infoHost));
+            String httpKeytab = conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
+            if (httpKeytab != null && !httpKeytab.isEmpty()) {
+              params.put("kerberos.keytab", httpKeytab);
             }
 
-            private Map<String, String> getAuthFilterParams(Configuration conf)
-                throws IOException {
-              Map<String, String> params = new HashMap<String, String>();
-              String principalInConf = conf
-                  .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
-              if (principalInConf != null && !principalInConf.isEmpty()) {
-                params
-                    .put(
-                        DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
-                        SecurityUtil.getServerPrincipal(principalInConf,
-                            infoHost));
-              }
-              String httpKeytab = conf
-                  .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
-              if (httpKeytab != null && !httpKeytab.isEmpty()) {
-                params.put(
-                    DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
-                    httpKeytab);
-              }
-              return params;
-            }
-          };
-
-          boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
-          boolean useKrb = UserGroupInformation.isSecurityEnabled();
-          if (certSSL || useKrb) {
-            boolean needClientAuth = conf.getBoolean(
-                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
-                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
-            InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf
-                .get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
-                    DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
-            Configuration sslConf = new HdfsConfiguration(false);
-            if (certSSL) {
-              sslConf.addResource(conf.get(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-                  DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
-            }
-            httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth,
-                useKrb);
-            // assume same ssl port for all datanodes
-            InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(
-                conf.get(DFS_DATANODE_HTTPS_ADDRESS_KEY,
-                    infoHost + ":" + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
-            httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
-                datanodeSslPort.getPort());
+            params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
+
+            defineFilter(webAppContext, SPNEGO_FILTER,
+                         AuthenticationFilter.class.getName(), params, null);
           }
-          httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
-          httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
-              nn.getNameNodeAddress());
-          httpServer.setAttribute(FSIMAGE_ATTRIBUTE_KEY, nn.getFSImage());
-          httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
-          setupServlets(httpServer, conf);
-          httpServer.start();
-
-          // The web-server port can be ephemeral... ensure we have the correct
-          // info
-          infoPort = httpServer.getPort();
-          httpAddress = new InetSocketAddress(infoHost, infoPort);
-          LOG.info(nn.getRole() + " Web-server up at: " + httpAddress);
-          return httpServer;
         }
-      });
-    } catch (InterruptedException e) {
-      throw new IOException(e);
-    } finally {
-      if(UserGroupInformation.isSecurityEnabled() && 
-          conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY) != null) {
-        // Go back to being the correct Namenode principal
-        LOG.info("Logging back in as NameNode user following http server start");
-        nn.loginAsNameNodeUser(conf);
+        if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
+          //add SPNEGO authentication filter for webhdfs
+          final String name = "SPNEGO";
+          final String classname = AuthFilter.class.getName();
+          final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
+          Map<String, String> params = getAuthFilterParams(conf);
+          defineFilter(webAppContext, name, classname, params,
+                       new String[]{pathSpec});
+          LOG.info("Added filter '" + name + "' (class=" + classname + ")");
+
+          // add webhdfs packages
+          addJerseyResourcePackage(
+            NamenodeWebHdfsMethods.class.getPackage().getName()
+            + ";" + Param.class.getPackage().getName(), pathSpec);
+        }
       }
+
+      private Map<String, String> getAuthFilterParams(Configuration conf)
+        throws IOException {
+        Map<String, String> params = new HashMap<String, String>();
+        String principalInConf = conf
+          .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
+        if (principalInConf != null && !principalInConf.isEmpty()) {
+          params
+            .put(
+              DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+              SecurityUtil.getServerPrincipal(principalInConf,
+                                              bindAddress.getHostName()));
+        }
+        String httpKeytab = conf
+          .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
+        if (httpKeytab != null && !httpKeytab.isEmpty()) {
+          params.put(
+            DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
+            httpKeytab);
+        }
+        return params;
+      }
+    };
+
+    boolean certSSL = conf.getBoolean("dfs.https.enable", false);
+    if (certSSL) {
+      boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
+      InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(infoHost + ":" + conf.get(
+        "dfs.https.port", infoHost + ":" + 0));
+      Configuration sslConf = new Configuration(false);
+      if (certSSL) {
+        sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
+                                     "ssl-server.xml"));
+      }
+      httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
+      // assume same ssl port for all datanodes
+      InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
+        "dfs.datanode.https.address", infoHost + ":" + 50475));
+      httpServer.setAttribute("datanode.https.port", datanodeSslPort
+        .getPort());
     }
+    httpServer.setAttribute("name.node", nn);
+    httpServer.setAttribute("name.node.address", bindAddress);
+    httpServer.setAttribute("name.system.image", nn.getFSImage());
+    httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
+    setupServlets(httpServer, conf);
+    httpServer.start();
+    httpAddress = new InetSocketAddress(bindAddress.getAddress(), httpServer.getPort());
   }
-  
+
+
   public void stop() throws Exception {
     if (httpServer != null) {
       httpServer.stop();

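Instead of running the whole web server under a Kerberized-SSL login, the NameNode (and the SecondaryNameNode below) now installs an AuthenticationFilter configured with kerberos.principal and kerberos.keytab. A sketch of assembling those parameters, assuming AuthenticationFilter's AUTH_TYPE key is the string "type" and using hypothetical values:

    import java.util.HashMap;
    import java.util.Map;

    public class SpnegoParamsSketch {
      static Map<String, String> spnegoParams(String principal, String keytab) {
        Map<String, String> params = new HashMap<String, String>();
        if (principal != null && !principal.isEmpty()) {
          params.put("kerberos.principal", principal);  // server principal
        }
        if (keytab != null && !keytab.isEmpty()) {
          params.put("kerberos.keytab", keytab);        // keytab holding its key
        }
        params.put("type", "kerberos");  // AuthenticationFilter.AUTH_TYPE
        return params;
      }

      public static void main(String[] args) {
        System.out.println(spnegoParams("HTTP/nn.example.com@EXAMPLE.COM",
                                        "/etc/security/keytab/nn.keytab"));
      }
    }
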
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

@@ -734,7 +734,7 @@ class NamenodeJspHelper {
         this.inode = null;
       } else {
         this.block = new Block(blockId);
-        this.inode = blockManager.getINode(block);
+        this.inode = (INodeFile) blockManager.getINode(block);
       }
     }
 

+ 41 - 55
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -25,8 +25,10 @@ import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.Date;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
@@ -44,6 +46,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
@@ -63,9 +66,9 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
 
 import org.apache.hadoop.util.Daemon;
@@ -108,7 +111,6 @@ public class SecondaryNameNode implements Runnable {
   private volatile boolean shouldRun;
   private HttpServer infoServer;
   private int infoPort;
-  private int imagePort;
   private String infoBindAddress;
 
   private Collection<URI> checkpointDirs;
@@ -229,63 +231,47 @@ public class SecondaryNameNode implements Runnable {
 
     // Initialize other scheduling parameters from the configuration
     checkpointConf = new CheckpointConf(conf);
-    
+
     // initialize the webserver for uploading files.
-    // Kerberized SSL servers must be run from the host principal...
-    UserGroupInformation httpUGI = 
-      UserGroupInformation.loginUserFromKeytabAndReturnUGI(
-          SecurityUtil.getServerPrincipal(conf
-              .get(DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
-              infoBindAddress),
-          conf.get(DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
-    try {
-      infoServer = httpUGI.doAs(new PrivilegedExceptionAction<HttpServer>() {
-        @Override
-        public HttpServer run() throws IOException, InterruptedException {
-          LOG.info("Starting web server as: " +
-              UserGroupInformation.getCurrentUser().getUserName());
-
-          int tmpInfoPort = infoSocAddr.getPort();
-          infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
-              tmpInfoPort == 0, conf, 
-              new AccessControlList(conf.get(DFS_ADMIN, " ")));
-          
-          if(UserGroupInformation.isSecurityEnabled()) {
-            SecurityUtil.initKrb5CipherSuites();
-            InetSocketAddress secInfoSocAddr = 
-              NetUtils.createSocketAddr(infoBindAddress + ":"+ conf.getInt(
-                DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
-                DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT));
-            imagePort = secInfoSocAddr.getPort();
-            infoServer.addSslListener(secInfoSocAddr, conf, false, true);
+    int tmpInfoPort = infoSocAddr.getPort();
+    infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
+                                tmpInfoPort == 0, conf,
+                                new AccessControlList(conf.get(DFS_ADMIN, " "))) {
+      {
+        if (UserGroupInformation.isSecurityEnabled()) {
+          Map<String, String> params = new HashMap<String, String>();
+          String principalInConf = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY);
+          if (principalInConf != null && !principalInConf.isEmpty()) {
+            params.put("kerberos.principal",
+                       SecurityUtil.getServerPrincipal(principalInConf, infoSocAddr.getHostName()));
           }
-          
-          infoServer.setAttribute("secondary.name.node", SecondaryNameNode.this);
-          infoServer.setAttribute("name.system.image", checkpointImage);
-          infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
-          infoServer.addInternalServlet("getimage", "/getimage",
-              GetImageServlet.class, true);
-          infoServer.start();
-          return infoServer;
+          String httpKeytab = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
+          if (httpKeytab != null && !httpKeytab.isEmpty()) {
+            params.put("kerberos.keytab", httpKeytab);
+          }
+          params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
+
+          defineFilter(webAppContext, SPNEGO_FILTER, AuthenticationFilter.class.getName(),
+                       params, null);
         }
-      });
-    } catch (InterruptedException e) {
-      throw new RuntimeException(e);
-    } 
-    
+      }
+    };
+    infoServer.setAttribute("secondary.name.node", this);
+    infoServer.setAttribute("name.system.image", checkpointImage);
+    infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
+    infoServer.addInternalServlet("getimage", "/getimage",
+                                  GetImageServlet.class, true);
+    infoServer.start();
+
     LOG.info("Web server init done");
 
     // The web-server port can be ephemeral... ensure we have the correct info
     infoPort = infoServer.getPort();
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      imagePort = infoPort;
-    }
-    
-    conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort); 
-    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
-    LOG.info("Secondary image servlet up at: " + infoBindAddress + ":" + imagePort);
+
+    conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort);
+    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
     LOG.info("Checkpoint Period   :" + checkpointConf.getPeriod() + " secs " +
-             "(" + checkpointConf.getPeriod()/60 + " min)");
+             "(" + checkpointConf.getPeriod() / 60 + " min)");
     LOG.info("Log Size Trigger    :" + checkpointConf.getTxnCount() + " txns");
   }
 
@@ -434,7 +420,7 @@ public class SecondaryNameNode implements Runnable {
       throw new IOException("This is not a DFS");
     }
 
-    String configuredAddress = DFSUtil.getInfoServer(null, conf, true);
+    String configuredAddress = DFSUtil.getInfoServer(null, conf, false);
     String address = DFSUtil.substituteForWildcardAddress(configuredAddress,
         fsName.getHost());
     LOG.debug("Will connect to NameNode at HTTP address: " + address);
@@ -446,7 +432,7 @@ public class SecondaryNameNode implements Runnable {
    * for image transfers
    */
   private InetSocketAddress getImageListenAddress() {
-    return new InetSocketAddress(infoBindAddress, imagePort);
+    return new InetSocketAddress(infoBindAddress, infoPort);
   }
 
   /**
@@ -507,7 +493,7 @@ public class SecondaryNameNode implements Runnable {
   
   
   /**
-   * @param argv The parameters passed to this program.
+   * @param opts The parameters passed to this program.
    * @exception Exception if the filesystem does not exist.
    * @return 0 on success, non zero on error.
    */
@@ -709,7 +695,7 @@ public class SecondaryNameNode implements Runnable {
      * Construct a checkpoint image.
      * @param conf Node configuration.
      * @param imageDirs URIs of storage for image.
-     * @param editDirs URIs of storage for edit logs.
+     * @param editsDirs URIs of storage for edit logs.
     * @throws IOException If storage cannot be accessed.
      */
     CheckpointStorage(Configuration conf, 

+ 7 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java

@@ -201,19 +201,17 @@ public class TransferFsImage {
       String queryString, List<File> localPaths,
       NNStorage dstStorage, boolean getChecksum) throws IOException {
     byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
-    String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://";
-    StringBuilder str = new StringBuilder(proto+nnHostPort+"/getimage?");
-    str.append(queryString);
 
+    String str = "http://" + nnHostPort + "/getimage?" + queryString;
+    LOG.info("Opening connection to " + str);
     //
     // open connection to remote server
     //
-    URL url = new URL(str.toString());
-    
-    // Avoid Krb bug with cross-realm hosts
-    SecurityUtil.fetchServiceTicket(url);
-    HttpURLConnection connection = (HttpURLConnection) url.openConnection();
-    
+    URL url = new URL(str);
+
+    HttpURLConnection connection = (HttpURLConnection)
+      SecurityUtil.openSecureHttpConnection(url);
+
     if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
       throw new HttpGetFailedException(
           "Image transfer servlet at " + url +

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java

@@ -95,7 +95,6 @@ public class BootstrapStandby implements Tool, Configurable {
   static final int ERR_CODE_LOGS_UNAVAILABLE = 6; 
 
   public int run(String[] args) throws Exception {
-    SecurityUtil.initKrb5CipherSuites();
     parseArgs(args);
     parseConfAndFindOtherNN();
     NameNode.checkAllowFormat(conf);
@@ -322,7 +321,7 @@ public class BootstrapStandby implements Tool, Configurable {
         "Could not determine valid IPC address for other NameNode (%s)" +
         ", got: %s", otherNNId, otherIpcAddr);
 
-    otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, true);
+    otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, false);
     otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr,
         otherIpcAddr.getHostName());
     

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java

@@ -92,7 +92,7 @@ public class StandbyCheckpointer {
   }
   
   private String getHttpAddress(Configuration conf) {
-    String configuredAddr = DFSUtil.getInfoServer(null, conf, true);
+    String configuredAddr = DFSUtil.getInfoServer(null, conf, false);
     
     // Use the hostname from the RPC address as a default, in case
     // the HTTP address is configured to 0.0.0.0.

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -504,7 +504,7 @@ public class DFSAdmin extends FsShell {
    */
   public int fetchImage(String[] argv, int idx) throws IOException {
     String infoServer = DFSUtil.getInfoServer(
-        HAUtil.getAddressOfActive(getDFS()), getConf(), true);
+        HAUtil.getAddressOfActive(getDFS()), getConf(), false);
     TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
         new File(argv[idx]));
     return 0;

+ 4 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java

@@ -153,8 +153,7 @@ public class DFSck extends Configured implements Tool {
         url.append("&startblockafter=").append(String.valueOf(cookie));
       }
       URL path = new URL(url.toString());
-      SecurityUtil.fetchServiceTicket(path);
-      URLConnection connection = path.openConnection();
+      URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
       InputStream stream = connection.getInputStream();
       BufferedReader input = new BufferedReader(new InputStreamReader(
           stream, "UTF-8"));
@@ -222,16 +221,11 @@ public class DFSck extends Configured implements Tool {
       return null;
     }
     
-    return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, true);
+    return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, false);
   }
 
   private int doWork(final String[] args) throws IOException {
-    String proto = "http://";
-    if (UserGroupInformation.isSecurityEnabled()) {
-      SecurityUtil.initKrb5CipherSuites();
-      proto = "https://";
-    }
-    final StringBuilder url = new StringBuilder(proto);
+    final StringBuilder url = new StringBuilder("http://");
     
     String namenodeAddress = getCurrentNamenodeAddress();
     if (namenodeAddress == null) {
@@ -279,8 +273,7 @@ public class DFSck extends Configured implements Tool {
       return listCorruptFileBlocks(dir, url.toString());
     }
     URL path = new URL(url.toString());
-    SecurityUtil.fetchServiceTicket(path);
-    URLConnection connection = path.openConnection();
+    URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
     InputStream stream = connection.getInputStream();
     BufferedReader input = new BufferedReader(new InputStreamReader(
                                               stream, "UTF-8"));

+ 4 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java

@@ -72,11 +72,6 @@ public class DelegationTokenFetcher {
   private static final String RENEW = "renew";
   private static final String PRINT = "print";
 
-  static {
-    // Enable Kerberos sockets
-    System.setProperty("https.cipherSuites", "TLS_KRB5_WITH_3DES_EDE_CBC_SHA");
-  }
-
   private static void printUsage(PrintStream err) throws IOException {
     err.println("fetchdt retrieves delegation tokens from the NameNode");
     err.println();
@@ -106,7 +101,7 @@ public class DelegationTokenFetcher {
     final Configuration conf = new HdfsConfiguration();
     Options fetcherOptions = new Options();
     fetcherOptions.addOption(WEBSERVICE, true,
-        "HTTPS url to reach the NameNode at");
+        "HTTP url to reach the NameNode at");
     fetcherOptions.addOption(RENEWER, true,
         "Name of the delegation token renewer");
     fetcherOptions.addOption(CANCEL, false, "cancel the token");
@@ -224,8 +219,7 @@ public class DelegationTokenFetcher {
       }
       
       URL remoteURL = new URL(url.toString());
-      SecurityUtil.fetchServiceTicket(remoteURL);
-      URLConnection connection = URLUtils.openConnection(remoteURL);
+      URLConnection connection = SecurityUtil.openSecureHttpConnection(remoteURL);
       InputStream in = connection.getInputStream();
       Credentials ts = new Credentials();
       dis = new DataInputStream(in);
@@ -264,7 +258,6 @@
     
     try {
       URL url = new URL(buf.toString());
-      SecurityUtil.fetchServiceTicket(url);
-      connection = (HttpURLConnection)URLUtils.openConnection(url);
+      connection = (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error renewing token: " + 
@@ -358,8 +352,7 @@ public class DelegationTokenFetcher {
     HttpURLConnection connection=null;
     try {
       URL url = new URL(buf.toString());
-      SecurityUtil.fetchServiceTicket(url);
-      connection = (HttpURLConnection)URLUtils.openConnection(url);
+      connection = (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error cancelling token: " + 
             connection.getResponseMessage());

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -858,4 +858,15 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.kerberos.internal.spnego.principal</name>
+  <value>${dfs.web.authentication.kerberos.principal}</value>
+</property>
+
+<property>
+  <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
+  <value>${dfs.web.authentication.kerberos.principal}</value>
+</property>
+
+
 </configuration>
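
The ${dfs.web.authentication.kerberos.principal} defaults above rely on Configuration's variable expansion, so the internal SPNEGO principals track the web-authentication principal unless overridden. A small illustration (property names from the hunk; the principal value is a placeholder):

    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.web.authentication.kerberos.principal", "HTTP/_HOST@EXAMPLE.COM");
    // Resolved via ${...} substitution at read time:
    String spnego = conf.get("dfs.namenode.kerberos.internal.spnego.principal");
    // spnego == "HTTP/_HOST@EXAMPLE.COM" unless the internal key is set explicitly.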

+ 93 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java

@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+/**
+ * Make sure that ViewFileSystem works when the root of an FS is mounted to a
+ * ViewFileSystem mount point.
+ */
+public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
+
+  private static MiniDFSCluster cluster;
+  private static Configuration CONF = new Configuration();
+  private static FileSystem fHdfs;
+  
+  @BeforeClass
+  public static void clusterSetupAtBegining() throws IOException,
+      LoginException, URISyntaxException {
+    SupportsBlocks = true;
+    CONF.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+    
+    cluster = new MiniDFSCluster.Builder(CONF)
+      .numDataNodes(2)
+      .build();
+    cluster.waitClusterUp();
+    
+    fHdfs = cluster.getFileSystem();
+  }
+      
+  @AfterClass
+  public static void clusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();   
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    fsTarget = fHdfs;
+    super.setUp();
+  }
+
+  /**
+   * Override this so that we don't set the targetTestRoot to any path under the
+   * root of the FS, and so that we don't try to delete the test dir, but rather
+   * only its contents.
+   */
+  @Override
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = fHdfs.makeQualified(new Path("/"));
+    for (FileStatus status : fHdfs.listStatus(targetTestRoot)) {
+      fHdfs.delete(status.getPath(), true);
+    }
+  }
+
+  @Override
+  int getExpectedDelegationTokenCount() {
+    return 8;
+  }
+
+  @Override
+  int getExpectedDelegationTokenCountWithCredentials() {
+    return 1;
+  }
+}

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java

@@ -105,17 +105,17 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
   // additional mount.
   @Override
   int getExpectedDirPaths() {
-    return 7;
+    return 8;
   }
   
   @Override
   int getExpectedMountPoints() {
-    return 8;
+    return 9;
   }
 
   @Override
   int getExpectedDelegationTokenCount() {
-    return 8;
+    return 9;
   }
 
   @Override

+ 93 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java

@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+/**
+ * Make sure that ViewFs works when the root of an FS is mounted to a ViewFs
+ * mount point.
+ */
+public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
+  
+  private static MiniDFSCluster cluster;
+  private static HdfsConfiguration CONF = new HdfsConfiguration();
+  private static FileContext fc;
+  
+  @BeforeClass
+  public static void clusterSetupAtBegining() throws IOException,
+      LoginException, URISyntaxException {
+    SupportsBlocks = true;
+    CONF.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+
+    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
+    cluster.waitClusterUp();
+    fc = FileContext.getFileContext(cluster.getURI(0), CONF);
+  }
+
+      
+  @AfterClass
+  public static void ClusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();   
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // use the shared HDFS FileContext as the target for the test root
+    fcTarget = fc;
+    super.setUp();    
+  }
+  
+  /**
+   * Override this so that we don't set the targetTestRoot to any path under the
+   * root of the FS, and so that we don't try to delete the test dir, but rather
+   * only its contents.
+   */
+  @Override
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = fc.makeQualified(new Path("/"));
+    RemoteIterator<FileStatus> dirContents = fc.listStatus(targetTestRoot);
+    while (dirContents.hasNext()) {
+      fc.delete(dirContents.next().getPath(), true);
+    }
+  }
+  
+  /**
+   * This overrides the default implementation since hdfs does have delegation
+   * tokens.
+   */
+  @Override
+  int getExpectedDelegationTokenCount() {
+    return 8;
+  }
+}

+ 5 - 23
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.fs.viewfs;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
-import java.util.List;
 
 import javax.security.auth.login.LoginException;
 
@@ -30,20 +29,13 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-
-import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Test;
-
 
 public class TestViewFsHdfs extends ViewFsBaseTest {
 
   private static MiniDFSCluster cluster;
-  private static Path defaultWorkingDirectory;
   private static HdfsConfiguration CONF = new HdfsConfiguration();
   private static FileContext fc;
   
@@ -57,7 +49,7 @@ public class TestViewFsHdfs extends ViewFsBaseTest {
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     cluster.waitClusterUp();
     fc = FileContext.getFileContext(cluster.getURI(0), CONF);
-    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
+    Path defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
         UserGroupInformation.getCurrentUser().getShortUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }
@@ -73,25 +65,15 @@ public class TestViewFsHdfs extends ViewFsBaseTest {
     // create the test root on local_fs
     fcTarget = fc;
     super.setUp();
-    
   }
-
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
-  }
-  
   
-  /*
-   * This overides the default implementation since hdfs does have delegation
+  /**
+   * This overrides the default implementation since hdfs does have delegation
    * tokens.
    */
   @Override
-  @Test
-  public void testGetDelegationTokens() throws IOException {
-    List<Token<?>> delTokens = 
-        fcView.getDelegationTokens(new Path("/"), "sanjay");
-    Assert.assertEquals(7, delTokens.size()); 
+  int getExpectedDelegationTokenCount() {
+    return 8;
   }
  
 }

+ 29 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java

@@ -24,6 +24,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.io.IOUtils;
 
 import org.apache.hadoop.security.token.Token;
 import org.junit.Test;
@@ -230,6 +232,33 @@ public class TestConnCache {
 
     in.close();
   }
+  
+  /**
+   * Test that the socket cache can be disabled by setting the capacity to
+   * 0. Regression test for HDFS-3365.
+   */
+  @Test
+  public void testDisableCache() throws IOException {
+    LOG.info("Starting testDisableCache()");
+
+    // Reading with the normally configured filesystem should
+    // cache a socket.
+    DFSTestUtil.readFile(fs, testFile);
+    assertEquals(1, ((DistributedFileSystem)fs).dfs.socketCache.size());
+    
+    // Configure a new instance with no caching, ensure that it doesn't
+    // cache anything
+    Configuration confWithoutCache = new Configuration(fs.getConf());
+    confWithoutCache.setInt(
+        DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
+    FileSystem fsWithoutCache = FileSystem.newInstance(confWithoutCache);
+    try {
+      DFSTestUtil.readFile(fsWithoutCache, testFile);
+      assertEquals(0, ((DistributedFileSystem)fsWithoutCache).dfs.socketCache.size());
+    } finally {
+      fsWithoutCache.close();
+    }
+  }
 
   @AfterClass
   public static void teardownCluster() throws Exception {

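For reference, the knob exercised by testDisableCache, with fs and testFile as in the test above; newInstance is used so the per-JVM FileSystem cache does not hand back an already-configured client:

    Configuration confWithoutCache = new Configuration(fs.getConf());
    confWithoutCache.setInt(
        DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0); // 0 disables caching
    FileSystem fsWithoutCache = FileSystem.newInstance(confWithoutCache); // bypasses FileSystem.get's cache
    try {
      DFSTestUtil.readFile(fsWithoutCache, testFile);
    } finally {
      fsWithoutCache.close();
    }
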
+ 40 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java

@@ -17,10 +17,12 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
 import static org.junit.Assert.*;
 
+import java.io.InputStream;
 import java.io.PrintWriter;
 import java.net.InetSocketAddress;
 import java.net.Socket;
@@ -40,6 +42,8 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.io.NullOutputStream;
+
 public class TestDataTransferKeepalive {
   Configuration conf = new HdfsConfiguration();
   private MiniDFSCluster cluster;
@@ -56,6 +60,8 @@ public class TestDataTransferKeepalive {
   public void setup() throws Exception {
     conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
         KEEPALIVE_TIMEOUT);
+    conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
+        0);
     
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(1).build();
@@ -143,6 +149,40 @@ public class TestDataTransferKeepalive {
       IOUtils.closeStream(stm);
     }
   }
+  
+  @Test(timeout=30000)
+  public void testManyClosedSocketsInCache() throws Exception {
+    // Make a small file
+    DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
+
+    // Insert a bunch of dead sockets into the cache by opening
+    // many streams concurrently, reading all of the data,
+    // and then closing them.
+    InputStream[] stms = new InputStream[5];
+    try {
+      for (int i = 0; i < stms.length; i++) {
+        stms[i] = fs.open(TEST_FILE);
+      }
+      for (InputStream stm : stms) {
+        IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
+      }
+    } finally {
+      IOUtils.cleanup(null, stms);
+    }
+    
+    DFSClient client = ((DistributedFileSystem)fs).dfs;
+    assertEquals(5, client.socketCache.size());
+    
+    // Let all the xceivers timeout
+    Thread.sleep(1500);
+    assertXceiverCount(0);
+
+    // Client side still has the sockets cached
+    assertEquals(5, client.socketCache.size());
+
+    // Reading should not throw an exception.
+    DFSTestUtil.readFile(fs, TEST_FILE);
+  }
 
   private void assertXceiverCount(int expected) {
     // Subtract 1, since the DataXceiverServer

+ 104 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java

@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test removal of a replica in the RBW state: the corrupted replica is
+ * invalidated and the resulting under-replicated block is re-replicated.
+ */
+public class TestRBWBlockInvalidation {
+  private static NumberReplicas countReplicas(final FSNamesystem namesystem,
+      ExtendedBlock block) {
+    return namesystem.getBlockManager().countNodes(block.getLocalBlock());
+  }
+
+  /**
+   * Test that when a block's replica is removed from the RBW folder on one of
+   * the datanodes, the namenode invalidates that corrupted replica and
+   * schedules replication of one more replica for the under-replicated block.
+   */
+  @Test
+  public void testBlockInvalidationWhenRBWReplicaMissedInDN()
+      throws IOException, InterruptedException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
+        .build();
+    FSDataOutputStream out = null;
+    try {
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      FileSystem fs = cluster.getFileSystem();
+      Path testPath = new Path(MiniDFSCluster.getBaseDirectory(), "foo1");
+      out = fs.create(testPath, (short) 3);
+      out.writeBytes("HDFS-3157: " + testPath);
+      out.hsync();
+      String bpid = namesystem.getBlockPoolId();
+      ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath);
+      Block block = blk.getLocalBlock();
+      // Deleting partial block and its meta information from the RBW folder
+      // of first datanode.
+      DataNode dn = cluster.getDataNodes().get(0);
+      File blockFile = DataNodeTestUtils.getBlockFile(dn, bpid, block);
+      File metaFile = DataNodeTestUtils.getMetaFile(dn, bpid, block);
+      assertTrue("Could not delete the block file from the RBW folder",
+          blockFile.delete());
+      assertTrue("Could not delete the block meta file from the RBW folder",
+          metaFile.delete());
+      out.close();
+      assertEquals("The corrupt replica could not be invalidated", 0,
+          countReplicas(namesystem, blk).corruptReplicas());
+      /*
+       * Sleep for 3 seconds to give the under-replicated block time to be
+       * re-replicated: roughly one second for the ReplicationMonitor to act
+       * and one more for the invalidated block to be deleted from the datanode.
+       */
+      Thread.sleep(3000);
+      blk = DFSTestUtil.getFirstBlock(fs, testPath);
+      assertEquals("There should be three live replicas", 3,
+          countReplicas(namesystem, blk).liveReplicas());
+    } finally {
+      if (out != null) {
+        out.close();
+      }
+      cluster.shutdown();
+    }
+  }
+}

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java

@@ -136,6 +136,11 @@ public class DataNodeTestUtils {
       ) throws IOException {
     return FsDatasetTestUtil.getBlockFile(dn.getFSDataset(), bpid, b);
   }
+  
+  public static File getMetaFile(DataNode dn, String bpid, Block b)
+      throws IOException {
+    return FsDatasetTestUtil.getMetaFile(dn.getFSDataset(), bpid, b);
+  }
 
   public static boolean unlinkBlock(DataNode dn, ExtendedBlock bk, int numLinks
       ) throws IOException {

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java

@@ -36,6 +36,12 @@ public class FsDatasetTestUtil {
       ) throws IOException {
     return ((FsDatasetImpl)fsd).getBlockFile(bpid, b);
   }
+  
+  public static File getMetaFile(FsDatasetSpi<?> fsd, String bpid, Block b)
+      throws IOException {
+    return FsDatasetUtil.getMetaFile(getBlockFile(fsd, bpid, b), b
+        .getGenerationStamp());
+  }
 
   public static boolean unlinkBlock(FsDatasetSpi<?> fsd,
       ExtendedBlock block, int numLinks) throws IOException {

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java

@@ -46,9 +46,9 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -585,7 +585,7 @@ public class TestDNFencing {
     }
 
     @Override
-    public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode,
+    public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
         Block block, short replicationFactor,
         Collection<DatanodeDescriptor> first,
         Collection<DatanodeDescriptor> second) {

+ 17 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -284,6 +284,9 @@ Release 2.0.0 - UNRELEASED
     MAPREDUCE-3958. RM: Remove RMNodeState and replace it with NodeState
     (Bikas Saha via bobby)
 
+    MAPREDUCE-4231. Update RAID to use the new BlockCollection interface.
+    (szetszwo)
+
 Release 0.23.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -321,8 +324,13 @@ Release 0.23.3 - UNRELEASED
 
     MAPREDUCE-4210. Expose listener address for WebApp (Daryn Sharp via bobby)
 
+    MAPREDUCE-4162. Correctly set token service (Daryn Sharp via bobby)
+
   OPTIMIZATIONS
 
+    MAPREDUCE-3850. Avoid redundant calls for tokens in TokenCache (Daryn
+    Sharp via bobby)
+
   BUG FIXES
 
     MAPREDUCE-4092.  commitJob Exception does not fail job (Jon Eagles via
@@ -458,6 +466,15 @@ Release 0.23.3 - UNRELEASED
     MAPREDUCE-4048. NullPointerException exception while accessing the
     Application Master UI (Devaraj K via bobby)
 
+    MAPREDUCE-4220. RM apps page starttime/endtime sorts are incorrect
+    (Jonathan Eagles via bobby)
+
+    MAPREDUCE-4226. ConcurrentModificationException in FileSystemCounterGroup.
+    (tomwhite)
+
+    MAPREDUCE-4215. RM app page shows 500 error on appid parse error
+    (Jonathan Eagles via tgraves)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 5 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java

@@ -50,7 +50,9 @@ import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -77,7 +79,8 @@ class YarnChild {
 
     String host = args[0];
     int port = Integer.parseInt(args[1]);
-    final InetSocketAddress address = new InetSocketAddress(host, port);
+    final InetSocketAddress address =
+        NetUtils.createSocketAddrForHost(host, port);
     final TaskAttemptID firstTaskid = TaskAttemptID.forName(args[2]);
     int jvmIdInt = Integer.parseInt(args[3]);
     JVMId jvmId = new JVMId(firstTaskid.getJobID(),
@@ -214,8 +217,7 @@ class YarnChild {
     LOG.debug("loading token. # keys =" +credentials.numberOfSecretKeys() +
         "; from file=" + jobTokenFile);
     Token<JobTokenIdentifier> jt = TokenCache.getJobToken(credentials);
-    jt.setService(new Text(address.getAddress().getHostAddress() + ":"
-        + address.getPort()));
+    SecurityUtil.setTokenService(jt, address);
     UserGroupInformation current = UserGroupInformation.getCurrentUser();
     current.addToken(jt);
     for (Token<? extends TokenIdentifier> tok : credentials.getAllTokens()) {

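SecurityUtil.setTokenService, adopted here and in several hunks below, centralizes how a token's "service" field is derived from an address. A before/after sketch with host, port, and jt as in the hunks above, assuming the service resolves to the same ip:port form the old code built by hand:

    InetSocketAddress address = NetUtils.createSocketAddrForHost(host, port);
    // Old, hand-rolled form:
    //   jt.setService(new Text(address.getAddress().getHostAddress() + ":" + address.getPort()));
    // New form, with the resolution rules kept in one place:
    SecurityUtil.setTokenService(jt, address);
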
+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java

@@ -180,6 +180,11 @@ public class MRClientService extends AbstractService
     private RecordFactory recordFactory = 
       RecordFactoryProvider.getRecordFactory(null);
 
+    @Override
+    public InetSocketAddress getConnectAddress() {
+      return getBindAddress();
+    }
+    
     private Job verifyAndGetJob(JobId jobID, 
         boolean modifyAccess) throws YarnRemoteException {
       Job job = appContext.getJob(jobID);

+ 7 - 7
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapreduce.v2.app.launcher;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedAction;
 import java.util.HashSet;
@@ -34,7 +35,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.ShuffleHandler;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.api.records.ContainerToken;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.ProtoUtils;
 import org.apache.hadoop.yarn.util.Records;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -321,13 +322,13 @@ public class ContainerLauncherImpl extends AbstractService implements
       final String containerManagerBindAddr, ContainerToken containerToken)
       throws IOException {
 
+    final InetSocketAddress cmAddr =
+        NetUtils.createSocketAddr(containerManagerBindAddr);
     UserGroupInformation user = UserGroupInformation.getCurrentUser();
 
     if (UserGroupInformation.isSecurityEnabled()) {
-      Token<ContainerTokenIdentifier> token = new Token<ContainerTokenIdentifier>(
-          containerToken.getIdentifier().array(), containerToken
-              .getPassword().array(), new Text(containerToken.getKind()),
-          new Text(containerToken.getService()));
+      Token<ContainerTokenIdentifier> token =
+          ProtoUtils.convertFromProtoFormat(containerToken, cmAddr);
       // the user in createRemoteUser in this context has to be ContainerID
       user = UserGroupInformation.createRemoteUser(containerID.toString());
       user.addToken(token);
@@ -338,8 +339,7 @@ public class ContainerLauncherImpl extends AbstractService implements
           @Override
           public ContainerManager run() {
             return (ContainerManager) rpc.getProxy(ContainerManager.class,
-                NetUtils.createSocketAddr(containerManagerBindAddr),
-                getConfig());
+                cmAddr, getConfig());
           }
         });
     return proxy;

+ 9 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -133,15 +134,14 @@ public abstract class RMCommunicator extends AbstractService  {
 
   protected void register() {
     //Register
-    String host = clientService.getBindAddress().getAddress()
-        .getCanonicalHostName();
+    InetSocketAddress serviceAddr = clientService.getBindAddress();
     try {
       RegisterApplicationMasterRequest request =
         recordFactory.newRecordInstance(RegisterApplicationMasterRequest.class);
       request.setApplicationAttemptId(applicationAttemptId);
-      request.setHost(host);
-      request.setRpcPort(clientService.getBindAddress().getPort());
-      request.setTrackingUrl(host + ":" + clientService.getHttpPort());
+      request.setHost(serviceAddr.getHostName());
+      request.setRpcPort(serviceAddr.getPort());
+      request.setTrackingUrl(serviceAddr.getHostName() + ":" + clientService.getHttpPort());
       RegisterApplicationMasterResponse response =
         scheduler.registerApplicationMaster(request);
       minContainerCapability = response.getMinimumResourceCapability();
@@ -262,9 +262,6 @@ public abstract class RMCommunicator extends AbstractService  {
     if (UserGroupInformation.isSecurityEnabled()) {
       String tokenURLEncodedStr = System.getenv().get(
           ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("AppMasterToken is " + tokenURLEncodedStr);
-      }
       Token<? extends TokenIdentifier> token = new Token<TokenIdentifier>();
 
       try {
@@ -273,6 +270,10 @@ public abstract class RMCommunicator extends AbstractService  {
         throw new YarnException(e);
       }
 
+      SecurityUtil.setTokenService(token, serviceAddr);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("AppMasterToken is " + token);
+      }
       currentUser.addToken(token);
     }
 

+ 7 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.mapreduce.v2.api;
 
+import java.net.InetSocketAddress;
+
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
@@ -45,6 +47,11 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 
 public interface MRClientProtocol {
+  /**
+   * Address to which the client is connected
+   * @return InetSocketAddress
+   */
+  public InetSocketAddress getConnectAddress();
   public GetJobReportResponse getJobReport(GetJobReportRequest request) throws YarnRemoteException;
   public GetTaskReportResponse getTaskReport(GetTaskReportRequest request) throws YarnRemoteException;
   public GetTaskAttemptReportResponse getTaskAttemptReport(GetTaskAttemptReportRequest request) throws YarnRemoteException;

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java

@@ -104,6 +104,11 @@ public class MRClientProtocolPBClientImpl implements MRClientProtocol {
         MRClientProtocolPB.class, clientVersion, addr, conf);
   }
   
+  @Override
+  public InetSocketAddress getConnectAddress() {
+    return RPC.getServerAddress(proxy);
+  }
+
   @Override
   public GetJobReportResponse getJobReport(GetJobReportRequest request)
       throws YarnRemoteException {

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java

@@ -122,6 +122,11 @@ public class TestRPCFactories {
   
   public class MRClientProtocolTestImpl implements MRClientProtocol {
 
+    @Override
+    public InetSocketAddress getConnectAddress() {
+      return null;
+    }
+    
     @Override
     public GetJobReportResponse getJobReport(GetJobReportRequest request)
         throws YarnRemoteException {

+ 2 - 17
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java

@@ -35,13 +35,11 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.Master;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
 import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.mapreduce.util.ConfigUtil;
 import org.apache.hadoop.mapreduce.v2.LogParams;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -388,21 +386,8 @@ public class Cluster {
    */
   public Token<DelegationTokenIdentifier> 
       getDelegationToken(Text renewer) throws IOException, InterruptedException{
-    Token<DelegationTokenIdentifier> result =
-      client.getDelegationToken(renewer);
-
-    if (result == null) {
-      return result;
-    }
-
-    InetSocketAddress addr = Master.getMasterAddress(conf);
-    StringBuilder service = new StringBuilder();
-    service.append(NetUtils.normalizeHostName(addr.getAddress().
-                                              getHostAddress()));
-    service.append(':');
-    service.append(addr.getPort());
-    result.setService(new Text(service.toString()));
-    return result;
+    // client has already set the service
+    return client.getDelegationToken(renewer);
   }
 
   /**

+ 3 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java

@@ -23,6 +23,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.Iterator;
 import java.util.Locale;
 import java.util.Map;
@@ -54,7 +55,8 @@ public abstract class FileSystemCounterGroup<C extends Counter>
 
   // C[] would need Array.newInstance which requires a Class<C> reference.
   // Just a few local casts probably worth not having to carry it around.
-  private final Map<String, Object[]> map = Maps.newTreeMap();
+  private final Map<String, Object[]> map =
+    new ConcurrentSkipListMap<String, Object[]>();
   private String displayName;
 
   private static final Joiner NAME_JOINER = Joiner.on('_');
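
Why this one-line swap fixes the ConcurrentModificationException from MAPREDUCE-4226: TreeMap iterators are fail-fast and throw if the map is structurally modified mid-iteration, while ConcurrentSkipListMap's iterators are weakly consistent. A self-contained illustration; the class and key names are illustrative, not from the patch:

    import java.util.Map;
    import java.util.concurrent.ConcurrentSkipListMap;

    public class WeaklyConsistentIterationDemo {
      public static void main(String[] args) {
        Map<String, Object[]> map = new ConcurrentSkipListMap<String, Object[]>();
        map.put("fs1", new Object[1]);
        for (String scheme : map.keySet()) {
          // Safe here: skip-list iterators tolerate concurrent mutation.
          // Iterating a TreeMap's keySet, this put of a new key would throw
          // ConcurrentModificationException on the next iteration step.
          map.put("fs2", new Object[1]);
        }
        System.out.println(map.keySet()); // [fs1, fs2]
      }
    }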

+ 6 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java

@@ -19,7 +19,9 @@
 package org.apache.hadoop.mapreduce.security;
 
 import java.io.IOException;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -92,8 +94,11 @@ public class TokenCache {
 
   static void obtainTokensForNamenodesInternal(Credentials credentials,
       Path[] ps, Configuration conf) throws IOException {
+    Set<FileSystem> fsSet = new HashSet<FileSystem>();
     for(Path p: ps) {
-      FileSystem fs = FileSystem.get(p.toUri(), conf);
+      fsSet.add(p.getFileSystem(conf));
+    }
+    for (FileSystem fs : fsSet) {
       obtainTokensForNamenodesInternal(fs, credentials, conf);
     }
   }

+ 20 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.mapred;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.text.ParseException;
@@ -203,6 +204,25 @@ public class TestCounters {
     counters.incrCounter("group1", "counter2", 1);
     iterator.next();
   }
+
+  @Test
+  public void testFileSystemGroupIteratorConcurrency() {
+    Counters counters = new Counters();
+    // create 2 filesystem counter groups
+    counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
+    counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);
+    
+    // Iterate over the counters in this group while updating counters in
+    // the group
+    Group group = counters.getGroup(FileSystemCounter.class.getName());
+    Iterator<Counter> iterator = group.iterator();
+    counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
+    assertTrue(iterator.hasNext());
+    iterator.next();
+    counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
+    assertTrue(iterator.hasNext());
+    iterator.next();
+  }
   
   public static void main(String[] args) throws IOException {
     new TestCounters().testCounters();

+ 20 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java

@@ -251,6 +251,26 @@ public class TestTokenCache {
     return mockFs;
   }
 
+  @Test
+  public void testSingleTokenFetch() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
+    String renewer = Master.getMasterPrincipal(conf);
+    Credentials credentials = new Credentials();
+    
+    FileSystem mockFs = mock(FileSystem.class);
+    when(mockFs.getCanonicalServiceName()).thenReturn("host:0");
+    when(mockFs.getUri()).thenReturn(new URI("mockfs://host:0"));
+    
+    Path mockPath = mock(Path.class);
+    when(mockPath.getFileSystem(conf)).thenReturn(mockFs);
+    
+    Path[] paths = new Path[]{ mockPath, mockPath };
+    when(mockFs.getDelegationTokens("me", credentials)).thenReturn(null);
+    TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);
+    verify(mockFs, times(1)).getDelegationTokens(renewer, credentials);
+  }
+
   @Test
   public void testCleanUpTokenReferral() throws Exception {
     Configuration conf = new Configuration();

+ 5 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java

@@ -178,6 +178,10 @@ public class HistoryClientService extends AbstractService {
 
     private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
 
+    public InetSocketAddress getConnectAddress() {
+      return getBindAddress();
+    }
+    
     private Job verifyAndGetJob(final JobId jobID) throws YarnRemoteException {
       UserGroupInformation loginUgi = null;
       Job job = null;
@@ -335,8 +339,7 @@ public class HistoryClientService extends AbstractService {
               jhsDTSecretManager);
       DelegationToken mrDToken = BuilderUtils.newDelegationToken(
         realJHSToken.getIdentifier(), realJHSToken.getKind().toString(),
-        realJHSToken.getPassword(), bindAddress.getAddress().getHostAddress()
-            + ":" + bindAddress.getPort());
+        realJHSToken.getPassword(), realJHSToken.getService().toString());
       response.setDelegationToken(mrDToken);
       return response;
       } catch (IOException i) {

+ 9 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java

@@ -32,7 +32,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.JobStatus;
 import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -63,6 +62,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.YarnException;
@@ -144,7 +144,7 @@ public class ClientServiceDelegate {
     if (application != null) {
       trackingUrl = application.getTrackingUrl();
     }
-    String serviceAddr = null;
+    InetSocketAddress serviceAddr = null;
     while (application == null
         || YarnApplicationState.RUNNING == application
             .getYarnApplicationState()) {
@@ -172,25 +172,23 @@ public class ClientServiceDelegate {
         if(!conf.getBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED, false)) {
           UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(
               UserGroupInformation.getCurrentUser().getUserName());
-          serviceAddr = application.getHost() + ":" + application.getRpcPort();
+          serviceAddr = NetUtils.createSocketAddrForHost(
+              application.getHost(), application.getRpcPort());
           if (UserGroupInformation.isSecurityEnabled()) {
             String clientTokenEncoded = application.getClientToken();
             Token<ApplicationTokenIdentifier> clientToken =
               new Token<ApplicationTokenIdentifier>();
             clientToken.decodeFromUrlString(clientTokenEncoded);
             // RPC layer client expects ip:port as service for tokens
-            InetSocketAddress addr = NetUtils.createSocketAddr(application
-                .getHost(), application.getRpcPort());
-            clientToken.setService(new Text(addr.getAddress().getHostAddress()
-                + ":" + addr.getPort()));
+            SecurityUtil.setTokenService(clientToken, serviceAddr);
             newUgi.addToken(clientToken);
           }
           LOG.debug("Connecting to " + serviceAddr);
-          final String tempStr = serviceAddr;
+          final InetSocketAddress finalServiceAddr = serviceAddr;
           realProxy = newUgi.doAs(new PrivilegedExceptionAction<MRClientProtocol>() {
             @Override
             public MRClientProtocol run() throws IOException {
-              return instantiateAMProxy(tempStr);
+              return instantiateAMProxy(finalServiceAddr);
             }
           });
         } else {
@@ -270,13 +268,13 @@ public class ClientServiceDelegate {
     return historyServerProxy;
   }
 
-  MRClientProtocol instantiateAMProxy(final String serviceAddr)
+  MRClientProtocol instantiateAMProxy(final InetSocketAddress serviceAddr)
       throws IOException {
     LOG.trace("Connecting to ApplicationMaster at: " + serviceAddr);
     YarnRPC rpc = YarnRPC.create(conf);
     MRClientProtocol proxy = 
          (MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
-            NetUtils.createSocketAddr(serviceAddr), conf);
+            serviceAddr, conf);
     LOG.trace("Connected to ApplicationMaster at: " + serviceAddr);
     return proxy;
   }

+ 7 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapred;
 
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.HashMap;
 
@@ -209,4 +210,10 @@ public class NotRunningJob implements MRClientProtocol {
     /* Should not be invoked by anyone. */
     throw new NotImplementedException();
   }
+  
+  @Override
+  public InetSocketAddress getConnectAddress() {
+    /* Should not be invoked by anyone.  Normally used to set token service */
+    throw new NotImplementedException();
+  }
 }

+ 11 - 13
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java

@@ -37,8 +37,6 @@ import org.apache.hadoop.mapreduce.QueueInfo;
 import org.apache.hadoop.mapreduce.TaskTrackerInfo;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
-import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -67,14 +65,14 @@ import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.util.ProtoUtils;
 
 
 // TODO: This should be part of something like yarn-client.
 public class ResourceMgrDelegate {
   private static final Log LOG = LogFactory.getLog(ResourceMgrDelegate.class);
       
-  private final String rmAddress;
+  private final InetSocketAddress rmAddress;
   private YarnConfiguration conf;
   ClientRMProtocol applicationsManager;
   private ApplicationId applicationId;
@@ -87,11 +85,7 @@ public class ResourceMgrDelegate {
   public ResourceMgrDelegate(YarnConfiguration conf) {
     this.conf = conf;
     YarnRPC rpc = YarnRPC.create(this.conf);
-    InetSocketAddress rmAddress = conf.getSocketAddr(
-            YarnConfiguration.RM_ADDRESS,
-            YarnConfiguration.DEFAULT_RM_ADDRESS,
-            YarnConfiguration.DEFAULT_RM_PORT);
-    this.rmAddress = rmAddress.toString();
+    this.rmAddress = getRmAddress(conf);
     LOG.debug("Connecting to ResourceManager at " + rmAddress);
     applicationsManager =
         (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class,
@@ -109,7 +103,13 @@ public class ResourceMgrDelegate {
       ClientRMProtocol applicationsManager) {
     this.conf = conf;
     this.applicationsManager = applicationsManager;
-    this.rmAddress = applicationsManager.toString();
+    this.rmAddress = getRmAddress(conf);
+  }
+  
+  private static InetSocketAddress getRmAddress(YarnConfiguration conf) {
+    return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
+                              YarnConfiguration.DEFAULT_RM_ADDRESS,
+                              YarnConfiguration.DEFAULT_RM_PORT);
   }
   
   public void cancelDelegationToken(Token<DelegationTokenIdentifier> arg0)
@@ -168,9 +168,7 @@ public class ResourceMgrDelegate {
     org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse 
       response = applicationsManager.getDelegationToken(rmDTRequest);
     DelegationToken yarnToken = response.getRMDelegationToken();
-    return new Token<RMDelegationTokenIdentifier>(yarnToken.getIdentifier().array(),
-        yarnToken.getPassword().array(), 
-        new Text(yarnToken.getKind()), new Text(yarnToken.getService()));
+    return ProtoUtils.convertFromProtoFormat(yarnToken, rmAddress);
   }
 
 

+ 4 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java

@@ -56,7 +56,6 @@ import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
 import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.mapreduce.v2.LogParams;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
-import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
@@ -84,6 +83,7 @@ import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.ProtoUtils;
 
 
 /**
@@ -184,7 +184,7 @@ public class YARNRunner implements ClientProtocol {
     return resMgrDelegate.getClusterMetrics();
   }
 
-  private Token<MRDelegationTokenIdentifier> getDelegationTokenFromHS(
+  private Token<?> getDelegationTokenFromHS(
       MRClientProtocol hsProxy, Text renewer) throws IOException,
       InterruptedException {
     GetDelegationTokenRequest request = recordFactory
@@ -192,10 +192,8 @@ public class YARNRunner implements ClientProtocol {
     request.setRenewer(renewer.toString());
     DelegationToken mrDelegationToken = hsProxy.getDelegationToken(request)
       .getDelegationToken();
-    return new Token<MRDelegationTokenIdentifier>(mrDelegationToken
-      .getIdentifier().array(), mrDelegationToken.getPassword().array(),
-      new Text(mrDelegationToken.getKind()), new Text(
-        mrDelegationToken.getService()));
+    return ProtoUtils.convertFromProtoFormat(mrDelegationToken,
+                                             hsProxy.getConnectAddress());
   }
   
   @Override

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java

@@ -368,6 +368,11 @@ public class TestClientRedirect {
       this(AMHOSTADDRESS);
     }
 
+    @Override
+    public InetSocketAddress getConnectAddress() {
+      return bindAddress;
+    }
+    
     public AMService(String hostAddress) {
       super("AMService");
       this.protocol = MRClientProtocol.class;

+ 7 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java

@@ -27,6 +27,7 @@ import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.Arrays;
 import java.util.Collection;
 
@@ -242,7 +243,7 @@ public class TestClientServiceDelegate {
     // should use the same proxy to AM2 and so instantiateProxy shouldn't be
     // called.
     doReturn(firstGenAMProxy).doReturn(secondGenAMProxy).when(
-        clientServiceDelegate).instantiateAMProxy(any(String.class));
+        clientServiceDelegate).instantiateAMProxy(any(InetSocketAddress.class));
 
     JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
     Assert.assertNotNull(jobStatus);
@@ -257,7 +258,7 @@ public class TestClientServiceDelegate {
     Assert.assertEquals("jobName-secondGen", jobStatus.getJobName());
 
     verify(clientServiceDelegate, times(2)).instantiateAMProxy(
-        any(String.class));
+        any(InetSocketAddress.class));
   }
   
   @Test
@@ -286,19 +287,19 @@ public class TestClientServiceDelegate {
     Assert.assertEquals("N/A", jobStatus.getJobName());
     
     verify(clientServiceDelegate, times(0)).instantiateAMProxy(
-        any(String.class));
+        any(InetSocketAddress.class));
 
     // Should not reach AM even for second and third times too.
     jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
     Assert.assertNotNull(jobStatus);
     Assert.assertEquals("N/A", jobStatus.getJobName());    
     verify(clientServiceDelegate, times(0)).instantiateAMProxy(
-        any(String.class));
+        any(InetSocketAddress.class));
     jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
     Assert.assertNotNull(jobStatus);
     Assert.assertEquals("N/A", jobStatus.getJobName());    
     verify(clientServiceDelegate, times(0)).instantiateAMProxy(
-        any(String.class));
+        any(InetSocketAddress.class));
 
     // The third time around, app is completed, so should go to JHS
     JobStatus jobStatus1 = clientServiceDelegate.getJobStatus(oldJobId);
@@ -309,7 +310,7 @@ public class TestClientServiceDelegate {
     Assert.assertEquals(1.0f, jobStatus1.getReduceProgress());
     
     verify(clientServiceDelegate, times(0)).instantiateAMProxy(
-        any(String.class));
+        any(InetSocketAddress.class));
   }
   
   @Test
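
Every Mockito matcher here tracks the new instantiateAMProxy(InetSocketAddress) signature. A compressed sketch of the stubbing pattern the tests rely on, with the proxy names taken from the hunks above:

    // Consecutive stubs: the first call yields the AM1 proxy, later calls AM2.
    doReturn(firstGenAMProxy).doReturn(secondGenAMProxy)
        .when(clientServiceDelegate)
        .instantiateAMProxy(any(InetSocketAddress.class));
    // any(String.class) no longer compiles against the changed signature,
    // which is why every stub and verify was updated in lockstep.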

+ 3 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java

@@ -26,11 +26,9 @@ import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
-import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
 import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer;
@@ -38,11 +36,11 @@ import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.records.DelegationToken;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.util.ProtoUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
@@ -95,9 +93,8 @@ public class TestJHSSecurity {
     // Now try talking to JHS using the delegation token
     UserGroupInformation ugi =
         UserGroupInformation.createRemoteUser("TheDarkLord");
-    ugi.addToken(new Token<MRDelegationTokenIdentifier>(token.getIdentifier()
-      .array(), token.getPassword().array(), new Text(token.getKind()),
-      new Text(token.getService())));
+    ugi.addToken(ProtoUtils.convertFromProtoFormat(
+        token, jobHistoryServer.getClientService().getBindAddress()));
     final YarnRPC rpc = YarnRPC.create(conf);
     MRClientProtocol userUsingDT =
         ugi.doAs(new PrivilegedAction<MRClientProtocol>() {

+ 3 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java

@@ -47,6 +47,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.SaslInputStream;
 import org.apache.hadoop.security.SaslRpcClient;
 import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import org.apache.log4j.Level;
@@ -98,10 +99,8 @@ public class TestUmbilicalProtocolWithJobToken {
     JobTokenIdentifier tokenId = new JobTokenIdentifier(new Text(jobId));
     Token<JobTokenIdentifier> token = new Token<JobTokenIdentifier>(tokenId, sm);
     sm.addTokenForJob(jobId, token);
-    Text host = new Text(addr.getAddress().getHostAddress() + ":"
-        + addr.getPort());
-    token.setService(host);
-    LOG.info("Service IP address for token is " + host);
+    SecurityUtil.setTokenService(token, addr);
+    LOG.info("Service address for token is " + token.getService());
     current.addToken(token);
     current.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
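
SecurityUtil.setTokenService replaces the hand-built host:port Text in several places across this change. A before/after sketch, assuming any resolved InetSocketAddress addr:

    // Old pattern, repeated at each call site:
    //   token.setService(new Text(
    //       addr.getAddress().getHostAddress() + ":" + addr.getPort()));
    // New pattern: one call applies the shared ip:port convention.
    SecurityUtil.setTokenService(token, addr);
    LOG.info("Service address for token is " + token.getService());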

+ 1 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java

@@ -43,7 +43,7 @@ import org.apache.hadoop.yarn.api.ContainerManager;
  */
 @Public
 @Stable
-public interface ContainerToken {
+public interface ContainerToken extends DelegationToken {
   /**
    * Get the token identifier.
    * @return token identifier

+ 25 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java

@@ -18,11 +18,17 @@
 
 package org.apache.hadoop.yarn.util;
 
+import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
@@ -192,4 +198,23 @@ public class ProtoUtils {
     return ApplicationAccessType.valueOf(e.name().replace(
         APP_ACCESS_TYPE_PREFIX, ""));
   }
+
+  /**
+   * Convert a protobuf token into a rpc token and set its service
+   * 
+   * @param protoToken the yarn token
+   * @param serviceAddr the connect address for the service
+   * @return rpc token
+   */
+  public static <T extends TokenIdentifier> Token<T>
+  convertFromProtoFormat(DelegationToken protoToken, InetSocketAddress serviceAddr) {
+    Token<T> token = new Token<T>(protoToken.getIdentifier().array(),
+                                  protoToken.getPassword().array(),
+                                  new Text(protoToken.getKind()),
+                                  new Text(protoToken.getService()));
+    if (serviceAddr != null) {
+      SecurityUtil.setTokenService(token, serviceAddr);
+    }
+    return token;
+  }
 }
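
The new convertFromProtoFormat collapses the four-argument Token construction that previously appeared at every call site. A hedged usage sketch, with hsProxy standing in for any MRClientProtocol client as in the YARNRunner hunk above:

    DelegationToken proto =
        hsProxy.getDelegationToken(request).getDelegationToken();
    // Builds the RPC token and, when the address is non-null, stamps its
    // service in one step; a null address skips setTokenService entirely.
    Token<?> token =
        ProtoUtils.convertFromProtoFormat(proto, hsProxy.getConnectAddress());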

+ 4 - 3
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -275,10 +276,10 @@ public class BuilderUtils {
     containerToken.setKind(ContainerTokenIdentifier.KIND.toString());
     containerToken.setPassword(password);
     // RPC layer client expects ip:port as service for tokens
-    InetSocketAddress addr = NetUtils.createSocketAddr(nodeId.getHost(),
+    InetSocketAddress addr = NetUtils.createSocketAddrForHost(nodeId.getHost(),
         nodeId.getPort());
-    containerToken.setService(addr.getAddress().getHostAddress() + ":"
-        + addr.getPort());
+    // NOTE: use SecurityUtil.setTokenService if this becomes a "real" token 
+    containerToken.setService(SecurityUtil.buildTokenService(addr).toString());
     return containerToken;
   }
 

+ 1 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java

@@ -464,8 +464,7 @@ public class ClientRMService extends AbstractService implements
               realRMDTtoken.getIdentifier(),
               realRMDTtoken.getKind().toString(),
               realRMDTtoken.getPassword(),
-              clientBindAddress.getAddress().getHostAddress() + ":"
-              + clientBindAddress.getPort()
+              realRMDTtoken.getService().toString()
               ));
       return response;
     } catch(IOException io) {

+ 15 - 18
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java

@@ -32,9 +32,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataInputByteBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.StringUtils;
@@ -46,7 +46,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -61,6 +61,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent;
+import org.apache.hadoop.yarn.util.ProtoUtils;
 
 /**
  * The launch of the AM itself.
@@ -131,27 +132,25 @@ public class AMLauncher implements Runnable {
 
     Container container = application.getMasterContainer();
 
-    final String containerManagerBindAddress = container.getNodeId().toString();
+    final NodeId node = container.getNodeId();
+    final InetSocketAddress containerManagerBindAddress =
+        NetUtils.createSocketAddrForHost(node.getHost(), node.getPort());
 
     final YarnRPC rpc = YarnRPC.create(conf); // TODO: Don't create again and again.
 
     UserGroupInformation currentUser = UserGroupInformation
         .createRemoteUser(containerId.toString());
     if (UserGroupInformation.isSecurityEnabled()) {
-      ContainerToken containerToken = container.getContainerToken();
       Token<ContainerTokenIdentifier> token =
-          new Token<ContainerTokenIdentifier>(
-              containerToken.getIdentifier().array(),
-              containerToken.getPassword().array(), new Text(
-                  containerToken.getKind()), new Text(
-                  containerToken.getService()));
+          ProtoUtils.convertFromProtoFormat(container.getContainerToken(),
+                                            containerManagerBindAddress);
       currentUser.addToken(token);
     }
     return currentUser.doAs(new PrivilegedAction<ContainerManager>() {
       @Override
       public ContainerManager run() {
         return (ContainerManager) rpc.getProxy(ContainerManager.class,
-            NetUtils.createSocketAddr(containerManagerBindAddress), conf);
+            containerManagerBindAddress, conf);
       }
     });
   }
@@ -218,22 +217,21 @@ public class AMLauncher implements Runnable {
       Token<ApplicationTokenIdentifier> token =
           new Token<ApplicationTokenIdentifier>(id,
               this.rmContext.getApplicationTokenSecretManager());
-      InetSocketAddress unresolvedAddr = conf.getSocketAddr(
+      InetSocketAddress serviceAddr = conf.getSocketAddr(
           YarnConfiguration.RM_SCHEDULER_ADDRESS,
           YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
           YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
-      String resolvedAddr =
-          unresolvedAddr.getAddress().getHostAddress() + ":"
-              + unresolvedAddr.getPort();
-      token.setService(new Text(resolvedAddr));
+      // normally the client should set the service after acquiring the token,
+      // but this token is directly provided to the tasks
+      SecurityUtil.setTokenService(token, serviceAddr);
       String appMasterTokenEncoded = token.encodeToUrlString();
-      LOG.debug("Putting appMaster token in env : " + appMasterTokenEncoded);
+      LOG.debug("Putting appMaster token in env : " + token);
       environment.put(
           ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME,
           appMasterTokenEncoded);
 
       // Add the RM token
-      credentials.addToken(new Text(resolvedAddr), token);
+      credentials.addToken(token.getService(), token);
       DataOutputBuffer dob = new DataOutputBuffer();
       credentials.writeTokenStorageToStream(dob);
       container.setContainerTokens(
@@ -245,7 +243,6 @@ public class AMLauncher implements Runnable {
           this.clientToAMSecretManager.getMasterKey(identifier);
       String encoded =
           Base64.encodeBase64URLSafeString(clientSecretKey.getEncoded());
-      LOG.debug("The encoded client secret-key to be put in env : " + encoded);
       environment.put(
           ApplicationConstants.APPLICATION_CLIENT_SECRET_ENV_NAME, 
           encoded);
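
The launcher puts the URL-safe token string into the container environment and no longer logs the encoded client secret. A sketch of the consuming side inside the ApplicationMaster; this is assumed consumer code, not part of the commit:

    static void pickUpAmToken() throws java.io.IOException {
      String encoded = System.getenv(
          ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME);
      Token<ApplicationTokenIdentifier> amToken =
          new Token<ApplicationTokenIdentifier>();
      amToken.decodeFromUrlString(encoded);  // inverse of encodeToUrlString()
      // The service was already stamped via SecurityUtil.setTokenService,
      // so the token is usable for RPC to the scheduler as-is.
      UserGroupInformation.getCurrentUser().addToken(amToken);
    }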

+ 10 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java

@@ -55,7 +55,15 @@ public class AppBlock extends HtmlBlock {
       puts("Bad request: requires application ID");
       return;
     }
-    ApplicationId appID = Apps.toAppID(aid);
+
+    ApplicationId appID = null;
+    try {
+      appID = Apps.toAppID(aid);
+    } catch (Exception e) {
+      puts("Invalid Application ID: " + aid);
+      return;
+    }
+
     RMContext context = getInstance(RMContext.class);
     RMApp rmApp = context.getRMApps().get(appID);
     if (rmApp == null) {
@@ -74,7 +82,7 @@ public class AppBlock extends HtmlBlock {
         && !this.aclsManager.checkAccess(callerUGI,
             ApplicationAccessType.VIEW_APP, app.getUser(), appID)) {
       puts("You (User " + remoteUser
-          + ") are not authorized to view the logs for application " + appID);
+          + ") are not authorized to view application " + appID);
       return;
     }
 

+ 4 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java

@@ -89,9 +89,11 @@ class AppsBlock extends HtmlBlock {
           td(appInfo.getName()).
           td(appInfo.getQueue()).
           td().
-            br().$title(startTime)._()._(startTime)._().
+            br().$title(String.valueOf(appInfo.getStartTime()))._().
+            _(startTime)._().
           td().
-          br().$title(finishTime)._()._(finishTime)._().
+            br().$title(String.valueOf(appInfo.getFinishTime()))._().
+            _(finishTime)._().
           td(appInfo.getState()).
           td(appInfo.getFinalStatus()).
           td().
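
Together with the AppsList and RmView hunks below, this makes the time columns sortable: the cell's title attribute now carries the raw epoch millis while the cell text keeps the formatted date. An illustration of the assumed rendered output and the matching column type:

    // Assumed rendered cell (illustration, not committed code):
    //   <td><br title="1336798202000"/>12-May-2012 06:10:02</td>
    // RmView below switches the column definition to
    String sortableCol = "{sType:'title-numeric'}";
    // so DataTables orders rows by the numeric title attribute rather than
    // by the formatted date string shown to the user.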

+ 4 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java

@@ -77,8 +77,10 @@ class AppsList implements ToJSON {
           append(escapeHtml(appInfo.getUser())).append(_SEP).
           append(escapeJavaScript(escapeHtml(appInfo.getName()))).append(_SEP).
           append(escapeHtml(appInfo.getQueue())).append(_SEP);
-      appendSortable(out, startTime).append(startTime).append(_SEP);
-      appendSortable(out, finishTime).append(finishTime).append(_SEP).
+      appendSortable(out, appInfo.getStartTime()).
+          append(startTime).append(_SEP);
+      appendSortable(out, appInfo.getFinishTime()).
+          append(finishTime).append(_SEP).
           append(appInfo.getState()).append(_SEP).
           append(appInfo.getFinalStatus()).append(_SEP);
       appendProgressBar(out, appInfo.getProgress()).append(_SEP);

+ 2 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java

@@ -62,10 +62,10 @@ public class RmView extends TwoColumnLayout {
 
   private String appsTableInit() {
     AppsList list = getInstance(AppsList.class);
-    // id, user, name, queue, starttime, finishtime, state, progress, ui
+    // id, user, name, queue, starttime, finishtime, state, status, progress, ui
     StringBuilder init = tableInit().
         append(", aoColumns:[{sType:'title-numeric'}, null, null, null, ").
-        append("null, null , null, ").
+        append("{sType:'title-numeric'}, {sType:'title-numeric'} , null, ").
         append("null,{sType:'title-numeric', bSearchable:false}, null]");
 
     // Sort by id upon page load

+ 0 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java

@@ -401,7 +401,6 @@ public class TestContainerManagerSecurity {
           appTokenSecretManager);
     SecurityUtil.setTokenService(appToken, schedulerAddr);
     currentUser.addToken(appToken);
-    SecurityUtil.setTokenService(appToken, schedulerAddr);
     
     AMRMProtocol scheduler = currentUser
         .doAs(new PrivilegedAction<AMRMProtocol>() {

+ 6 - 6
hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java

@@ -144,7 +144,7 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
 
   /** {@inheritDoc} */
   @Override
-  public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode,
+  public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
       Block block, short replicationFactor,
       Collection<DatanodeDescriptor> first,
       Collection<DatanodeDescriptor> second) {
@@ -425,7 +425,7 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
   }
 
   /**
-   * Cache results for FSInodeInfo.getFullPathName()
+   * Cache results for getFullPathName()
    */
   static class CachedFullPathNames {
     FSNamesystem namesystem;
@@ -446,8 +446,8 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
       };
 
     static private class INodeWithHashCode {
-      FSInodeInfo inode;
-      INodeWithHashCode(FSInodeInfo inode) {
+      BlockCollection inode;
+      INodeWithHashCode(BlockCollection inode) {
         this.inode = inode;
       }
       @Override
@@ -459,11 +459,11 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
         return System.identityHashCode(inode);
       }
       String getFullPathName() {
-        return inode.getFullPathName();
+        return inode.getName();
       }
     }
 
-    public String get(FSInodeInfo inode) throws IOException {
+    public String get(BlockCollection inode) throws IOException {
       return cacheInternal.get(new INodeWithHashCode(inode));
     }
   }

+ 5 - 6
hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java

@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.CachedFullPathNames;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.CachedLocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.FileType;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeRaidTestUtil;
@@ -241,7 +240,7 @@ public class TestBlockPlacementPolicyRaid {
       // test full path cache
       CachedFullPathNames cachedFullPathNames =
           new CachedFullPathNames(namesystem);
-      final FSInodeInfo[] inodes = NameNodeRaidTestUtil.getFSInodeInfo(
+      final BlockCollection[] inodes = NameNodeRaidTestUtil.getBlockCollections(
           namesystem, file1, file2);
 
       verifyCachedFullPathNameResult(cachedFullPathNames, inodes[0]);
@@ -477,14 +476,14 @@ public class TestBlockPlacementPolicyRaid {
   }
 
   private void verifyCachedFullPathNameResult(
-      CachedFullPathNames cachedFullPathNames, FSInodeInfo inode)
+      CachedFullPathNames cachedFullPathNames, BlockCollection inode)
   throws IOException {
-    String res1 = inode.getFullPathName();
+    String res1 = inode.getName();
     String res2 = cachedFullPathNames.get(inode);
     LOG.info("Actual path name: " + res1);
     LOG.info("Cached path name: " + res2);
     Assert.assertEquals(cachedFullPathNames.get(inode),
-                        inode.getFullPathName());
+                        inode.getName());
   }
 
   private void verifyCachedBlocksResult(CachedLocatedBlocks cachedBlocks,
@@ -503,7 +502,7 @@ public class TestBlockPlacementPolicyRaid {
   private Collection<LocatedBlock> getCompanionBlocks(
       FSNamesystem namesystem, BlockPlacementPolicyRaid policy,
       ExtendedBlock block) throws IOException {
-    INodeFile inode = blockManager.blocksMap.getINode(block
+    INodeFile inode = (INodeFile)blockManager.blocksMap.getINode(block
         .getLocalBlock());
     FileType type = policy.getFileType(inode.getFullPathName());
     return policy.getCompanionBlocks(inode.getFullPathName(), type,

+ 4 - 3
hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java

@@ -18,16 +18,17 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 
 public class NameNodeRaidTestUtil {
-  public static FSInodeInfo[] getFSInodeInfo(final FSNamesystem namesystem,
+  public static BlockCollection[] getBlockCollections(final FSNamesystem namesystem,
       final String... files) throws UnresolvedLinkException {
-    final FSInodeInfo[] inodes = new FSInodeInfo[files.length];
+    final BlockCollection[] inodes = new BlockCollection[files.length];
     final FSDirectory dir = namesystem.dir; 
     dir.readLock();
     try {
       for(int i = 0; i < files.length; i++) {
-        inodes[i] = dir.rootDir.getNode(files[i], true);
+        inodes[i] = (BlockCollection)dir.rootDir.getNode(files[i], true);
       }
       return inodes;
     } finally {
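
The raid contrib now programs against the block manager's BlockCollection abstraction rather than the namenode-internal FSInodeInfo, with getName() taking over from getFullPathName(). A sketch of the interface shape these call sites imply; only getName() is confirmed by the hunks above, the second method is illustrative:

    // Illustrative shape; see BlockCollection in hadoop-hdfs for the real one.
    public interface BlockCollection {
      String getName();         // full path name; replaces getFullPathName()
      BlockInfo[] getBlocks();  // assumed accessor for the collection's blocks
    }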

+ 1 - 1
hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java

@@ -117,7 +117,7 @@ public class HadoopArchives implements Tool {
     // will when running the mapreduce job.
     String testJar = System.getProperty(TEST_HADOOP_ARCHIVES_JAR_PATH, null);
     if (testJar != null) {
-      ((JobConf)conf).setJar(testJar);
+      this.conf.setJar(testJar);
     }
   }
 

+ 6 - 3
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java

@@ -136,10 +136,13 @@ public class DistCp extends Configured implements Tool {
 
     Job job = null;
     try {
-      metaFolder = createMetaFolderPath();
-      jobFS = metaFolder.getFileSystem(getConf());
+      synchronized(this) {
+        //Don't cleanup while we are setting up.
+        metaFolder = createMetaFolderPath();
+        jobFS = metaFolder.getFileSystem(getConf());
 
-      job = createJob();
+        job = createJob();
+      }
       createInputFileListing(job);
 
       job.submit();
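
The synchronized block keeps the metaFolder/jobFS setup atomic with respect to cleanup, which presumably runs from a shutdown hook racing job submission. A minimal sketch of the matching cleanup side, assuming a method of this shape exists:

    // Hypothetical counterpart (assumed shape, not shown in this diff):
    private synchronized void cleanup() {
      try {
        if (metaFolder != null && jobFS != null) {
          // Cannot interleave with the setup block above: it either sees the
          // fully initialized pair or runs before setup takes the lock.
          jobFS.delete(metaFolder, true);
        }
      } catch (IOException e) {
        LOG.error("Exception encountered while cleaning up " + metaFolder, e);
      }
    }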

+ 7 - 7
hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCp.java → hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java

@@ -78,8 +78,8 @@ import org.apache.hadoop.util.ToolRunner;
  * A Map-reduce program to recursively copy directories between
  * different file-systems.
  */
-public class DistCp implements Tool {
-  public static final Log LOG = LogFactory.getLog(DistCp.class);
+public class DistCpV1 implements Tool {
+  public static final Log LOG = LogFactory.getLog(DistCpV1.class);
 
   private static final String NAME = "distcp";
 
@@ -221,7 +221,7 @@ public class DistCp implements Tool {
     return conf;
   }
 
-  public DistCp(Configuration conf) {
+  public DistCpV1(Configuration conf) {
     setConf(conf);
   }
 
@@ -565,7 +565,7 @@ public class DistCp implements Tool {
     private void updateDestStatus(FileStatus src, FileStatus dst
         ) throws IOException {
       if (preserve_status) {
-        DistCp.updateDestStatus(src, dst, preseved, destFileSys);
+        DistCpV1.updateDestStatus(src, dst, preseved, destFileSys);
       }
     }
 
@@ -1049,8 +1049,8 @@ public class DistCp implements Tool {
   }
 
   public static void main(String[] args) throws Exception {
-    JobConf job = new JobConf(DistCp.class);
-    DistCp distcp = new DistCp(job);
+    JobConf job = new JobConf(DistCpV1.class);
+    DistCpV1 distcp = new DistCpV1(job);
     int res = ToolRunner.run(distcp, args);
     System.exit(res);
   }
@@ -1117,7 +1117,7 @@ public class DistCp implements Tool {
 
   //Job configuration
   private static JobConf createJobConf(Configuration conf) {
-    JobConf jobconf = new JobConf(conf, DistCp.class);
+    JobConf jobconf = new JobConf(conf, DistCpV1.class);
     jobconf.setJobName(conf.get("mapred.job.name", NAME));
 
     // turn off speculative execution, because DFS doesn't handle

+ 3 - 3
hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java

@@ -65,9 +65,9 @@ import org.apache.hadoop.mapreduce.lib.map.RegexMapper;
 public class Logalyzer {
   // Constants
   private static Configuration fsConfig = new Configuration();
-  public static String SORT_COLUMNS = 
+  public static final String SORT_COLUMNS = 
     "logalizer.logcomparator.sort.columns";
-  public static String COLUMN_SEPARATOR = 
+  public static final String COLUMN_SEPARATOR = 
     "logalizer.logcomparator.column.separator";
   
   static {
@@ -194,7 +194,7 @@ public class Logalyzer {
     throws IOException
   {
     String destURL = FileSystem.getDefaultUri(fsConfig) + archiveDirectory;
-    DistCp.copy(new JobConf(fsConfig), logListURI, destURL, null, true, false);
+    DistCpV1.copy(new JobConf(fsConfig), logListURI, destURL, null, true, false);
   }
   
   /**

+ 36 - 36
hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java

@@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.tools.DistCp;
+import org.apache.hadoop.tools.DistCpV1;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Level;
 import org.junit.Ignore;
@@ -64,7 +64,7 @@ public class TestCopyFiles extends TestCase {
         ).getLogger().setLevel(Level.OFF);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
     ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)DistCp.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)DistCpV1.LOG).getLogger().setLevel(Level.ALL);
   }
   
   static final URI LOCAL_FS = URI.create("file:///");
@@ -267,7 +267,7 @@ public class TestCopyFiles extends TestCase {
     Configuration conf = new Configuration();
     FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
     MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat");
-    ToolRunner.run(new DistCp(new Configuration()),
+    ToolRunner.run(new DistCpV1(new Configuration()),
                            new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
                                          "file:///"+TEST_ROOT_DIR+"/destdat"});
     assertTrue("Source and destination directories do not match.",
@@ -287,7 +287,7 @@ public class TestCopyFiles extends TestCase {
       namenode = FileSystem.getDefaultUri(conf).toString();
       if (namenode.startsWith("hdfs://")) {
         MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
-        ToolRunner.run(new DistCp(conf), new String[] {
+        ToolRunner.run(new DistCpV1(conf), new String[] {
                                          "-log",
                                          namenode+"/logs",
                                          namenode+"/srcdat",
@@ -320,7 +320,7 @@ public class TestCopyFiles extends TestCase {
         FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
         fs.mkdirs(new Path("/empty"));
 
-        ToolRunner.run(new DistCp(conf), new String[] {
+        ToolRunner.run(new DistCpV1(conf), new String[] {
                                          "-log",
                                          namenode+"/logs",
                                          namenode+"/empty",
@@ -347,7 +347,7 @@ public class TestCopyFiles extends TestCase {
       final String namenode = hdfs.getUri().toString();
       if (namenode.startsWith("hdfs://")) {
         MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat");
-        ToolRunner.run(new DistCp(conf), new String[] {
+        ToolRunner.run(new DistCpV1(conf), new String[] {
                                          "-log",
                                          namenode+"/logs",
                                          "file:///"+TEST_ROOT_DIR+"/srcdat",
@@ -376,7 +376,7 @@ public class TestCopyFiles extends TestCase {
       final String namenode = FileSystem.getDefaultUri(conf).toString();
       if (namenode.startsWith("hdfs://")) {
         MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
-        ToolRunner.run(new DistCp(conf), new String[] {
+        ToolRunner.run(new DistCpV1(conf), new String[] {
                                          "-log",
                                          "/logs",
                                          namenode+"/srcdat",
@@ -403,7 +403,7 @@ public class TestCopyFiles extends TestCase {
       final String namenode = hdfs.getUri().toString();
       if (namenode.startsWith("hdfs://")) {
         MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
-        ToolRunner.run(new DistCp(conf), new String[] {
+        ToolRunner.run(new DistCpV1(conf), new String[] {
                                          "-p",
                                          "-log",
                                          namenode+"/logs",
@@ -420,7 +420,7 @@ public class TestCopyFiles extends TestCase {
         updateFiles(cluster.getFileSystem(), "/srcdat", files, nupdate);
         deldir(hdfs, "/logs");
 
-        ToolRunner.run(new DistCp(conf), new String[] {
+        ToolRunner.run(new DistCpV1(conf), new String[] {
                                          "-prbugp", // no t to avoid preserving mod. times
                                          "-update",
                                          "-log",
@@ -433,7 +433,7 @@ public class TestCopyFiles extends TestCase {
                  checkUpdate(hdfs, dchkpoint, "/destdat", files, nupdate));
 
         deldir(hdfs, "/logs");
-        ToolRunner.run(new DistCp(conf), new String[] {
+        ToolRunner.run(new DistCpV1(conf), new String[] {
                                          "-prbugp", // no t to avoid preserving mod. times
                                          "-overwrite",
                                          "-log",
@@ -483,7 +483,7 @@ public class TestCopyFiles extends TestCase {
         out.close();
         
         // Run with -skipcrccheck option
-        ToolRunner.run(new DistCp(conf), new String[] {
+        ToolRunner.run(new DistCpV1(conf), new String[] {
           "-p",
           "-update",
           "-skipcrccheck",
@@ -503,7 +503,7 @@ public class TestCopyFiles extends TestCase {
         deldir(hdfs, "/logs");
 
         // Run without the option        
-        ToolRunner.run(new DistCp(conf), new String[] {
+        ToolRunner.run(new DistCpV1(conf), new String[] {
           "-p",
           "-update",
           "-log",
@@ -533,14 +533,14 @@ public class TestCopyFiles extends TestCase {
     final FileSystem localfs = FileSystem.get(LOCAL_FS, new Configuration());
     try {    
       MyFile[] files = createFiles(localfs, TEST_ROOT_DIR+"/srcdat");
-      ToolRunner.run(new DistCp(new Configuration()),
+      ToolRunner.run(new DistCpV1(new Configuration()),
           new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
                         "file:///"+TEST_ROOT_DIR+"/src2/srcdat"});
       assertTrue("Source and destination directories do not match.",
                  checkFiles(localfs, TEST_ROOT_DIR+"/src2/srcdat", files));
   
-      assertEquals(DistCp.DuplicationException.ERROR_CODE,
-          ToolRunner.run(new DistCp(new Configuration()),
+      assertEquals(DistCpV1.DuplicationException.ERROR_CODE,
+          ToolRunner.run(new DistCpV1(new Configuration()),
           new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
                         "file:///"+TEST_ROOT_DIR+"/src2/srcdat",
                         "file:///"+TEST_ROOT_DIR+"/destdat",}));
@@ -558,7 +558,7 @@ public class TestCopyFiles extends TestCase {
     try {    
       MyFile[] files = {createFile(root, fs)};
       //copy a dir with a single file
-      ToolRunner.run(new DistCp(new Configuration()),
+      ToolRunner.run(new DistCpV1(new Configuration()),
           new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
                         "file:///"+TEST_ROOT_DIR+"/destdat"});
       assertTrue("Source and destination directories do not match.",
@@ -568,7 +568,7 @@ public class TestCopyFiles extends TestCase {
       String fname = files[0].getName();
       Path p = new Path(root, fname);
       FileSystem.LOG.info("fname=" + fname + ", exists? " + fs.exists(p));
-      ToolRunner.run(new DistCp(new Configuration()),
+      ToolRunner.run(new DistCpV1(new Configuration()),
           new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat/"+fname,
                         "file:///"+TEST_ROOT_DIR+"/dest2/"+fname});
       assertTrue("Source and destination directories do not match.",
@@ -578,17 +578,17 @@ public class TestCopyFiles extends TestCase {
       String[] args = {"-update", "file:///"+TEST_ROOT_DIR+"/srcdat/"+fname,
           "file:///"+TEST_ROOT_DIR+"/dest2/"+fname};
       Configuration conf = new Configuration();
-      JobConf job = new JobConf(conf, DistCp.class);
-      DistCp.Arguments distcpArgs = DistCp.Arguments.valueOf(args, conf);
+      JobConf job = new JobConf(conf, DistCpV1.class);
+      DistCpV1.Arguments distcpArgs = DistCpV1.Arguments.valueOf(args, conf);
       assertFalse("Single file update failed to skip copying even though the " 
-          + "file exists at destination.", DistCp.setup(conf, job, distcpArgs));
+          + "file exists at destination.", DistCpV1.setup(conf, job, distcpArgs));
       
       //copy single file to existing dir
       deldir(fs, TEST_ROOT_DIR+"/dest2");
       fs.mkdirs(new Path(TEST_ROOT_DIR+"/dest2"));
       MyFile[] files2 = {createFile(root, fs, 0)};
       String sname = files2[0].getName();
-      ToolRunner.run(new DistCp(new Configuration()),
+      ToolRunner.run(new DistCpV1(new Configuration()),
           new String[] {"-update",
                         "file:///"+TEST_ROOT_DIR+"/srcdat/"+sname,
                         "file:///"+TEST_ROOT_DIR+"/dest2/"});
@@ -596,7 +596,7 @@ public class TestCopyFiles extends TestCase {
           checkFiles(fs, TEST_ROOT_DIR+"/dest2", files2));     
       updateFiles(fs, TEST_ROOT_DIR+"/srcdat", files2, 1);
       //copy single file to existing dir w/ dst name conflict
-      ToolRunner.run(new DistCp(new Configuration()),
+      ToolRunner.run(new DistCpV1(new Configuration()),
           new String[] {"-update",
                         "file:///"+TEST_ROOT_DIR+"/srcdat/"+sname,
                         "file:///"+TEST_ROOT_DIR+"/dest2/"});
@@ -621,7 +621,7 @@ public class TestCopyFiles extends TestCase {
       namenode = FileSystem.getDefaultUri(conf).toString();
       if (namenode.startsWith("hdfs://")) {
         MyFile[] files = createFiles(URI.create(namenode), "/basedir/middle/srcdat");
-        ToolRunner.run(new DistCp(conf), new String[] {
+        ToolRunner.run(new DistCpV1(conf), new String[] {
                                          "-basedir",
                                          "/basedir",
                                          namenode+"/basedir/middle/srcdat",
@@ -651,7 +651,7 @@ public class TestCopyFiles extends TestCase {
         for(int i = 0; i < srcstat.length; i++) {
           fs.setOwner(srcstat[i].getPath(), "u" + i, null);
         }
-        ToolRunner.run(new DistCp(conf),
+        ToolRunner.run(new DistCpV1(conf),
             new String[]{"-pu", nnUri+"/srcdat", nnUri+"/destdat"});
         assertTrue("Source and destination directories do not match.",
                    checkFiles(fs, "/destdat", files));
@@ -670,7 +670,7 @@ public class TestCopyFiles extends TestCase {
         for(int i = 0; i < srcstat.length; i++) {
           fs.setOwner(srcstat[i].getPath(), null, "g" + i);
         }
-        ToolRunner.run(new DistCp(conf),
+        ToolRunner.run(new DistCpV1(conf),
             new String[]{"-pg", nnUri+"/srcdat", nnUri+"/destdat"});
         assertTrue("Source and destination directories do not match.",
                    checkFiles(fs, "/destdat", files));
@@ -692,7 +692,7 @@ public class TestCopyFiles extends TestCase {
           fs.setPermission(srcstat[i].getPath(), permissions[i]);
         }
 
-        ToolRunner.run(new DistCp(conf),
+        ToolRunner.run(new DistCpV1(conf),
             new String[]{"-pp", nnUri+"/srcdat", nnUri+"/destdat"});
         assertTrue("Source and destination directories do not match.",
                    checkFiles(fs, "/destdat", files));
@@ -715,7 +715,7 @@ public class TestCopyFiles extends TestCase {
           fs.setTimes(srcstat[i].getPath(), 40, 50);
         }
 
-        ToolRunner.run(new DistCp(conf),
+        ToolRunner.run(new DistCpV1(conf),
             new String[]{"-pt", nnUri+"/srcdat", nnUri+"/destdat"});
 
         FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
@@ -753,7 +753,7 @@ public class TestCopyFiles extends TestCase {
       }
       Configuration job = mr.createJobConf();
       job.setLong("distcp.bytes.per.map", totsize / 3);
-      ToolRunner.run(new DistCp(job),
+      ToolRunner.run(new DistCpV1(job),
           new String[] {"-m", "100",
                         "-log",
                         namenode+"/logs",
@@ -771,7 +771,7 @@ public class TestCopyFiles extends TestCase {
 
       deldir(fs, "/destdat");
       deldir(fs, "/logs");
-      ToolRunner.run(new DistCp(job),
+      ToolRunner.run(new DistCpV1(job),
           new String[] {"-m", "1",
                         "-log",
                         namenode+"/logs",
@@ -795,7 +795,7 @@ public class TestCopyFiles extends TestCase {
       cluster = new MiniDFSCluster(conf, 2, true, null);
       final String nnUri = FileSystem.getDefaultUri(conf).toString();
       final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
-      final DistCp distcp = new DistCp(conf);
+      final DistCpV1 distcp = new DistCpV1(conf);
       final FsShell shell = new FsShell(conf);  
 
       final String srcrootdir =  "/src_root";
@@ -927,9 +927,9 @@ public class TestCopyFiles extends TestCase {
       final String srcrootdir =  srcrootpath.toString();
       final Path dstrootpath = new Path(home, "dst_root"); 
       final String dstrootdir =  dstrootpath.toString();
-      final DistCp distcp = USER_UGI.doAs(new PrivilegedExceptionAction<DistCp>() {
-        public DistCp run() {
-          return new DistCp(userConf);
+      final DistCpV1 distcp = USER_UGI.doAs(new PrivilegedExceptionAction<DistCpV1>() {
+        public DistCpV1 run() {
+          return new DistCpV1(userConf);
         }
       });
 
@@ -961,7 +961,7 @@ public class TestCopyFiles extends TestCase {
       final String nnUri = nnURI.toString();
       final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
 
-      final DistCp distcp = new DistCp(conf);
+      final DistCpV1 distcp = new DistCpV1(conf);
       final FsShell shell = new FsShell(conf);  
 
       final String srcrootdir = "/src_root";
@@ -1035,7 +1035,7 @@ public class TestCopyFiles extends TestCase {
         MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
         String destdir = TEST_ROOT_DIR + "/destdat";
         MyFile[] localFiles = createFiles(localfs, destdir);
-        ToolRunner.run(new DistCp(conf), new String[] {
+        ToolRunner.run(new DistCpV1(conf), new String[] {
                                          "-delete",
                                          "-update",
                                          "-log",
@@ -1066,7 +1066,7 @@ public class TestCopyFiles extends TestCase {
       namenode = FileSystem.getDefaultUri(conf).toString();
       if (namenode.startsWith("hdfs://")) {
         MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
-        ToolRunner.run(new DistCp(conf), new String[] {
+        ToolRunner.run(new DistCpV1(conf), new String[] {
                                          "-log",
                                          namenode+"/logs",
                                          namenode+"/srcdat/*",

+ 31 - 0
hadoop-tools/hadoop-rumen/dev-support/findbugs-exclude.xml

@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<FindBugsFilter>
+  <And>
+    <Class name="org.apache.hadoop.tools.rumen.LoggedJob"/>
+    <Method name="getMapperTriesToSucceed"/>
+    <Bug pattern="EI_EXPOSE_REP"/>
+    <Bug code="EI"/>
+  </And>
+  <And>
+    <Class name="org.apache.hadoop.tools.rumen.ZombieJob"/>
+    <Method name="getInputSplits"/>
+    <Bug pattern="EI_EXPOSE_REP"/>
+    <Bug code="EI"/>
+  </And>
+</FindBugsFilter>

+ 10 - 0
hadoop-tools/hadoop-rumen/pom.xml

@@ -90,6 +90,16 @@
 
   <build>
     <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+         <configuration>
+          <findbugsXmlOutput>true</findbugsXmlOutput>
+          <xmlOutput>true</xmlOutput>
+          <excludeFilterFile>${basedir}/dev-support/findbugs-exclude.xml</excludeFilterFile>
+          <effort>Max</effort>
+        </configuration>
+      </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-antrun-plugin</artifactId>

Some files were not shown because too many files changed in this diff