
YARN-321. Forwarding YARN-321 branch to latest trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/YARN-321@1558872 13f79535-47bb-0310-9956-ffa450edef68
Vinod Kumar Vavilapalli, 11 years ago
Commit fbb94a4e9b
25 changed files with 1279 additions and 328 deletions
  1. +9 -3      hadoop-common-project/hadoop-common/CHANGES.txt
  2. +2 -0      hadoop-common-project/hadoop-common/src/main/bin/hadoop
  3. +1 -1      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
  4. +11 -0     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
  5. +474 -0    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
  6. +5 -0      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java
  7. +4 -0      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  8. +176 -0    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
  9. +10 -2     hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  10. +4 -3     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
  11. +107 -96  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
  12. +103 -91  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
  13. +7 -7     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
  14. +4 -4     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
  15. +4 -4     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  16. +1 -1     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
  17. binary    hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
  18. +84 -84   hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
  19. +3 -0     hadoop-mapreduce-project/CHANGES.txt
  20. +7 -0     hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
  21. +10 -0    hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
  22. +110 -31  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
  23. +139 -0   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
  24. +3 -0     hadoop-yarn-project/CHANGES.txt
  25. +1 -1     hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java

+ 9 - 3
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -98,7 +98,8 @@ Trunk (Unreleased)
     HADOOP-8844. Add a plaintext fs -text test-case.
     (Akira AJISAKA via harsh)
 
-    HADOOP-9432 Add support for markdown .md files in site documentation (stevel)
+    HADOOP-9432 Add support for markdown .md files in site documentation 
+    (stevel)
 
     HADOOP-9186.  test-patch.sh should report build failure to JIRA.
     (Binglin Chang via Colin Patrick McCabe)
@@ -110,6 +111,8 @@ Trunk (Unreleased)
 
     HADOOP-10201. Add listing to KeyProvider API. (Larry McCay via omalley)
 
+    HADOOP-10177. Create CLI tools for managing keys. (Larry McCay via omalley)
+
   BUG FIXES
 
     HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -117,8 +120,8 @@ Trunk (Unreleased)
 
     HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)
 
-    HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
-    (Devaraj K via umamahesh)
+    HADOOP-8177. MBeans shouldn't try to register when it fails to create 
+    MBeanName. (Devaraj K via umamahesh)
 
     HADOOP-8018.  Hudson auto test for HDFS has started throwing javadoc
     (Jon Eagles via bobby)
@@ -287,6 +290,9 @@ Trunk (Unreleased)
 
     HADOOP-10044 Improve the javadoc of rpc code (sanjay Radia)
 
+    HADOOP-10125. no need to process RPC request if the client connection
+    has been dropped (Ming Ma via brandonli)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -104,6 +104,8 @@ case $COMMAND in
       CLASS=org.apache.hadoop.util.VersionInfo
     elif [ "$COMMAND" = "jar" ] ; then
       CLASS=org.apache.hadoop.util.RunJar
+    elif [ "$COMMAND" = "key" ] ; then
+      CLASS=org.apache.hadoop.crypto.key.KeyShell
     elif [ "$COMMAND" = "checknative" ] ; then
       CLASS=org.apache.hadoop.util.NativeLibraryChecker
     elif [ "$COMMAND" = "distcp" ] ; then

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java

@@ -77,7 +77,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
   private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
     this.uri = uri;
     path = unnestUri(uri);
-    fs = FileSystem.get(conf);
+    fs = path.getFileSystem(conf);
     // Get the password from the user's environment
     String pw = System.getenv(KEYSTORE_PASSWORD_NAME);
     if (pw == null) {
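
This one-line fix matters when fs.defaultFS points at HDFS but the keystore URI names a local file: resolving the FileSystem from the unnested keystore path honors the store's own scheme instead of the cluster default. A minimal standalone sketch of the difference, assuming hadoop-common (and, for the last line, an HDFS client) on the classpath; the paths and defaultFS value are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsResolutionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://namenode:8020"); // assumed cluster default
    Path keystore = new Path("file:///tmp/keystore.jceks"); // hypothetical store
    // The fix: resolve against the store's own scheme -> local filesystem.
    System.out.println(keystore.getFileSystem(conf).getUri());
    // The old code: always the configured default -> hdfs://namenode:8020.
    System.out.println(FileSystem.get(conf).getUri());
  }
}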

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java

@@ -244,6 +244,17 @@ public abstract class KeyProvider {
     return new Options(conf);
   }
 
+  /**
+   * Indicates whether this provider represents a store
+   * that is intended for transient use - such as the UserProvider
+   * is. These providers are generally used to provide access to
+   * keying material rather than for long term storage.
+   * @return true if transient, false otherwise
+   */
+  public boolean isTransient() {
+    return false;
+  }
+
   /**
    * Get the key material for a specific version of the key. This method is used
    * when decrypting data.
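
Callers can use the new flag to prefer persistent stores over in-memory ones, as the KeyShell introduced below does in getKeyProvider(). A minimal sketch of that selection logic, assuming only the APIs shown in this commit:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class PersistentProviderSketch {
  // Returns the first persistent provider, mirroring the CLI default.
  static KeyProvider firstPersistent(Configuration conf) throws IOException {
    List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
    for (KeyProvider p : providers) {
      if (!p.isTransient()) {     // skips e.g. UserProvider ("user:///")
        return p;
      }
    }
    return null;                  // only transient providers configured
  }
}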

+ 474 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java

@@ -0,0 +1,474 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.crypto.key;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.security.InvalidParameterException;
+import java.security.NoSuchAlgorithmException;
+import java.util.List;
+
+import javax.crypto.KeyGenerator;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.crypto.key.KeyProvider.Metadata;
+import org.apache.hadoop.crypto.key.KeyProvider.Options;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * This program is the CLI utility for the KeyProvider facilities in Hadoop.
+ */
+public class KeyShell extends Configured implements Tool {
+  final static private String USAGE_PREFIX = "Usage: hadoop key " +
+  		"[generic options]\n";
+  final static private String COMMANDS =
+      "   [--help]\n" +
+      "   [" + CreateCommand.USAGE + "]\n" +
+      "   [" + RollCommand.USAGE + "]\n" +
+      "   [" + DeleteCommand.USAGE + "]\n" +
+      "   [" + ListCommand.USAGE + "]\n";
+
+  private boolean interactive = false;
+  private Command command = null;
+
+  /** allows stdout to be captured if necessary */
+  public PrintStream out = System.out;
+  /** allows stderr to be captured if necessary */
+  public PrintStream err = System.err;
+
+  private boolean userSuppliedProvider = false;
+
+  @Override
+  public int run(String[] args) throws Exception {
+    int exitCode = 0;
+    try {
+      exitCode = init(args);
+      if (exitCode != 0) {
+        return exitCode;
+      }
+      if (command.validate()) {
+          command.execute();
+      } else {
+        exitCode = -1;
+      }
+    } catch (Exception e) {
+      e.printStackTrace(err);
+      return -1;
+    }
+    return exitCode;
+  }
+
+  /**
+   * Parse the command line arguments and initialize the data
+   * <pre>
+   * % hadoop key create keyName [--size size] [--cipher algorithm]
+   *    [--provider providerPath]
+   * % hadoop key roll keyName [--provider providerPath]
+   * % hadoop key list [-provider providerPath]
+   * % hadoop key delete keyName [--provider providerPath] [-i]
+   * </pre>
+   * @param args
+   * @return
+   * @throws IOException
+   */
+  private int init(String[] args) throws IOException {
+    for (int i = 0; i < args.length; i++) { // parse command line
+      if (args[i].equals("create")) {
+        String keyName = args[++i];
+        command = new CreateCommand(keyName);
+        if (keyName.equals("--help")) {
+          printKeyShellUsage();
+          return -1;
+        }
+      } else if (args[i].equals("delete")) {
+        String keyName = args[++i];
+        command = new DeleteCommand(keyName);
+        if (keyName.equals("--help")) {
+          printKeyShellUsage();
+          return -1;
+        }
+      } else if (args[i].equals("roll")) {
+        String keyName = args[++i];
+        command = new RollCommand(keyName);
+        if (keyName.equals("--help")) {
+          printKeyShellUsage();
+          return -1;
+        }
+      } else if (args[i].equals("list")) {
+        command = new ListCommand();
+      } else if (args[i].equals("--size")) {
+        getConf().set(KeyProvider.DEFAULT_BITLENGTH_NAME, args[++i]);
+      } else if (args[i].equals("--cipher")) {
+        getConf().set(KeyProvider.DEFAULT_CIPHER_NAME, args[++i]);
+      } else if (args[i].equals("--provider")) {
+        userSuppliedProvider = true;
+        getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]);
+      } else if (args[i].equals("-i") || (args[i].equals("--interactive"))) {
+        interactive = true;
+      } else if (args[i].equals("--help")) {
+        printKeyShellUsage();
+        return -1;
+      } else {
+        printKeyShellUsage();
+        ToolRunner.printGenericCommandUsage(System.err);
+        return -1;
+      }
+    }
+    return 0;
+  }
+
+  private void printKeyShellUsage() {
+    out.println(USAGE_PREFIX + COMMANDS);
+    if (command != null) {
+      out.println(command.getUsage());
+    }
+    else {
+      out.println("=========================================================" +
+      		"======");
+      out.println(CreateCommand.USAGE + ":\n\n" + CreateCommand.DESC);
+      out.println("=========================================================" +
+          "======");
+      out.println(RollCommand.USAGE + ":\n\n" + RollCommand.DESC);
+      out.println("=========================================================" +
+          "======");
+      out.println(DeleteCommand.USAGE + ":\n\n" + DeleteCommand.DESC);
+      out.println("=========================================================" +
+          "======");
+      out.println(ListCommand.USAGE + ":\n\n" + ListCommand.DESC);
+    }
+  }
+
+  private abstract class Command {
+    protected KeyProvider provider = null;
+
+    public boolean validate() {
+      return true;
+    }
+
+    protected KeyProvider getKeyProvider() {
+      KeyProvider provider = null;
+      List<KeyProvider> providers;
+      try {
+        providers = KeyProviderFactory.getProviders(getConf());
+        if (userSuppliedProvider) {
+          provider = providers.get(0);
+        }
+        else {
+          for (KeyProvider p : providers) {
+            if (!p.isTransient()) {
+              provider = p;
+              break;
+            }
+          }
+        }
+      } catch (IOException e) {
+        e.printStackTrace(err);
+      }
+      return provider;
+    }
+
+    protected byte[] generateKey(int size, String algorithm)
+        throws NoSuchAlgorithmException {
+      out.println("Generating key using size: " + size + " and algorithm: "
+          + algorithm);
+      KeyGenerator keyGenerator = KeyGenerator.getInstance(algorithm);
+      keyGenerator.init(size);
+      byte[] key = keyGenerator.generateKey().getEncoded();
+      return key;
+    }
+
+    protected void printProviderWritten() {
+        out.println(provider.getClass().getName() + " has been updated.");
+    }
+
+    protected void warnIfTransientProvider() {
+      if (provider.isTransient()) {
+        out.println("WARNING: you are modifying a transient provider.");
+      }
+    }
+
+    public abstract void execute() throws Exception;
+
+    public abstract String getUsage();
+  }
+
+  private class ListCommand extends Command {
+    public static final String USAGE = "list <keyname> [--provider] [--help]";
+    public static final String DESC =
+        "The list subcommand displays the keynames contained within \n" +
+        "a particular provider - as configured in core-site.xml or " +
+        "indicated\nthrough the --provider argument.";
+
+    public boolean validate() {
+      boolean rc = true;
+      provider = getKeyProvider();
+      if (provider == null) {
+        out.println("There are no non-transient KeyProviders configured.\n"
+            + "Consider using the --provider option to indicate the provider\n"
+            + "to use. If you want to list a transient provider then you\n"
+            + "you MUST use the --provider argument.");
+        rc = false;
+      }
+      return rc;
+    }
+
+    public void execute() throws IOException {
+      List<String> keys;
+      try {
+        keys = provider.getKeys();
+        out.println("Listing keys for KeyProvider: " + provider.toString());
+        for (String keyName : keys) {
+          out.println(keyName);
+        }
+      } catch (IOException e) {
+        out.println("Cannot list keys for KeyProvider: " + provider.toString()
+            + ": " + e.getMessage());
+        throw e;
+      }
+    }
+
+    @Override
+    public String getUsage() {
+      return USAGE + ":\n\n" + DESC;
+    }
+  }
+
+  private class RollCommand extends Command {
+    public static final String USAGE = "roll <keyname> [--provider] [--help]";
+    public static final String DESC =
+        "The roll subcommand creates a new version of the key specified\n" +
+        "through the <keyname> argument within the provider indicated using\n" +
+        "the --provider argument";
+
+    String keyName = null;
+
+    public RollCommand(String keyName) {
+      this.keyName = keyName;
+    }
+
+    public boolean validate() {
+      boolean rc = true;
+      provider = getKeyProvider();
+      if (provider == null) {
+        out.println("There are no valid KeyProviders configured.\n"
+            + "Key will not be rolled.\n"
+            + "Consider using the --provider option to indicate the provider"
+            + " to use.");
+        rc = false;
+      }
+      if (keyName == null) {
+        out.println("There is no keyName specified. Please provide the" +
+            "mandatory <keyname>. See the usage description with --help.");
+        rc = false;
+      }
+      return rc;
+    }
+
+    public void execute() throws NoSuchAlgorithmException, IOException {
+      try {
+        Metadata md = provider.getMetadata(keyName);
+        warnIfTransientProvider();
+        out.println("Rolling key version from KeyProvider: "
+            + provider.toString() + " for key name: " + keyName);
+        try {
+          byte[] material = null;
+          material = generateKey(md.getBitLength(), md.getAlgorithm());
+          provider.rollNewVersion(keyName, material);
+          out.println(keyName + " has been successfully rolled.");
+          provider.flush();
+          printProviderWritten();
+        } catch (NoSuchAlgorithmException e) {
+          out.println("Cannot roll key: " + keyName + " within KeyProvider: "
+              + provider.toString());
+          throw e;
+        }
+      } catch (IOException e1) {
+        out.println("Cannot roll key: " + keyName + " within KeyProvider: "
+            + provider.toString());
+        throw e1;
+      }
+    }
+
+    @Override
+    public String getUsage() {
+      return USAGE + ":\n\n" + DESC;
+    }
+  }
+
+  private class DeleteCommand extends Command {
+    public static final String USAGE = "delete <keyname> [--provider] [--help]";
+    public static final String DESC =
+        "The delete subcommand deletes all of the versions of the key\n" +
+        "specified as the <keyname> argument from within the provider\n" +
+        "indicated through the --provider argument";
+
+    String keyName = null;
+    boolean cont = true;
+
+    public DeleteCommand(String keyName) {
+      this.keyName = keyName;
+    }
+
+    @Override
+    public boolean validate() {
+      provider = getKeyProvider();
+      if (provider == null) {
+        out.println("There are no valid KeyProviders configured.\n"
+            + "Nothing will be deleted.\n"
+            + "Consider using the --provider option to indicate the provider"
+            + " to use.");
+        return false;
+      }
+      if (keyName == null) {
+        out.println("There is no keyName specified. Please provide the" +
+            "mandatory <keyname>. See the usage description with --help.");
+        return false;
+      }
+      if (interactive) {
+        try {
+          cont = ToolRunner
+              .confirmPrompt("You are about to DELETE all versions of "
+                  + "the key: " + keyName + " from KeyProvider "
+                  + provider.toString() + ". Continue?:");
+          if (!cont) {
+            out.println("Nothing has been be deleted.");
+          }
+          return cont;
+        } catch (IOException e) {
+          out.println(keyName + " will not be deleted.");
+          e.printStackTrace(err);
+        }
+      }
+      return true;
+    }
+
+    public void execute() throws IOException {
+      warnIfTransientProvider();
+      out.println("Deleting key: " + keyName + " from KeyProvider: "
+          + provider.toString());
+      if (cont) {
+        try {
+          provider.deleteKey(keyName);
+          out.println(keyName + " has been successfully deleted.");
+          provider.flush();
+          printProviderWritten();
+        } catch (IOException e) {
+          out.println(keyName + "has NOT been deleted.");
+          throw e;
+        }
+      }
+    }
+
+    @Override
+    public String getUsage() {
+      return USAGE + ":\n\n" + DESC;
+    }
+  }
+
+  private class CreateCommand extends Command {
+    public static final String USAGE = "create <keyname> [--cipher] " +
+    		"[--size] [--provider] [--help]";
+    public static final String DESC =
+        "The create subcommand creates a new key for the name specified\n" +
+        "as the <keyname> argument within the provider indicated through\n" +
+        "the --provider argument. You may also indicate the specific\n" +
+        "cipher through the --cipher argument. The default for cipher is\n" +
+        "currently \"AES/CTR/NoPadding\". The default keysize is \"256\".\n" +
+        "You may also indicate the requested key length through the --size\n" +
+        "argument.";
+
+    String keyName = null;
+
+    public CreateCommand(String keyName) {
+      this.keyName = keyName;
+    }
+
+    public boolean validate() {
+      boolean rc = true;
+      provider = getKeyProvider();
+      if (provider == null) {
+        out.println("There are no valid KeyProviders configured.\nKey" +
+        		" will not be created.\n"
+            + "Consider using the --provider option to indicate the provider" +
+            " to use.");
+        rc = false;
+      }
+      if (keyName == null) {
+        out.println("There is no keyName specified. Please provide the" +
+        		"mandatory <keyname>. See the usage description with --help.");
+        rc = false;
+      }
+      return rc;
+    }
+
+    public void execute() throws IOException, NoSuchAlgorithmException {
+      warnIfTransientProvider();
+      try {
+        Options options = KeyProvider.options(getConf());
+        String alg = getAlgorithm(options.getCipher());
+        byte[] material = generateKey(options.getBitLength(), alg);
+        provider.createKey(keyName, material, options);
+        out.println(keyName + " has been successfully created.");
+        provider.flush();
+        printProviderWritten();
+      } catch (InvalidParameterException e) {
+        out.println(keyName + " has NOT been created. " + e.getMessage());
+        throw e;
+      } catch (IOException e) {
+        out.println(keyName + " has NOT been created. " + e.getMessage());
+        throw e;
+      } catch (NoSuchAlgorithmException e) {
+        out.println(keyName + " has NOT been created. " + e.getMessage());
+        throw e;
+      }
+    }
+
+    /**
+     * Get the algorithm from the cipher.
+     * @return the algorithm name
+     */
+    public String getAlgorithm(String cipher) {
+      int slash = cipher.indexOf('/');
+      if (slash == - 1) {
+        return cipher;
+      } else {
+        return cipher.substring(0, slash);
+      }
+    }
+
+    @Override
+    public String getUsage() {
+      return USAGE + ":\n\n" + DESC;
+    }
+  }
+
+  /**
+   * Main program.
+   *
+   * @param args
+   *          Command line arguments
+   * @throws Exception
+   */
+  public static void main(String[] args) throws Exception {
+    int res = ToolRunner.run(new Configuration(), new KeyShell(), args);
+    System.exit(res);
+  }
+}
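
Because KeyShell implements Tool, it can also be driven programmatically, exactly as main() and the new unit tests do. A small sketch; the key name and keystore URI are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyShell;
import org.apache.hadoop.util.ToolRunner;

public class KeyShellSketch {
  public static void main(String[] args) throws Exception {
    // Equivalent to: hadoop key create mykey --provider jceks://file/tmp/keystore.jceks
    String[] cli = {"create", "mykey",
        "--provider", "jceks://file/tmp/keystore.jceks"}; // hypothetical store
    int rc = ToolRunner.run(new Configuration(), new KeyShell(), cli);
    System.exit(rc);
  }
}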

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java

@@ -49,6 +49,11 @@ public class UserProvider extends KeyProvider {
     credentials = user.getCredentials();
   }
 
+  @Override
+  public boolean isTransient() {
+    return true;
+  }
+
   @Override
   public KeyVersion getKeyVersion(String versionName) {
     byte[] bytes = credentials.getSecretKey(new Text(versionName));

+ 4 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -2021,6 +2021,10 @@ public abstract class Server {
           if (LOG.isDebugEnabled()) {
             LOG.debug(Thread.currentThread().getName() + ": " + call + " for RpcKind " + call.rpcKind);
           }
+          if (!call.connection.channel.isOpen()) {
+            LOG.info(Thread.currentThread().getName() + ": skipped " + call);
+            continue;
+          }
           String errorClass = null;
           String error = null;
           RpcStatusProto returnStatus = RpcStatusProto.SUCCESS;

+ 176 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java

@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key;
+
+import static org.junit.Assert.*;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.PrintStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestKeyShell {
+  private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
+  private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
+  private static final File tmpDir =
+      new File(System.getProperty("test.build.data", "/tmp"), "key");
+  
+  @Before
+  public void setup() throws Exception {
+    System.setOut(new PrintStream(outContent));
+    System.setErr(new PrintStream(errContent));
+  }
+  
+  @Test
+  public void testKeySuccessfulKeyLifecycle() throws Exception {
+    outContent.flush();
+    String[] args1 = {"create", "key1", "--provider", 
+        "jceks://file" + tmpDir + "/keystore.jceks"};
+    int rc = 0;
+    KeyShell ks = new KeyShell();
+    ks.setConf(new Configuration());
+    rc = ks.run(args1);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("key1 has been successfully " +
+    		"created."));
+
+    outContent.flush();
+    String[] args2 = {"list", "--provider", 
+        "jceks://file" + tmpDir + "/keystore.jceks"};
+    rc = ks.run(args2);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("key1"));
+
+    outContent.flush();
+    String[] args3 = {"roll", "key1", "--provider", 
+        "jceks://file" + tmpDir + "/keystore.jceks"};
+    rc = ks.run(args3);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("key1 has been successfully " +
+    		"rolled."));
+
+    outContent.flush();
+    String[] args4 = {"delete", "key1", "--provider", 
+        "jceks://file" + tmpDir + "/keystore.jceks"};
+    rc = ks.run(args4);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("key1 has been successfully " +
+    		"deleted."));
+
+    outContent.flush();
+    String[] args5 = {"list", "--provider", 
+        "jceks://file" + tmpDir + "/keystore.jceks"};
+    rc = ks.run(args5);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("key1"));
+  }
+  
+  @Test
+  public void testInvalidKeySize() throws Exception {
+    String[] args1 = {"create", "key1", "--size", "56", "--provider", 
+        "jceks://file" + tmpDir + "/keystore.jceks"};
+    
+    int rc = 0;
+    KeyShell ks = new KeyShell();
+    ks.setConf(new Configuration());
+    rc = ks.run(args1);
+    assertEquals(-1, rc);
+    assertTrue(outContent.toString().contains("key1 has NOT been created."));
+  }
+
+  @Test
+  public void testInvalidCipher() throws Exception {
+    String[] args1 = {"create", "key1", "--cipher", "LJM", "--provider", 
+        "jceks://file" + tmpDir + "/keystore.jceks"};
+    
+    int rc = 0;
+    KeyShell ks = new KeyShell();
+    ks.setConf(new Configuration());
+    rc = ks.run(args1);
+    assertEquals(-1, rc);
+    assertTrue(outContent.toString().contains("key1 has NOT been created."));
+  }
+
+  @Test
+  public void testInvalidProvider() throws Exception {
+    String[] args1 = {"create", "key1", "--cipher", "AES", "--provider", 
+      "sdff://file/tmp/keystore.jceks"};
+    
+    int rc = 0;
+    KeyShell ks = new KeyShell();
+    ks.setConf(new Configuration());
+    rc = ks.run(args1);
+    assertEquals(-1, rc);
+    assertTrue(outContent.toString().contains("There are no valid " +
+    		"KeyProviders configured."));
+  }
+
+  @Test
+  public void testTransientProviderWarning() throws Exception {
+    String[] args1 = {"create", "key1", "--cipher", "AES", "--provider", 
+      "user:///"};
+    
+    int rc = 0;
+    KeyShell ks = new KeyShell();
+    ks.setConf(new Configuration());
+    rc = ks.run(args1);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("WARNING: you are modifying a " +
+    		"transient provider."));
+  }
+  
+  @Test
+  public void testTransientProviderOnlyConfig() throws Exception {
+    String[] args1 = {"create", "key1"};
+    
+    int rc = 0;
+    KeyShell ks = new KeyShell();
+    Configuration config = new Configuration();
+    config.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///");
+    ks.setConf(config);
+    rc = ks.run(args1);
+    assertEquals(-1, rc);
+    assertTrue(outContent.toString().contains("There are no valid " +
+    		"KeyProviders configured."));
+  }
+
+  @Test
+  public void testFullCipher() throws Exception {
+    String[] args1 = {"create", "key1", "--cipher", "AES/CBC/pkcs5Padding", 
+        "--provider", "jceks://file" + tmpDir + "/keystore.jceks"};
+    
+    int rc = 0;
+    KeyShell ks = new KeyShell();
+    ks.setConf(new Configuration());
+    rc = ks.run(args1);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("key1 has been successfully " +
+    		"created."));
+
+    outContent.flush();
+    String[] args2 = {"delete", "key1", "--provider", 
+        "jceks://file" + tmpDir + "/keystore.jceks"};
+    rc = ks.run(args2);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("key1 has been successfully " +
+    		"deleted."));
+  }
+}
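
The tests capture CLI output by swapping System.out for a ByteArrayOutputStream. One subtlety worth noting: flush() on a ByteArrayOutputStream is a no-op, so a variant that wants to assert on a single step's output in isolation would call reset() between steps. A minimal sketch of that capture pattern (not part of this commit):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

public class CaptureSketch {
  public static void main(String[] args) {
    ByteArrayOutputStream captured = new ByteArrayOutputStream();
    PrintStream original = System.out;
    System.setOut(new PrintStream(captured));
    try {
      System.out.println("step one");
      captured.reset();               // clears the buffer; flush() would not
      System.out.println("step two");
    } finally {
      System.setOut(original);        // always restore the real stdout
    }
    original.println(captured.toString().trim()); // prints: step two
  }
}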

+ 10 - 2
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -478,6 +478,16 @@ Trunk (Unreleased)
 
     HDFS-5726. Fix compilation error in AbstractINodeDiff for JDK7. (jing9)
 
+    HDFS-5768. Consolidate the serialization code in DelegationTokenSecretManager 
+    (Haohui Mai via brandonli)
+
+    HDFS-5775. Consolidate the code for serialization in CacheManager
+    (Haohui Mai via brandonli)
+
+    HDFS-5704. Change OP_UPDATE_BLOCKS with a new OP_ADD_BLOCK. (jing9)
+
+    HDFS-5777. Update LayoutVersion for the new editlog op OP_ADD_BLOCK. (jing9)
+
 Release 2.4.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -934,8 +944,6 @@ Release 2.3.0 - UNRELEASED
     HDFS-5677. Need error checking for HA cluster configuration.
     (Vincent Sheffer via cos)
 
-    HDFS-5704. Change OP_UPDATE_BLOCKS with a new OP_ADD_BLOCK. (jing9)
-
   OPTIMIZATIONS
 
   BUG FIXES

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java

@@ -107,11 +107,12 @@ public class LayoutVersion {
         "block IDs in the edits log and image files"),
     EDITLOG_SUPPORT_RETRYCACHE(-47, "Record ClientId and CallId in editlog to " 
         + "enable rebuilding retry cache in case of HA failover"),
-    CACHING(-48, "Support for cache pools and path-based caching"),
-    ADD_DATANODE_AND_STORAGE_UUIDS(-49, "Replace StorageID with DatanodeUuid."
+    EDITLOG_ADD_BLOCK(-48, "Add new editlog that only records allocation of "
+        + "the new block instead of the entire block list"),
+    CACHING(-49, "Support for cache pools and path-based caching"),
+    ADD_DATANODE_AND_STORAGE_UUIDS(-50, "Replace StorageID with DatanodeUuid."
         + " Use distinct StorageUuid per storage directory.");
 
-    
     final int lv;
     final int ancestorLV;
     final String description;

+ 107 - 96
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java

@@ -59,6 +59,7 @@ public class DelegationTokenSecretManager
       .getLog(DelegationTokenSecretManager.class);
   
   private final FSNamesystem namesystem;
+  private final SerializerCompat serializerCompat = new SerializerCompat();
 
   public DelegationTokenSecretManager(long delegationKeyUpdateInterval,
       long delegationTokenMaxLifetime, long delegationTokenRenewInterval,
@@ -150,24 +151,21 @@ public class DelegationTokenSecretManager
       throw new IOException("No delegation token found for this identifier");
     }
   }
-  
+
   /**
    * Load SecretManager state from fsimage.
    * 
    * @param in input stream to read fsimage
    * @throws IOException
    */
-  public synchronized void loadSecretManagerState(DataInput in)
+  public synchronized void loadSecretManagerStateCompat(DataInput in)
       throws IOException {
     if (running) {
       // a safety check
       throw new IOException(
           "Can't load state from image in a running SecretManager.");
     }
-    currentId = in.readInt();
-    loadAllKeys(in);
-    delegationTokenSequenceNumber = in.readInt();
-    loadCurrentTokens(in);
+    serializerCompat.load(in);
   }
   
   /**
@@ -177,12 +175,9 @@ public class DelegationTokenSecretManager
    * @param sdPath String storage directory path
    * @throws IOException
    */
-  public synchronized void saveSecretManagerState(DataOutputStream out,
+  public synchronized void saveSecretManagerStateCompat(DataOutputStream out,
       String sdPath) throws IOException {
-    out.writeInt(currentId);
-    saveAllKeys(out, sdPath);
-    out.writeInt(delegationTokenSequenceNumber);
-    saveCurrentTokens(out, sdPath);
+    serializerCompat.save(out, sdPath);
   }
   
   /**
@@ -282,91 +277,6 @@ public class DelegationTokenSecretManager
     return allKeys.size();
   }
 
-  /**
-   * Private helper methods to save delegation keys and tokens in fsimage
-   */
-  private synchronized void saveCurrentTokens(DataOutputStream out,
-      String sdPath) throws IOException {
-    StartupProgress prog = NameNode.getStartupProgress();
-    Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
-    prog.beginStep(Phase.SAVING_CHECKPOINT, step);
-    prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
-    Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
-    out.writeInt(currentTokens.size());
-    Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
-        .iterator();
-    while (iter.hasNext()) {
-      DelegationTokenIdentifier id = iter.next();
-      id.write(out);
-      DelegationTokenInformation info = currentTokens.get(id);
-      out.writeLong(info.getRenewDate());
-      counter.increment();
-    }
-    prog.endStep(Phase.SAVING_CHECKPOINT, step);
-  }
-  
-  /*
-   * Save the current state of allKeys
-   */
-  private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
-      throws IOException {
-    StartupProgress prog = NameNode.getStartupProgress();
-    Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
-    prog.beginStep(Phase.SAVING_CHECKPOINT, step);
-    prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
-    Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
-    out.writeInt(allKeys.size());
-    Iterator<Integer> iter = allKeys.keySet().iterator();
-    while (iter.hasNext()) {
-      Integer key = iter.next();
-      allKeys.get(key).write(out);
-      counter.increment();
-    }
-    prog.endStep(Phase.SAVING_CHECKPOINT, step);
-  }
-  
-  /**
-   * Private helper methods to load Delegation tokens from fsimage
-   */
-  private synchronized void loadCurrentTokens(DataInput in)
-      throws IOException {
-    StartupProgress prog = NameNode.getStartupProgress();
-    Step step = new Step(StepType.DELEGATION_TOKENS);
-    prog.beginStep(Phase.LOADING_FSIMAGE, step);
-    int numberOfTokens = in.readInt();
-    prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfTokens);
-    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
-    for (int i = 0; i < numberOfTokens; i++) {
-      DelegationTokenIdentifier id = new DelegationTokenIdentifier();
-      id.readFields(in);
-      long expiryTime = in.readLong();
-      addPersistedDelegationToken(id, expiryTime);
-      counter.increment();
-    }
-    prog.endStep(Phase.LOADING_FSIMAGE, step);
-  }
-
-  /**
-   * Private helper method to load delegation keys from fsimage.
-   * @param in
-   * @throws IOException
-   */
-  private synchronized void loadAllKeys(DataInput in) throws IOException {
-    StartupProgress prog = NameNode.getStartupProgress();
-    Step step = new Step(StepType.DELEGATION_KEYS);
-    prog.beginStep(Phase.LOADING_FSIMAGE, step);
-    int numberOfKeys = in.readInt();
-    prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
-    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
-    for (int i = 0; i < numberOfKeys; i++) {
-      DelegationKey value = new DelegationKey();
-      value.readFields(in);
-      addKey(value);
-      counter.increment();
-    }
-    prog.endStep(Phase.LOADING_FSIMAGE, step);
-  }
-
   /**
    * Call namesystem to update editlogs for new master key.
    */
@@ -420,4 +330,105 @@ public class DelegationTokenSecretManager
     c.addToken(new Text(ugi.getShortUserName()), token);
     return c;
   }
+
+  private final class SerializerCompat {
+    private void load(DataInput in) throws IOException {
+      currentId = in.readInt();
+      loadAllKeys(in);
+      delegationTokenSequenceNumber = in.readInt();
+      loadCurrentTokens(in);
+    }
+
+    private void save(DataOutputStream out, String sdPath) throws IOException {
+      out.writeInt(currentId);
+      saveAllKeys(out, sdPath);
+      out.writeInt(delegationTokenSequenceNumber);
+      saveCurrentTokens(out, sdPath);
+    }
+
+    /**
+     * Private helper methods to save delegation keys and tokens in fsimage
+     */
+    private synchronized void saveCurrentTokens(DataOutputStream out,
+        String sdPath) throws IOException {
+      StartupProgress prog = NameNode.getStartupProgress();
+      Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
+      prog.beginStep(Phase.SAVING_CHECKPOINT, step);
+      prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
+      Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
+      out.writeInt(currentTokens.size());
+      Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
+          .iterator();
+      while (iter.hasNext()) {
+        DelegationTokenIdentifier id = iter.next();
+        id.write(out);
+        DelegationTokenInformation info = currentTokens.get(id);
+        out.writeLong(info.getRenewDate());
+        counter.increment();
+      }
+      prog.endStep(Phase.SAVING_CHECKPOINT, step);
+    }
+
+    /*
+     * Save the current state of allKeys
+     */
+    private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
+        throws IOException {
+      StartupProgress prog = NameNode.getStartupProgress();
+      Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
+      prog.beginStep(Phase.SAVING_CHECKPOINT, step);
+      prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
+      Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
+      out.writeInt(allKeys.size());
+      Iterator<Integer> iter = allKeys.keySet().iterator();
+      while (iter.hasNext()) {
+        Integer key = iter.next();
+        allKeys.get(key).write(out);
+        counter.increment();
+      }
+      prog.endStep(Phase.SAVING_CHECKPOINT, step);
+    }
+
+    /**
+     * Private helper methods to load Delegation tokens from fsimage
+     */
+    private synchronized void loadCurrentTokens(DataInput in)
+        throws IOException {
+      StartupProgress prog = NameNode.getStartupProgress();
+      Step step = new Step(StepType.DELEGATION_TOKENS);
+      prog.beginStep(Phase.LOADING_FSIMAGE, step);
+      int numberOfTokens = in.readInt();
+      prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfTokens);
+      Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
+      for (int i = 0; i < numberOfTokens; i++) {
+        DelegationTokenIdentifier id = new DelegationTokenIdentifier();
+        id.readFields(in);
+        long expiryTime = in.readLong();
+        addPersistedDelegationToken(id, expiryTime);
+        counter.increment();
+      }
+      prog.endStep(Phase.LOADING_FSIMAGE, step);
+    }
+
+    /**
+     * Private helper method to load delegation keys from fsimage.
+     * @param in
+     * @throws IOException
+     */
+    private synchronized void loadAllKeys(DataInput in) throws IOException {
+      StartupProgress prog = NameNode.getStartupProgress();
+      Step step = new Step(StepType.DELEGATION_KEYS);
+      prog.beginStep(Phase.LOADING_FSIMAGE, step);
+      int numberOfKeys = in.readInt();
+      prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
+      Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
+      for (int i = 0; i < numberOfKeys; i++) {
+        DelegationKey value = new DelegationKey();
+        value.readFields(in);
+        addKey(value);
+        counter.increment();
+      }
+      prog.endStep(Phase.LOADING_FSIMAGE, step);
+    }
+  }
 }
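
This change and the CacheManager one below apply the same refactoring: the public *StateCompat methods keep their signatures while the legacy fsimage format moves into a private SerializerCompat inner class. The shape of the pattern, reduced to a sketch (all names hypothetical except SerializerCompat):

import java.io.DataInput;
import java.io.DataOutputStream;
import java.io.IOException;

class StateManagerSketch {
  private final SerializerCompat serializerCompat = new SerializerCompat();

  // Public surface unchanged; format-specific code delegated inward.
  public synchronized void saveStateCompat(DataOutputStream out) throws IOException {
    serializerCompat.save(out);
  }

  public synchronized void loadStateCompat(DataInput in) throws IOException {
    serializerCompat.load(in);
  }

  private final class SerializerCompat {
    private void save(DataOutputStream out) throws IOException { /* legacy layout */ }
    private void load(DataInput in) throws IOException { /* legacy layout */ }
  }
}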

+ 103 - 91
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java

@@ -160,6 +160,8 @@ public final class CacheManager {
    */
   private final ReentrantLock crmLock = new ReentrantLock();
 
+  private final SerializerCompat serializerCompat = new SerializerCompat();
+
   /**
    * The CacheReplicationMonitor.
    */
@@ -926,11 +928,9 @@ public final class CacheManager {
    * @param sdPath path of the storage directory
    * @throws IOException
    */
-  public void saveState(DataOutputStream out, String sdPath)
+  public void saveStateCompat(DataOutputStream out, String sdPath)
       throws IOException {
-    out.writeLong(nextDirectiveId);
-    savePools(out, sdPath);
-    saveDirectives(out, sdPath);
+    serializerCompat.save(out, sdPath);
   }
 
   /**
@@ -939,105 +939,117 @@ public final class CacheManager {
    * @param in DataInput from which to restore state
    * @throws IOException
    */
-  public void loadState(DataInput in) throws IOException {
-    nextDirectiveId = in.readLong();
-    // pools need to be loaded first since directives point to their parent pool
-    loadPools(in);
-    loadDirectives(in);
+  public void loadStateCompat(DataInput in) throws IOException {
+    serializerCompat.load(in);
   }
 
-  /**
-   * Save cache pools to fsimage
-   */
-  private void savePools(DataOutputStream out,
-      String sdPath) throws IOException {
-    StartupProgress prog = NameNode.getStartupProgress();
-    Step step = new Step(StepType.CACHE_POOLS, sdPath);
-    prog.beginStep(Phase.SAVING_CHECKPOINT, step);
-    prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size());
-    Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
-    out.writeInt(cachePools.size());
-    for (CachePool pool: cachePools.values()) {
-      FSImageSerialization.writeCachePoolInfo(out, pool.getInfo(true));
-      counter.increment();
+  private final class SerializerCompat {
+    private void save(DataOutputStream out, String sdPath) throws IOException {
+      out.writeLong(nextDirectiveId);
+      savePools(out, sdPath);
+      saveDirectives(out, sdPath);
     }
-    prog.endStep(Phase.SAVING_CHECKPOINT, step);
-  }
 
-  /*
-   * Save cache entries to fsimage
-   */
-  private void saveDirectives(DataOutputStream out, String sdPath)
-      throws IOException {
-    StartupProgress prog = NameNode.getStartupProgress();
-    Step step = new Step(StepType.CACHE_ENTRIES, sdPath);
-    prog.beginStep(Phase.SAVING_CHECKPOINT, step);
-    prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size());
-    Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
-    out.writeInt(directivesById.size());
-    for (CacheDirective directive : directivesById.values()) {
-      FSImageSerialization.writeCacheDirectiveInfo(out, directive.toInfo());
-      counter.increment();
+    private void load(DataInput in) throws IOException {
+      nextDirectiveId = in.readLong();
+      // pools need to be loaded first since directives point to their parent pool
+      loadPools(in);
+      loadDirectives(in);
     }
-    prog.endStep(Phase.SAVING_CHECKPOINT, step);
-  }
 
-  /**
-   * Load cache pools from fsimage
-   */
-  private void loadPools(DataInput in)
-      throws IOException {
-    StartupProgress prog = NameNode.getStartupProgress();
-    Step step = new Step(StepType.CACHE_POOLS);
-    prog.beginStep(Phase.LOADING_FSIMAGE, step);
-    int numberOfPools = in.readInt();
-    prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfPools);
-    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
-    for (int i = 0; i < numberOfPools; i++) {
-      addCachePool(FSImageSerialization.readCachePoolInfo(in));
-      counter.increment();
+    /**
+     * Save cache pools to fsimage
+     */
+    private void savePools(DataOutputStream out,
+        String sdPath) throws IOException {
+      StartupProgress prog = NameNode.getStartupProgress();
+      Step step = new Step(StepType.CACHE_POOLS, sdPath);
+      prog.beginStep(Phase.SAVING_CHECKPOINT, step);
+      prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size());
+      Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
+      out.writeInt(cachePools.size());
+      for (CachePool pool: cachePools.values()) {
+        FSImageSerialization.writeCachePoolInfo(out, pool.getInfo(true));
+        counter.increment();
+      }
+      prog.endStep(Phase.SAVING_CHECKPOINT, step);
     }
-    prog.endStep(Phase.LOADING_FSIMAGE, step);
-  }
 
-  /**
-   * Load cache directives from the fsimage
-   */
-  private void loadDirectives(DataInput in) throws IOException {
-    StartupProgress prog = NameNode.getStartupProgress();
-    Step step = new Step(StepType.CACHE_ENTRIES);
-    prog.beginStep(Phase.LOADING_FSIMAGE, step);
-    int numDirectives = in.readInt();
-    prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
-    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
-    for (int i = 0; i < numDirectives; i++) {
-      CacheDirectiveInfo info = FSImageSerialization.readCacheDirectiveInfo(in);
-      // Get pool reference by looking it up in the map
-      final String poolName = info.getPool();
-      CachePool pool = cachePools.get(poolName);
-      if (pool == null) {
-        throw new IOException("Directive refers to pool " + poolName +
-            ", which does not exist.");
+    /*
+     * Save cache entries to fsimage
+     */
+    private void saveDirectives(DataOutputStream out, String sdPath)
+        throws IOException {
+      StartupProgress prog = NameNode.getStartupProgress();
+      Step step = new Step(StepType.CACHE_ENTRIES, sdPath);
+      prog.beginStep(Phase.SAVING_CHECKPOINT, step);
+      prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size());
+      Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
+      out.writeInt(directivesById.size());
+      for (CacheDirective directive : directivesById.values()) {
+        FSImageSerialization.writeCacheDirectiveInfo(out, directive.toInfo());
+        counter.increment();
       }
-      CacheDirective directive =
-          new CacheDirective(info.getId(), info.getPath().toUri().getPath(),
-              info.getReplication(), info.getExpiration().getAbsoluteMillis());
-      boolean addedDirective = pool.getDirectiveList().add(directive);
-      assert addedDirective;
-      if (directivesById.put(directive.getId(), directive) != null) {
-        throw new IOException("A directive with ID " + directive.getId() +
-            " already exists");
+      prog.endStep(Phase.SAVING_CHECKPOINT, step);
+    }
+
+    /**
+     * Load cache pools from fsimage
+     */
+    private void loadPools(DataInput in)
+        throws IOException {
+      StartupProgress prog = NameNode.getStartupProgress();
+      Step step = new Step(StepType.CACHE_POOLS);
+      prog.beginStep(Phase.LOADING_FSIMAGE, step);
+      int numberOfPools = in.readInt();
+      prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfPools);
+      Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
+      for (int i = 0; i < numberOfPools; i++) {
+        addCachePool(FSImageSerialization.readCachePoolInfo(in));
+        counter.increment();
       }
-      List<CacheDirective> directives =
-          directivesByPath.get(directive.getPath());
-      if (directives == null) {
-        directives = new LinkedList<CacheDirective>();
-        directivesByPath.put(directive.getPath(), directives);
+      prog.endStep(Phase.LOADING_FSIMAGE, step);
+    }
+
+    /**
+     * Load cache directives from the fsimage
+     */
+    private void loadDirectives(DataInput in) throws IOException {
+      StartupProgress prog = NameNode.getStartupProgress();
+      Step step = new Step(StepType.CACHE_ENTRIES);
+      prog.beginStep(Phase.LOADING_FSIMAGE, step);
+      int numDirectives = in.readInt();
+      prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
+      Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
+      for (int i = 0; i < numDirectives; i++) {
+        CacheDirectiveInfo info = FSImageSerialization.readCacheDirectiveInfo(in);
+        // Get pool reference by looking it up in the map
+        final String poolName = info.getPool();
+        CachePool pool = cachePools.get(poolName);
+        if (pool == null) {
+          throw new IOException("Directive refers to pool " + poolName +
+              ", which does not exist.");
+        }
+        CacheDirective directive =
+            new CacheDirective(info.getId(), info.getPath().toUri().getPath(),
+                info.getReplication(), info.getExpiration().getAbsoluteMillis());
+        boolean addedDirective = pool.getDirectiveList().add(directive);
+        assert addedDirective;
+        if (directivesById.put(directive.getId(), directive) != null) {
+          throw new IOException("A directive with ID " + directive.getId() +
+              " already exists");
+        }
+        List<CacheDirective> directives =
+            directivesByPath.get(directive.getPath());
+        if (directives == null) {
+          directives = new LinkedList<CacheDirective>();
+          directivesByPath.put(directive.getPath(), directives);
+        }
+        directives.add(directive);
+        counter.increment();
       }
-      directives.add(directive);
-      counter.increment();
+      prog.endStep(Phase.LOADING_FSIMAGE, step);
     }
-    prog.endStep(Phase.LOADING_FSIMAGE, step);
   }
 
   public void waitForRescanIfNeeded() {

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java

@@ -60,13 +60,13 @@ public enum FSEditLogOpCodes {
   OP_DISALLOW_SNAPSHOT          ((byte) 30),
   OP_SET_GENSTAMP_V2            ((byte) 31),
   OP_ALLOCATE_BLOCK_ID          ((byte) 32),
-  OP_ADD_CACHE_DIRECTIVE       ((byte) 33),
-  OP_REMOVE_CACHE_DIRECTIVE    ((byte) 34),
-  OP_ADD_CACHE_POOL                       ((byte) 35),
-  OP_MODIFY_CACHE_POOL                    ((byte) 36),
-  OP_REMOVE_CACHE_POOL                    ((byte) 37),
-  OP_MODIFY_CACHE_DIRECTIVE     ((byte) 38),
-  OP_ADD_BLOCK                  ((byte) 39),
+  OP_ADD_BLOCK                  ((byte) 33),
+  OP_ADD_CACHE_DIRECTIVE       ((byte) 34),
+  OP_REMOVE_CACHE_DIRECTIVE    ((byte) 35),
+  OP_ADD_CACHE_POOL                       ((byte) 36),
+  OP_MODIFY_CACHE_POOL                    ((byte) 37),
+  OP_REMOVE_CACHE_POOL                    ((byte) 38),
+  OP_MODIFY_CACHE_DIRECTIVE     ((byte) 39),
 
   // Note that fromByte(..) depends on OP_INVALID being at the last position.  
   OP_INVALID                    ((byte) -1);

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java

@@ -870,7 +870,7 @@ public class FSImageFormat {
         //This must not happen if security is turned on.
         return; 
       }
-      namesystem.loadSecretManagerState(in);
+      namesystem.loadSecretManagerStateCompat(in);
     }
 
     private void loadCacheManagerState(DataInput in) throws IOException {
@@ -878,7 +878,7 @@ public class FSImageFormat {
       if (!LayoutVersion.supports(Feature.CACHING, imgVersion)) {
         return;
       }
-      namesystem.getCacheManager().loadState(in);
+      namesystem.getCacheManager().loadStateCompat(in);
     }
 
     private int getLayoutVersion() {
@@ -1032,9 +1032,9 @@ public class FSImageFormat {
         sourceNamesystem.saveFilesUnderConstruction(out, snapshotUCMap);
         
         context.checkCancelled();
-        sourceNamesystem.saveSecretManagerState(out, sdPath);
+        sourceNamesystem.saveSecretManagerStateCompat(out, sdPath);
         context.checkCancelled();
-        sourceNamesystem.getCacheManager().saveState(out, sdPath);
+        sourceNamesystem.getCacheManager().saveStateCompat(out, sdPath);
         context.checkCancelled();
         out.flush();
         context.checkCancelled();

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -6250,16 +6250,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * @param out save state of the secret manager
    * @param sdPath String storage directory path
    */
-  void saveSecretManagerState(DataOutputStream out, String sdPath)
+  void saveSecretManagerStateCompat(DataOutputStream out, String sdPath)
       throws IOException {
-    dtSecretManager.saveSecretManagerState(out, sdPath);
+    dtSecretManager.saveSecretManagerStateCompat(out, sdPath);
   }
 
   /**
    * @param in load the state of secret manager from input stream
    */
-  void loadSecretManagerState(DataInput in) throws IOException {
-    dtSecretManager.loadSecretManagerState(in);
+  void loadSecretManagerStateCompat(DataInput in) throws IOException {
+    dtSecretManager.loadSecretManagerStateCompat(in);
   }
 
   /**

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java

@@ -126,7 +126,7 @@ class ImageLoaderCurrent implements ImageLoader {
                                       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
       -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-      -40, -41, -42, -43, -44, -45, -46, -47, -48, -49 };
+      -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50 };
   private int imageVersion = 0;
   
   private final Map<Long, Boolean> subtreeMap = new HashMap<Long, Boolean>();

binary
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored


+ 84 - 84
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml

@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <EDITS>
-  <EDITS_VERSION>-49</EDITS_VERSION>
+  <EDITS_VERSION>-50</EDITS_VERSION>
   <RECORD>
     <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
     <DATA>
@@ -13,8 +13,8 @@
       <TXID>2</TXID>
       <DELEGATION_KEY>
         <KEY_ID>1</KEY_ID>
-        <EXPIRY_DATE>1389736494300</EXPIRY_DATE>
-        <KEY>d1a0861e6b9e394e</KEY>
+        <EXPIRY_DATE>1390519460949</EXPIRY_DATE>
+        <KEY>dc8d30edc97df67d</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -24,8 +24,8 @@
       <TXID>3</TXID>
       <DELEGATION_KEY>
         <KEY_ID>2</KEY_ID>
-        <EXPIRY_DATE>1389736494302</EXPIRY_DATE>
-        <KEY>8239b8f0ed7e6ce6</KEY>
+        <EXPIRY_DATE>1390519460952</EXPIRY_DATE>
+        <KEY>096bc20b6debed03</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -37,18 +37,18 @@
       <INODEID>16386</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1389045295288</MTIME>
-      <ATIME>1389045295288</ATIME>
+      <MTIME>1389828264873</MTIME>
+      <ATIME>1389828264873</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-216163116_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
         <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>7</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>9</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -59,8 +59,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1389045295307</MTIME>
-      <ATIME>1389045295288</ATIME>
+      <MTIME>1389828265699</MTIME>
+      <ATIME>1389828264873</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -78,9 +78,9 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1389045295311</TIMESTAMP>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>9</RPC_CALLID>
+      <TIMESTAMP>1389828265705</TIMESTAMP>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>11</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -89,9 +89,9 @@
       <TXID>7</TXID>
       <LENGTH>0</LENGTH>
       <PATH>/file_moved</PATH>
-      <TIMESTAMP>1389045295318</TIMESTAMP>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>10</RPC_CALLID>
+      <TIMESTAMP>1389828265712</TIMESTAMP>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>12</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -101,7 +101,7 @@
       <LENGTH>0</LENGTH>
       <INODEID>16387</INODEID>
       <PATH>/directory_mkdir</PATH>
-      <TIMESTAMP>1389045295326</TIMESTAMP>
+      <TIMESTAMP>1389828265722</TIMESTAMP>
       <PERMISSION_STATUS>
         <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
@@ -136,8 +136,8 @@
       <TXID>12</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>15</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>17</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -147,8 +147,8 @@
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
       <SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>16</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>18</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -157,8 +157,8 @@
       <TXID>14</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>17</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>19</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -169,18 +169,18 @@
       <INODEID>16388</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1389045295354</MTIME>
-      <ATIME>1389045295354</ATIME>
+      <MTIME>1389828265757</MTIME>
+      <ATIME>1389828265757</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-216163116_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
         <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>18</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>20</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -191,8 +191,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1389045295357</MTIME>
-      <ATIME>1389045295354</ATIME>
+      <MTIME>1389828265759</MTIME>
+      <ATIME>1389828265757</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -253,10 +253,10 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1389045295378</TIMESTAMP>
+      <TIMESTAMP>1389828265782</TIMESTAMP>
       <OPTIONS>NONE</OPTIONS>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>25</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>27</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -267,18 +267,18 @@
       <INODEID>16389</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1389045295383</MTIME>
-      <ATIME>1389045295383</ATIME>
+      <MTIME>1389828265787</MTIME>
+      <ATIME>1389828265787</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-216163116_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
         <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>27</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>29</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -383,8 +383,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1389045295484</MTIME>
-      <ATIME>1389045295383</ATIME>
+      <MTIME>1389828266540</MTIME>
+      <ATIME>1389828265787</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -418,18 +418,18 @@
       <INODEID>16390</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1389045295486</MTIME>
-      <ATIME>1389045295486</ATIME>
+      <MTIME>1389828266544</MTIME>
+      <ATIME>1389828266544</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-216163116_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
         <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>40</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>41</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -534,8 +534,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1389045295507</MTIME>
-      <ATIME>1389045295486</ATIME>
+      <MTIME>1389828266569</MTIME>
+      <ATIME>1389828266544</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -569,18 +569,18 @@
       <INODEID>16391</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1389045295509</MTIME>
-      <ATIME>1389045295509</ATIME>
+      <MTIME>1389828266572</MTIME>
+      <ATIME>1389828266572</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-216163116_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
         <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>52</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>53</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -685,8 +685,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1389045295532</MTIME>
-      <ATIME>1389045295509</ATIME>
+      <MTIME>1389828266599</MTIME>
+      <ATIME>1389828266572</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -718,13 +718,13 @@
       <TXID>56</TXID>
       <LENGTH>0</LENGTH>
       <TRG>/file_concat_target</TRG>
-      <TIMESTAMP>1389045295535</TIMESTAMP>
+      <TIMESTAMP>1389828266603</TIMESTAMP>
       <SOURCES>
         <SOURCE1>/file_concat_0</SOURCE1>
         <SOURCE2>/file_concat_1</SOURCE2>
       </SOURCES>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>63</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>64</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -735,15 +735,15 @@
       <INODEID>16392</INODEID>
       <PATH>/file_symlink</PATH>
       <VALUE>/file_concat_target</VALUE>
-      <MTIME>1389045295540</MTIME>
-      <ATIME>1389045295540</ATIME>
+      <MTIME>1389828266633</MTIME>
+      <ATIME>1389828266633</ATIME>
       <PERMISSION_STATUS>
         <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>511</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>64</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>66</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -754,18 +754,18 @@
       <INODEID>16393</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1389045295543</MTIME>
-      <ATIME>1389045295543</ATIME>
+      <MTIME>1389828266637</MTIME>
+      <ATIME>1389828266637</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-216163116_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
         <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>65</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>67</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -821,7 +821,7 @@
     <OPCODE>OP_REASSIGN_LEASE</OPCODE>
     <DATA>
       <TXID>64</TXID>
-      <LEASEHOLDER>DFSClient_NONMAPREDUCE_-216163116_1</LEASEHOLDER>
+      <LEASEHOLDER>DFSClient_NONMAPREDUCE_16108824_1</LEASEHOLDER>
       <PATH>/hard-lease-recovery-test</PATH>
       <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
     </DATA>
@@ -834,8 +834,8 @@
       <INODEID>0</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1389045298180</MTIME>
-      <ATIME>1389045295543</ATIME>
+      <MTIME>1389828269751</MTIME>
+      <ATIME>1389828266637</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -861,8 +861,8 @@
       <MODE>493</MODE>
       <LIMIT>9223372036854775807</LIMIT>
       <MAXRELATIVEEXPIRY>2305843009213693951</MAXRELATIVEEXPIRY>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>72</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>74</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -871,8 +871,8 @@
       <TXID>67</TXID>
       <POOLNAME>pool1</POOLNAME>
       <LIMIT>99</LIMIT>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>73</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>75</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -883,9 +883,9 @@
       <PATH>/path</PATH>
       <REPLICATION>1</REPLICATION>
       <POOL>pool1</POOL>
-      <EXPIRATION>2305844398258992525</EXPIRATION>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>74</RPC_CALLID>
+      <EXPIRATION>2305844399041964876</EXPIRATION>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>76</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -894,8 +894,8 @@
       <TXID>69</TXID>
       <ID>1</ID>
       <REPLICATION>2</REPLICATION>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>75</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>77</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -903,8 +903,8 @@
     <DATA>
       <TXID>70</TXID>
       <ID>1</ID>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>76</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>78</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -912,8 +912,8 @@
     <DATA>
       <TXID>71</TXID>
       <POOLNAME>pool1</POOLNAME>
-      <RPC_CLIENTID>48c96601-9238-4d1f-b78b-ef0f1e922ba2</RPC_CLIENTID>
-      <RPC_CALLID>77</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>79</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>

+ 3 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -272,6 +272,9 @@ Release 2.4.0 - UNRELEASED
     MAPREDUCE-5689. MRAppMaster does not preempt reducers when scheduled maps 
     cannot be fulfilled. (lohit via kasha)
 
+    MAPREDUCE-5724. JobHistoryServer does not start if HDFS is not running. 
+    (tucu)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 7 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java

@@ -77,6 +77,13 @@ public class JHAdminConfig {
   public static final String MR_HISTORY_DONE_DIR =
     MR_HISTORY_PREFIX + "done-dir";
 
+  /**
+   * Maximum time the history server will wait for the FileSystem holding
+   * history files to become available. The default value is -1, meaning
+   * wait forever.
+   */
+  public static final String MR_HISTORY_MAX_START_WAIT_TIME =
+      MR_HISTORY_PREFIX + "maximum-start-wait-time-millis";
+  public static final long DEFAULT_MR_HISTORY_MAX_START_WAIT_TIME = -1;
   /**
   *  Path where history files should be stored after a job finishes and before
    *  they are pulled into the job history server.
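
The new key gives deployments a handle on how long the JobHistoryServer will block at startup. A hedged usage sketch, using only the constants added above (the wrapper class is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;

    public class HistoryStartWaitExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Cap the wait for the FileSystem at five minutes rather than the
        // default of -1, which retries forever.
        conf.setLong(JHAdminConfig.MR_HISTORY_MAX_START_WAIT_TIME,
            5 * 60 * 1000L);
        long maxWait = conf.getLong(
            JHAdminConfig.MR_HISTORY_MAX_START_WAIT_TIME,
            JHAdminConfig.DEFAULT_MR_HISTORY_MAX_START_WAIT_TIME);
        System.out.println("JobHistoryServer waits up to " + maxWait + " ms");
      }
    }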

+ 10 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml

@@ -33,6 +33,10 @@
   </properties>
 
   <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-common</artifactId>
@@ -53,6 +57,12 @@
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>

+ 110 - 31
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.v2.hs;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.ConnectException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -69,6 +70,8 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.SystemClock;
 
 /**
  * This class provides a way to interact with history files in a thread safe
@@ -464,7 +467,8 @@ public class HistoryFileManager extends AbstractService {
 
   private JobACLsManager aclsMgr;
 
-  private Configuration conf;
+  @VisibleForTesting
+  Configuration conf;
 
   private String serialNumberFormat;
 
@@ -491,36 +495,10 @@ public class HistoryFileManager extends AbstractService {
         + (JobHistoryUtils.SERIAL_NUMBER_DIRECTORY_DIGITS + serialNumberLowDigits)
         + "d");
 
-    String doneDirPrefix = null;
-    doneDirPrefix = JobHistoryUtils
-        .getConfiguredHistoryServerDoneDirPrefix(conf);
-    try {
-      doneDirPrefixPath = FileContext.getFileContext(conf).makeQualified(
-          new Path(doneDirPrefix));
-      doneDirFc = FileContext.getFileContext(doneDirPrefixPath.toUri(), conf);
-      doneDirFc.setUMask(JobHistoryUtils.HISTORY_DONE_DIR_UMASK);
-      mkdir(doneDirFc, doneDirPrefixPath, new FsPermission(
-          JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION));
-    } catch (IOException e) {
-      throw new YarnRuntimeException("Error creating done directory: ["
-          + doneDirPrefixPath + "]", e);
-    }
-
-    String intermediateDoneDirPrefix = null;
-    intermediateDoneDirPrefix = JobHistoryUtils
-        .getConfiguredHistoryIntermediateDoneDirPrefix(conf);
-    try {
-      intermediateDoneDirPath = FileContext.getFileContext(conf).makeQualified(
-          new Path(intermediateDoneDirPrefix));
-      intermediateDoneDirFc = FileContext.getFileContext(
-          intermediateDoneDirPath.toUri(), conf);
-      mkdir(intermediateDoneDirFc, intermediateDoneDirPath, new FsPermission(
-          JobHistoryUtils.HISTORY_INTERMEDIATE_DONE_DIR_PERMISSIONS.toShort()));
-    } catch (IOException e) {
-      LOG.info("error creating done directory on dfs " + e);
-      throw new YarnRuntimeException("Error creating intermediate done directory: ["
-          + intermediateDoneDirPath + "]", e);
-    }
+    long maxFSWaitTime = conf.getLong(
+        JHAdminConfig.MR_HISTORY_MAX_START_WAIT_TIME,
+        JHAdminConfig.DEFAULT_MR_HISTORY_MAX_START_WAIT_TIME);
+    createHistoryDirs(new SystemClock(), 10 * 1000, maxFSWaitTime);
 
     this.aclsMgr = new JobACLsManager(conf);
 
@@ -544,6 +522,107 @@ public class HistoryFileManager extends AbstractService {
     super.serviceInit(conf);
   }
 
+  @VisibleForTesting
+  void createHistoryDirs(Clock clock, long intervalCheckMillis,
+      long timeOutMillis) throws IOException {
+    long start = clock.getTime();
+    boolean done = false;
+    int counter = 0;
+    while (!done &&
+        ((timeOutMillis == -1) || (clock.getTime() - start < timeOutMillis))) {
+      done = tryCreatingHistoryDirs(counter++ % 3 == 0); // log on every third attempt (~30s at the 10s check interval)
+      try {
+        Thread.sleep(intervalCheckMillis);
+      } catch (InterruptedException ex) {
+        throw new YarnRuntimeException(ex);
+      }
+    }
+    if (!done) {
+      throw new YarnRuntimeException("Timed out '" + timeOutMillis+
+              "ms' waiting for FileSystem to become available");
+    }
+  }
+
+  /**
+   * DistributedFileSystem returns a RemoteException whose message contains
+   * "SafeModeException", so inspecting the message text is the only way to
+   * tell that the failure was caused by safe mode.
+   */
+  private boolean isBecauseSafeMode(Throwable ex) {
+    return ex.toString().contains("SafeModeException");
+  }
+
+  /**
+   * Returns TRUE if the history dirs were created, FALSE if they could not
+   * be created because the FileSystem is not reachable or is in safe mode,
+   * and throws an exception otherwise.
+   */
+  @VisibleForTesting
+  boolean tryCreatingHistoryDirs(boolean logWait) throws IOException {
+    boolean succeeded = true;
+    String doneDirPrefix = JobHistoryUtils.
+        getConfiguredHistoryServerDoneDirPrefix(conf);
+    try {
+      doneDirPrefixPath = FileContext.getFileContext(conf).makeQualified(
+          new Path(doneDirPrefix));
+      doneDirFc = FileContext.getFileContext(doneDirPrefixPath.toUri(), conf);
+      doneDirFc.setUMask(JobHistoryUtils.HISTORY_DONE_DIR_UMASK);
+      mkdir(doneDirFc, doneDirPrefixPath, new FsPermission(
+          JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION));
+    } catch (ConnectException ex) {
+      if (logWait) {
+        LOG.info("Waiting for FileSystem at " +
+            doneDirPrefixPath.toUri().getAuthority() + " to be available");
+      }
+      succeeded = false;
+    } catch (IOException e) {
+      if (isBecauseSafeMode(e)) {
+        succeeded = false;
+        if (logWait) {
+          LOG.info("Waiting for FileSystem at " +
+              doneDirPrefixPath.toUri().getAuthority() +
+              "to be out of safe mode");
+        }
+      } else {
+        throw new YarnRuntimeException("Error creating done directory: ["
+            + doneDirPrefixPath + "]", e);
+      }
+    }
+    if (succeeded) {
+      String intermediateDoneDirPrefix = JobHistoryUtils.
+          getConfiguredHistoryIntermediateDoneDirPrefix(conf);
+      try {
+        intermediateDoneDirPath = FileContext.getFileContext(conf).makeQualified(
+            new Path(intermediateDoneDirPrefix));
+        intermediateDoneDirFc = FileContext.getFileContext(
+            intermediateDoneDirPath.toUri(), conf);
+        mkdir(intermediateDoneDirFc, intermediateDoneDirPath, new FsPermission(
+            JobHistoryUtils.HISTORY_INTERMEDIATE_DONE_DIR_PERMISSIONS.toShort()));
+      } catch (ConnectException ex) {
+        succeeded = false;
+        if (logWait) {
+          LOG.info("Waiting for FileSystem at " +
+              intermediateDoneDirPath.toUri().getAuthority() +
+              "to be available");
+        }
+      } catch (IOException e) {
+        if (isBecauseSafeMode(e)) {
+          succeeded = false;
+          if (logWait) {
+            LOG.info("Waiting for FileSystem at " +
+                intermediateDoneDirPath.toUri().getAuthority() +
+                "to be out of safe mode");
+          }
+        } else {
+          throw new YarnRuntimeException(
+              "Error creating intermediate done directory: ["
+              + intermediateDoneDirPath + "]", e);
+        }
+      }
+    }
+    return succeeded;
+  }
+
   @Override
   public void serviceStop() throws Exception {
     ShutdownThreadsHelper.shutdownExecutorService(moveToDoneExecutor);
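
The createHistoryDirs loop above is a generic poll-until-deadline pattern. Distilled into a standalone sketch (the Attempt interface and waitUntil helper are hypothetical names; Clock is the YARN utility the patch injects for testability):

    import java.io.IOException;
    import org.apache.hadoop.yarn.util.Clock;

    public class FsWaitSketch {
      /** One try at the guarded action; true means it succeeded. */
      interface Attempt {
        boolean tryOnce() throws IOException;
      }

      /**
       * Poll until the attempt succeeds or the clock-measured deadline
       * passes; a timeout of -1 retries indefinitely, matching the
       * patch's default.
       */
      static void waitUntil(Attempt attempt, Clock clock,
          long intervalMillis, long timeoutMillis)
          throws IOException, InterruptedException {
        long start = clock.getTime();
        boolean done = false;
        while (!done && (timeoutMillis == -1
            || clock.getTime() - start < timeoutMillis)) {
          done = attempt.tryOnce();
          if (!done) {
            Thread.sleep(intervalMillis);
          }
        }
        if (!done) {
          throw new IOException("Timed out after " + timeoutMillis
              + "ms waiting for FileSystem");
        }
      }
    }

One deliberate difference from the hunk above: sleeping only on failure avoids the extra interval the original pays after a successful attempt.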

+ 139 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java

@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+
+import junit.framework.Assert;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.mapreduce.v2.app.ControlledClock;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.SystemClock;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.UUID;
+
+public class TestHistoryFileManager {
+  private static MiniDFSCluster dfsCluster = null;
+
+  @BeforeClass
+  public static void setUpClass() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    dfsCluster = new MiniDFSCluster.Builder(conf).build();
+  }
+
+  @AfterClass
+  public static void cleanUpClass() throws Exception {
+    dfsCluster.shutdown();
+  }
+
+  private void testTryCreateHistoryDirs(Configuration conf, boolean expected)
+      throws Exception {
+    conf.set(JHAdminConfig.MR_HISTORY_DONE_DIR, "/" + UUID.randomUUID());
+    conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR, "/" + UUID.randomUUID());
+    HistoryFileManager hfm = new HistoryFileManager();
+    hfm.conf = conf;
+    Assert.assertEquals(expected, hfm.tryCreatingHistoryDirs(false));
+  }
+
+  @Test
+  public void testCreateDirsWithoutFileSystem() throws Exception {
+    Configuration conf = new YarnConfiguration();
+    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:1");
+    testTryCreateHistoryDirs(conf, false);
+  }
+
+  @Test
+  public void testCreateDirsWithFileSystem() throws Exception {
+    dfsCluster.getFileSystem().setSafeMode(
+        HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
+    testTryCreateHistoryDirs(dfsCluster.getConfiguration(0), true);
+  }
+
+  @Test
+  public void testCreateDirsWithFileSystemInSafeMode() throws Exception {
+    dfsCluster.getFileSystem().setSafeMode(
+        HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
+    testTryCreateHistoryDirs(dfsCluster.getConfiguration(0), false);
+  }
+
+  private void testCreateHistoryDirs(Configuration conf, Clock clock)
+      throws Exception {
+    conf.set(JHAdminConfig.MR_HISTORY_DONE_DIR, "/" + UUID.randomUUID());
+    conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR, "/" + UUID.randomUUID());
+    HistoryFileManager hfm = new HistoryFileManager();
+    hfm.conf = conf;
+    hfm.createHistoryDirs(clock, 500, 2000);
+  }
+
+  @Test
+  public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout()
+      throws Exception {
+    dfsCluster.getFileSystem().setSafeMode(
+        HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
+    new Thread() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(500);
+          dfsCluster.getFileSystem().setSafeMode(
+              HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+          Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
+        } catch (Exception ex) {
+          Assert.fail(ex.toString());
+        }
+      }
+    }.start();
+    testCreateHistoryDirs(dfsCluster.getConfiguration(0), new SystemClock());
+  }
+
+  @Test(expected = YarnRuntimeException.class)
+  public void testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout()
+      throws Exception {
+    dfsCluster.getFileSystem().setSafeMode(
+        HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
+    final ControlledClock clock = new ControlledClock(new SystemClock());
+    clock.setTime(1);
+    new Thread() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(500);
+          clock.setTime(3000);
+        } catch (Exception ex) {
+          Assert.fail(ex.toString());
+        }
+      }
+    }.start();
+    testCreateHistoryDirs(dfsCluster.getConfiguration(0), clock);
+  }
+
+}
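
The timeout test works because ControlledClock lets the test advance time explicitly rather than waiting on the wall clock. A minimal stand-in showing the idea (ManualClock is a hypothetical name; Clock is the real YARN interface):

    import org.apache.hadoop.yarn.util.Clock;

    // A Clock whose time moves only when the test says so, making the
    // createHistoryDirs deadline deterministic.
    public class ManualClock implements Clock {
      private volatile long time;

      public void setTime(long t) {
        time = t;
      }

      @Override
      public long getTime() {
        return time;
      }
    }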

+ 3 - 0
hadoop-yarn-project/CHANGES.txt

@@ -424,6 +424,9 @@ Release 2.4.0 - UNRELEASED
 
     YARN-1601. 3rd party JARs are missing from hadoop-dist output. (tucu)
 
+    YARN-1351. Invalid string format in Fair Scheduler log warn message
+    (Konstantin Weitz via Sandy Ryza)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java

@@ -387,7 +387,7 @@ public class AllocationFileLoaderService extends AbstractService {
     if (maxQueueResources.containsKey(queueName) && minQueueResources.containsKey(queueName)
         && !Resources.fitsIn(minQueueResources.get(queueName),
             maxQueueResources.get(queueName))) {
-      LOG.warn(String.format("Queue %s has max resources %d less than min resources %d",
+      LOG.warn(String.format("Queue %s has max resources %s less than min resources %s",
           queueName, maxQueueResources.get(queueName), minQueueResources.get(queueName)));
     }
   }
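
The one-character fix matters because %d accepts only integral arguments, while minQueueResources and maxQueueResources hold Resource objects; with %d the warning itself threw java.util.IllegalFormatConversionException instead of logging. A standalone demonstration (class name hypothetical):

    public class FormatFixDemo {
      public static void main(String[] args) {
        Object resource = new Object() {
          @Override
          public String toString() {
            return "<memory:1024, vCores:1>";
          }
        };
        // %s formats any object via toString(), so this logs as intended:
        System.out.println(String.format("max resources %s", resource));
        // %d requires an integer type; anything else throws at runtime:
        try {
          System.out.println(String.format("max resources %d", resource));
        } catch (java.util.IllegalFormatConversionException e) {
          System.out.println("caught: " + e);
        }
      }
    }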