Merging r1543902 through r1544303 from trunk to branch HDFS-2832

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1544306 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal, 11 years ago
Parent commit: 16f7fb80d9
83 changed files, with 1689 additions and 755 deletions
  1. + 7 - 1    hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
  2. + 8 - 0    hadoop-common-project/hadoop-common/CHANGES.txt
  3. + 35 - 6   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
  4. + 7 - 4    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
  5. + 3 - 1    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  6. + 15 - 0   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
  7. + 14 - 0   hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  8. + 13 - 12  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  9. + 1 - 1    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  10. + 27 - 8  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  11. + 44 - 33 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  12. + 100 - 0 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
  13. + 62 - 5  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java
  14. + 45 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java
  15. + 11 - 11 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
  16. + 125 - 0 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
  17. + 2 - 2   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
  18. + 15 - 15 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  19. + 35 - 36 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  20. + 47 - 49 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  21. + 54 - 16 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  22. + 36 - 14 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
  23. + 21 - 2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
  24. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
  25. + 7 - 0   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  26. + 50 - 49 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
  27. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
  28. + 6 - 4   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  29. + 14 - 14 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  30. + 10 - 10 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  31. + 36 - 36 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
  32. + 8 - 4   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
  33. + 3 - 2   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
  34. + 9 - 5   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
  35. + 22 - 21 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  36. + 8 - 10  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
  37. + 1 - 2   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
  38. + 10 - 21 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
  39. + 9 - 15  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
  40. + 2 - 7   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
  41. + 4 - 0   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  42. + 23 - 22 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  43. + 1 - 3   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
  44. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
  45. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
  46. + 4 - 1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
  47. + 69 - 52 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
  48. + 3 - 3   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java
  49. + 34 - 10 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
  50. + 66 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
  51. + 4 - 1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
  52. + 33 - 7  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  53. + 30 - 23 hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
  54. + 1 - 0   hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
  55. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
  56. + 4 - 4   hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
  57. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
  58. + 5 - 5   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
  59. + 4 - 3   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  60. + 2 - 2   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
  61. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
  62. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
  63. + 6 - 6   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
  64. + 3 - 3   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
  65. + 144 - 79 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
  66. + 44 - 43 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
  67. + 12 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
  68. + 20 - 6  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
  69. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
  70. + 6 - 9   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
  71. + 22 - 6  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
  72. + 16 - 16 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml
  73. + 3 - 0   hadoop-mapreduce-project/CHANGES.txt
  74. + 17 - 11 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
  75. + 1 - 1   hadoop-project/pom.xml
  76. + 10 - 0  hadoop-yarn-project/CHANGES.txt
  77. + 16 - 5  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
  78. + 26 - 3  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
  79. + 95 - 0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
  80. + 8 - 1   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
  81. + 11 - 2  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
  82. + 1 - 1   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
  83. + 10 - 2  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java

+ 7 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java

@@ -196,7 +196,13 @@ public class KerberosAuthenticator implements Authenticator {
         doSpnegoSequence(token);
       } else {
         LOG.debug("Using fallback authenticator sequence.");
-        getFallBackAuthenticator().authenticate(url, token);
+        Authenticator auth = getFallBackAuthenticator();
+        // Make sure that the fall back authenticator have the same
+        // ConnectionConfigurator, since the method might be overridden.
+        // Otherwise the fall back authenticator might not have the information
+        // to make the connection (e.g., SSL certificates)
+        auth.setConnectionConfigurator(connConfigurator);
+        auth.authenticate(url, token);
       }
     }
   }

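For context, this fix matters when the caller supplies a ConnectionConfigurator (for example to install SSL trust settings) and SPNEGO falls back to the pseudo authenticator. A minimal, hypothetical client-side sketch of that wiring through hadoop-auth's AuthenticatedURL; the host name and the no-op configurator are placeholders, not part of this patch:

import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;

public class SpnegoFallbackSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder configurator; a real one would install an SSLSocketFactory and hostname verifier.
    ConnectionConfigurator sslConfigurator = new ConnectionConfigurator() {
      @Override
      public HttpURLConnection configure(HttpURLConnection conn) {
        return conn;
      }
    };
    // AuthenticatedURL hands the configurator to the authenticator; with the change above,
    // KerberosAuthenticator now forwards it to its fallback authenticator as well, so the
    // fallback path can still open HTTPS connections.
    AuthenticatedURL authUrl =
        new AuthenticatedURL(new KerberosAuthenticator(), sslConfigurator);
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    HttpURLConnection conn = authUrl.openConnection(
        new URL("https://nn.example.com:50470/webhdfs/v1/?op=GETHOMEDIRECTORY"), token);
    System.out.println("HTTP " + conn.getResponseCode());
  }
}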
+ 8 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -383,6 +383,11 @@ Release 2.3.0 - UNRELEASED
 
     HADOOP-10067. Missing POM dependency on jsr305 (Robert Rati via stevel)
 
+    HADOOP-10103. update commons-lang to 2.6 (Akira AJISAKA via stevel)
+
+    HADOOP-10111. Allow DU to be initialized with an initial value (Kihwal Lee
+    via jeagles)
+
   OPTIMIZATIONS
 
     HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
@@ -500,6 +505,9 @@ Release 2.2.1 - UNRELEASED
     HADOOP-10110. hadoop-auth has a build break due to missing dependency.
     (Chuan Liu via arp)
 
+    HADOOP-9114. After defined the dfs.checksum.type as the NULL, write file and hflush will 
+    through java.lang.ArrayIndexOutOfBoundsException (Sathish via umamahesh)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

+ 35 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java

@@ -47,17 +47,32 @@ public class DU extends Shell {
    * @throws IOException if we fail to refresh the disk usage
    */
   public DU(File path, long interval) throws IOException {
+    this(path, interval, -1L);
+  }
+  
+  /**
+   * Keeps track of disk usage.
+   * @param path the path to check disk usage in
+   * @param interval refresh the disk usage at this interval
+   * @param initialUsed use this value until next refresh
+   * @throws IOException if we fail to refresh the disk usage
+   */
+  public DU(File path, long interval, long initialUsed) throws IOException { 
     super(0);
-    
+
     //we set the Shell interval to 0 so it will always run our command
     //and use this one to set the thread sleep interval
     this.refreshInterval = interval;
     this.dirPath = path.getCanonicalPath();
-    
-    //populate the used variable
-    run();
+
+    //populate the used variable if the initial value is not specified.
+    if (initialUsed < 0) {
+      run();
+    } else {
+      this.used.set(initialUsed);
+    }
   }
-  
+
   /**
    * Keeps track of disk usage.
    * @param path the path to check disk usage in
@@ -65,9 +80,23 @@ public class DU extends Shell {
    * @throws IOException if we fail to refresh the disk usage
    */
   public DU(File path, Configuration conf) throws IOException {
+    this(path, conf, -1L);
+  }
+
+  /**
+   * Keeps track of disk usage.
+   * @param path the path to check disk usage in
+   * @param conf configuration object
+   * @param initialUsed use it until the next refresh.
+   * @throws IOException if we fail to refresh the disk usage
+   */
+  public DU(File path, Configuration conf, long initialUsed)
+      throws IOException {
     this(path, conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY,
-                CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT));
+                CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT), initialUsed);
   }
+    
+  
 
   /**
    * This thread refreshes the "used" variable.

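The new three-argument constructors above let a caller seed the cached "used" value (for example from a previously persisted figure) so the first getUsed() call does not block on running du. A small usage sketch under that assumption; the directory path and seed value are made up:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DU;

public class DUInitialValueSketch {
  public static void main(String[] args) throws IOException {
    // Seed the estimate with 1 MB; the refresh interval comes from fs.du.interval.
    DU du = new DU(new File("/data/dn1/current"), new Configuration(), 1024L * 1024L);
    du.start();                        // background thread keeps "used" up to date
    System.out.println(du.getUsed());  // returns the seeded value until the first refresh
    du.shutdown();
  }
}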
+ 7 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java

@@ -183,10 +183,13 @@ abstract public class FSOutputSummer extends OutputStream {
   }
 
   static byte[] int2byte(int integer, byte[] bytes) {
-    bytes[0] = (byte)((integer >>> 24) & 0xFF);
-    bytes[1] = (byte)((integer >>> 16) & 0xFF);
-    bytes[2] = (byte)((integer >>>  8) & 0xFF);
-    bytes[3] = (byte)((integer >>>  0) & 0xFF);
+    if (bytes.length != 0) {
+      bytes[0] = (byte) ((integer >>> 24) & 0xFF);
+      bytes[1] = (byte) ((integer >>> 16) & 0xFF);
+      bytes[2] = (byte) ((integer >>> 8) & 0xFF);
+      bytes[3] = (byte) ((integer >>> 0) & 0xFF);
+      return bytes;
+    }
     return bytes;
   }
 

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -472,7 +472,9 @@ public class HttpServer implements FilterContainer {
       if (conf.getBoolean(
           CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
           CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
-        logContext.getInitParams().put(
+        @SuppressWarnings("unchecked")
+        Map<String, String> params = logContext.getInitParams();
+        params.put(
             "org.mortbay.jetty.servlet.Default.aliases", "true");
       }
       logContext.setDisplayName("logs");

+ 15 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java

@@ -116,4 +116,19 @@ public class TestDU extends TestCase {
     long duSize = du.getUsed();
     assertTrue(String.valueOf(duSize), duSize >= 0L);
   }
+
+  public void testDUSetInitialValue() throws IOException {
+    File file = new File(DU_DIR, "dataX");
+    createFile(file, 8192);
+    DU du = new DU(file, 3000, 1024);
+    du.start();
+    assertTrue("Initial usage setting not honored", du.getUsed() == 1024);
+
+    // wait until the first du runs.
+    try {
+      Thread.sleep(5000);
+    } catch (InterruptedException ie) {}
+
+    assertTrue("Usage didn't get updated", du.getUsed() == 8192);
+  }
 }

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -204,6 +204,15 @@ Trunk (Unreleased)
 
     HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9)
 
+    HDFS-5451. Add byte and file statistics to PathBasedCacheEntry.
+    (Colin Patrick McCabe via Andrew Wang)
+
+    HDFS-5531. Combine the getNsQuota() and getDsQuota() methods in INode.
+    (szetszwo)
+
+    HDFS-5473. Consistent naming of user-visible caching classes and methods
+    (cmccabe)
+
   OPTIMIZATIONS
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
 
@@ -411,6 +420,8 @@ Release 2.3.0 - UNRELEASED
     HDFS-5382. Implement the UI of browsing filesystems in HTML 5 page. (Haohui
     Mai via jing9)
 
+    HDFS-3987. Support webhdfs over HTTPS. (Haohui Mai via jing9)
+
   IMPROVEMENTS
 
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
@@ -515,6 +526,9 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-1386. TestJMXGet fails in jdk7 (jeagles)
 
+    HDFS-5532. Enable the webhdfs by default to support new HDFS web UI. (Vinay
+    via jing9)
+
   OPTIMIZATIONS
 
     HDFS-5239.  Allow FSNamesystem lock fairness to be configurable (daryn)

+ 13 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -108,6 +108,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.ClientMmapManager;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -117,7 +118,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -2290,41 +2291,41 @@ public class DFSClient implements java.io.Closeable {
     }
   }
 
-  public long addPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException {
+  public long addCacheDirective(
+      CacheDirectiveInfo info) throws IOException {
     checkOpen();
     try {
-      return namenode.addPathBasedCacheDirective(directive);
+      return namenode.addCacheDirective(info);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException();
     }
   }
   
-  public void modifyPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException {
+  public void modifyCacheDirective(
+      CacheDirectiveInfo info) throws IOException {
     checkOpen();
     try {
-      namenode.modifyPathBasedCacheDirective(directive);
+      namenode.modifyCacheDirective(info);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException();
     }
   }
 
-  public void removePathBasedCacheDirective(long id)
+  public void removeCacheDirective(long id)
       throws IOException {
     checkOpen();
     try {
-      namenode.removePathBasedCacheDirective(id);
+      namenode.removeCacheDirective(id);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException();
     }
   }
   
-  public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(
-      PathBasedCacheDirective filter) throws IOException {
+  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
+      CacheDirectiveInfo filter) throws IOException {
     checkOpen();
     try {
-      return namenode.listPathBasedCacheDirectives(0, filter);
+      return namenode.listCacheDirectives(0, filter);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException();
     }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -162,7 +162,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY = "dfs.namenode.replication.max-streams-hard-limit";
   public static final int     DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
   public static final String  DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
-  public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = false;
+  public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
   public static final String  DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
   public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true;
   public static final String  DFS_PERSIST_BLOCKS_KEY = "dfs.persist.blocks";

+ 27 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -73,6 +73,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -622,12 +624,19 @@ public class DFSUtil {
    * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
    * the configuration.
    *
-   * @param conf configuration
    * @return list of InetSocketAddresses
    */
-  public static Map<String, Map<String, InetSocketAddress>> getHaNnHttpAddresses(
-      Configuration conf) {
-    return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+  public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
+      Configuration conf, String scheme) {
+    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
+      return getAddresses(conf, null,
+          DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
+      return getAddresses(conf, null,
+          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
+    } else {
+      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
+    }
   }
 
   /**
@@ -636,18 +645,28 @@ public class DFSUtil {
    * cluster, the resolver further resolves the logical name (i.e., the authority
    * in the URL) into real namenode addresses.
    */
-  public static InetSocketAddress[] resolve(URI uri, int schemeDefaultPort,
-      Configuration conf) throws IOException {
+  public static InetSocketAddress[] resolveWebHdfsUri(URI uri, Configuration conf)
+      throws IOException {
+    int defaultPort;
+    String scheme = uri.getScheme();
+    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
+      defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
+    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
+      defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
+    } else {
+      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
+    }
+
     ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();
 
     if (!HAUtil.isLogicalUri(conf, uri)) {
       InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
-          schemeDefaultPort);
+          defaultPort);
       ret.add(addr);
 
     } else {
       Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
-          .getHaNnHttpAddresses(conf);
+          .getHaNnWebHdfsAddresses(conf, scheme);
 
       for (Map<String, InetSocketAddress> addrs : addresses.values()) {
         for (InetSocketAddress addr : addrs.values()) {

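These DFSUtil changes back the new swebhdfs scheme from HDFS-3987 (see the SWebHdfsFileSystem.java entry in the file list): webhdfs URIs resolve against dfs.namenode.http-address while swebhdfs URIs resolve against dfs.namenode.https-address. A hedged client-side usage sketch; the host name is a placeholder and 50470 is only the stock HTTPS default port:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SWebHdfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // swebhdfs:// goes over HTTPS to the namenode's dfs.namenode.https-address,
    // while webhdfs:// keeps using the HTTP address.
    FileSystem fs = FileSystem.get(URI.create("swebhdfs://nn.example.com:50470/"), conf);
    for (FileStatus st : fs.listStatus(new Path("/"))) {
      System.out.println(st.getPath());
    }
    fs.close();
  }
}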
+ 44 - 33
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -67,7 +68,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
@@ -1584,78 +1585,88 @@ public class DistributedFileSystem extends FileSystem {
   }
 
   /**
-   * Add a new PathBasedCacheDirective.
+   * Add a new CacheDirective.
    * 
-   * @param directive A directive to add.
+   * @param info Information about a directive to add.
    * @return the ID of the directive that was created.
    * @throws IOException if the directive could not be added
    */
-  public long addPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException {
-    Preconditions.checkNotNull(directive.getPath());
-    Path path = new Path(getPathName(fixRelativePart(directive.getPath()))).
+  public long addCacheDirective(
+      CacheDirectiveInfo info) throws IOException {
+    Preconditions.checkNotNull(info.getPath());
+    Path path = new Path(getPathName(fixRelativePart(info.getPath()))).
         makeQualified(getUri(), getWorkingDirectory());
-    return dfs.addPathBasedCacheDirective(
-        new PathBasedCacheDirective.Builder(directive).
+    return dfs.addCacheDirective(
+        new CacheDirectiveInfo.Builder(info).
            setPath(path).
            build());
   }
   
-  public void modifyPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException {
-    if (directive.getPath() != null) {
-      directive = new PathBasedCacheDirective.Builder(directive).
-          setPath(new Path(getPathName(fixRelativePart(directive.getPath()))).
+  /**
+   * Modify a CacheDirective.
+   * 
+   * @param info Information about the directive to modify.
+   *             You must set the ID to indicate which CacheDirective you want
+   *             to modify.
+   * @throws IOException if the directive could not be modified
+   */
+  public void modifyCacheDirective(
+      CacheDirectiveInfo info) throws IOException {
+    if (info.getPath() != null) {
+      info = new CacheDirectiveInfo.Builder(info).
+          setPath(new Path(getPathName(fixRelativePart(info.getPath()))).
              makeQualified(getUri(), getWorkingDirectory())).build();
     }
-    dfs.modifyPathBasedCacheDirective(directive);
+    dfs.modifyCacheDirective(info);
   }
 
   /**
-   * Remove a PathBasedCacheDirective.
+   * Remove a CacheDirectiveInfo.
    * 
-   * @param id identifier of the PathBasedCacheDirective to remove
+   * @param id identifier of the CacheDirectiveInfo to remove
    * @throws IOException if the directive could not be removed
    */
-  public void removePathBasedCacheDirective(long id)
+  public void removeCacheDirective(long id)
       throws IOException {
-    dfs.removePathBasedCacheDirective(id);
+    dfs.removeCacheDirective(id);
   }
   
   /**
-   * List the set of cached paths of a cache pool. Incrementally fetches results
-   * from the server.
+   * List cache directives.  Incrementally fetches results from the server.
    * 
    * @param filter Filter parameters to use when listing the directives, null to
    *               list all directives visible to us.
-   * @return A RemoteIterator which returns PathBasedCacheDirective objects.
+   * @return A RemoteIterator which returns CacheDirectiveInfo objects.
    */
-  public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(
-      PathBasedCacheDirective filter) throws IOException {
+  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
+      CacheDirectiveInfo filter) throws IOException {
     if (filter == null) {
-      filter = new PathBasedCacheDirective.Builder().build();
+      filter = new CacheDirectiveInfo.Builder().build();
     }
     if (filter.getPath() != null) {
-      filter = new PathBasedCacheDirective.Builder(filter).
+      filter = new CacheDirectiveInfo.Builder(filter).
          setPath(new Path(getPathName(fixRelativePart(filter.getPath())))).
          build();
     }
-    final RemoteIterator<PathBasedCacheDirective> iter =
-        dfs.listPathBasedCacheDirectives(filter);
-    return new RemoteIterator<PathBasedCacheDirective>() {
+    final RemoteIterator<CacheDirectiveEntry> iter =
+        dfs.listCacheDirectives(filter);
+    return new RemoteIterator<CacheDirectiveEntry>() {
       @Override
       public boolean hasNext() throws IOException {
        return iter.hasNext();
       }
 
       @Override
-      public PathBasedCacheDirective next() throws IOException {
+      public CacheDirectiveEntry next() throws IOException {
        // Although the paths we get back from the NameNode should always be
        // absolute, we call makeQualified to add the scheme and authority of
        // this DistributedFilesystem.
-        PathBasedCacheDirective desc = iter.next();
-        Path p = desc.getPath().makeQualified(getUri(), getWorkingDirectory());
-        return new PathBasedCacheDirective.Builder(desc).setPath(p).build();
+        CacheDirectiveEntry desc = iter.next();
+        CacheDirectiveInfo info = desc.getInfo();
+        Path p = info.getPath().makeQualified(getUri(), getWorkingDirectory());
+        return new CacheDirectiveEntry(
+            new CacheDirectiveInfo.Builder(info).setPath(p).build(),
+            desc.getStats());
       }
     };
   }

+ 100 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java

@@ -25,7 +25,11 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 
@@ -121,4 +125,100 @@ public class HdfsAdmin {
   public void disallowSnapshot(Path path) throws IOException {
     dfs.disallowSnapshot(path);
   }
+
+  /**
+   * Add a new CacheDirectiveInfo.
+   * 
+   * @param info Information about a directive to add.
+   * @return the ID of the directive that was created.
+   * @throws IOException if the directive could not be added
+   */
+  public long addCacheDirective(CacheDirectiveInfo info)
+      throws IOException {
+    return dfs.addCacheDirective(info);
+  }
+  
+  /**
+   * Modify a CacheDirective.
+   * 
+   * @param info Information about the directive to modify.
+   *             You must set the ID to indicate which CacheDirective you want
+   *             to modify.
+   * @throws IOException if the directive could not be modified
+   */
+  public void modifyCacheDirective(CacheDirectiveInfo info)
+      throws IOException {
+    dfs.modifyCacheDirective(info);
+  }
+
+  /**
+   * Remove a CacheDirective.
+   * 
+   * @param id identifier of the CacheDirectiveInfo to remove
+   * @throws IOException if the directive could not be removed
+   */
+  public void removeCacheDirective(long id)
+      throws IOException {
+    dfs.removeCacheDirective(id);
+  }
+
+  /**
+   * List cache directives. Incrementally fetches results from the server.
+   * 
+   * @param filter Filter parameters to use when listing the directives, null to
+   *               list all directives visible to us.
+   * @return A RemoteIterator which returns CacheDirectiveInfo objects.
+   */
+  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
+      CacheDirectiveInfo filter) throws IOException {
+    return dfs.listCacheDirectives(filter);
+  }
+
+  /**
+   * Add a cache pool.
+   *
+   * @param info
+   *          The request to add a cache pool.
+   * @throws IOException 
+   *          If the request could not be completed.
+   */
+  public void addCachePool(CachePoolInfo info) throws IOException {
+    dfs.addCachePool(info);
+  }
+
+  /**
+   * Modify an existing cache pool.
+   *
+   * @param info
+   *          The request to modify a cache pool.
+   * @throws IOException 
+   *          If the request could not be completed.
+   */
+  public void modifyCachePool(CachePoolInfo info) throws IOException {
+    dfs.modifyCachePool(info);
+  }
+    
+  /**
+   * Remove a cache pool.
+   *
+   * @param poolName
+   *          Name of the cache pool to remove.
+   * @throws IOException 
+   *          if the cache pool did not exist, or could not be removed.
+   */
+  public void removeCachePool(String poolName) throws IOException {
+    dfs.removeCachePool(poolName);
+  }
+
+  /**
+   * List all cache pools.
+   *
+   * @return A remote iterator from which you can get CachePoolInfo objects.
+   *          Requests will be made as needed.
+   * @throws IOException
+   *          If there was an error listing cache pools.
+   */
+  public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+    return dfs.listCachePools();
+  }
 }

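The methods added to HdfsAdmin above expose the renamed caching API (HDFS-5473) together with the new per-directive statistics (HDFS-5451) to applications. A hedged end-to-end sketch; the NameNode URI, pool name, and path are placeholders:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CacheAdminSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://nn.example.com:8020"), conf);

    // Create a pool and cache a path in it with replication 2.
    admin.addCachePool(new CachePoolInfo("reports"));
    long id = admin.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/data/reports"))
        .setReplication((short) 2)
        .setPool("reports")
        .build());

    // List directives and print the per-directive stats that this merge introduces.
    RemoteIterator<CacheDirectiveEntry> it = admin.listCacheDirectives(null);
    while (it.hasNext()) {
      CacheDirectiveEntry entry = it.next();
      System.out.println(entry.getInfo() + " -> " + entry.getStats());
    }

    admin.removeCacheDirective(id);
  }
}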
+ 62 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java → hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java

@@ -30,13 +30,16 @@ import com.google.common.base.Preconditions;
  * This is an implementation class, not part of the public API.
  */
 @InterfaceAudience.Private
-public final class PathBasedCacheEntry {
+public final class CacheDirective {
   private final long entryId;
   private final String path;
   private final short replication;
   private final CachePool pool;
+  private long bytesNeeded;
+  private long bytesCached;
+  private long filesAffected;
 
-  public PathBasedCacheEntry(long entryId, String path,
+  public CacheDirective(long entryId, String path,
       short replication, CachePool pool) {
     Preconditions.checkArgument(entryId > 0);
     this.entryId = entryId;
@@ -46,6 +49,9 @@ public final class PathBasedCacheEntry {
     this.replication = replication;
     Preconditions.checkNotNull(path);
     this.pool = pool;
+    this.bytesNeeded = 0;
+    this.bytesCached = 0;
+    this.filesAffected = 0;
   }
 
   public long getEntryId() {
@@ -64,14 +70,26 @@ public final class PathBasedCacheEntry {
     return replication;
   }
 
-  public PathBasedCacheDirective toDirective() {
-    return new PathBasedCacheDirective.Builder().
+  public CacheDirectiveInfo toDirective() {
+    return new CacheDirectiveInfo.Builder().
        setId(entryId).
        setPath(new Path(path)).
        setReplication(replication).
        setPool(pool.getPoolName()).
        build();
   }
+
+  public CacheDirectiveStats toStats() {
+    return new CacheDirectiveStats.Builder().
+        setBytesNeeded(bytesNeeded).
+        setBytesCached(bytesCached).
+        setFilesAffected(filesAffected).
+        build();
+  }
+
+  public CacheDirectiveEntry toEntry() {
+    return new CacheDirectiveEntry(toDirective(), toStats());
+  }
   
   @Override
   public String toString() {
@@ -80,6 +98,9 @@ public final class PathBasedCacheEntry {
       append(", path:").append(path).
       append(", replication:").append(replication).
       append(", pool:").append(pool).
+      append(", bytesNeeded:").append(bytesNeeded).
+      append(", bytesCached:").append(bytesCached).
+      append(", filesAffected:").append(filesAffected).
       append(" }");
     return builder.toString();
   }
@@ -91,7 +112,7 @@ public final class PathBasedCacheEntry {
     if (o.getClass() != this.getClass()) {
       return false;
     }
-    PathBasedCacheEntry other = (PathBasedCacheEntry)o;
+    CacheDirective other = (CacheDirective)o;
     return entryId == other.entryId;
   }
 
@@ -99,4 +120,40 @@ public final class PathBasedCacheEntry {
   public int hashCode() {
     return new HashCodeBuilder().append(entryId).toHashCode();
   }
+
+  public long getBytesNeeded() {
+    return bytesNeeded;
+  }
+
+  public void clearBytesNeeded() {
+    this.bytesNeeded = 0;
+  }
+
+  public void addBytesNeeded(long toAdd) {
+    this.bytesNeeded += toAdd;
+  }
+
+  public long getBytesCached() {
+    return bytesCached;
+  }
+
+  public void clearBytesCached() {
+    this.bytesCached = 0;
+  }
+
+  public void addBytesCached(long toAdd) {
+    this.bytesCached += toAdd;
+  }
+
+  public long getFilesAffected() {
+    return filesAffected;
+  }
+
+  public void clearFilesAffected() {
+    this.filesAffected = 0;
+  }
+
+  public void incrementFilesAffected() {
+    this.filesAffected++;
+  }
 };

+ 45 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java

@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Describes a path-based cache directive entry.
+ */
+@InterfaceStability.Evolving
+@InterfaceAudience.Public
+public class CacheDirectiveEntry {
+  private final CacheDirectiveInfo info;
+  private final CacheDirectiveStats stats;
+
+  public CacheDirectiveEntry(CacheDirectiveInfo info,
+      CacheDirectiveStats stats) {
+    this.info = info;
+    this.stats = stats;
+  }
+
+  public CacheDirectiveInfo getInfo() {
+    return info;
+  }
+
+  public CacheDirectiveStats getStats() {
+    return stats;
+  }
+};

+ 11 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java → hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java

@@ -28,9 +28,9 @@ import org.apache.hadoop.fs.Path;
  */
 @InterfaceStability.Evolving
 @InterfaceAudience.Public
-public class PathBasedCacheDirective {
+public class CacheDirectiveInfo {
   /**
-   * A builder for creating new PathBasedCacheDirective instances.
+   * A builder for creating new CacheDirectiveInfo instances.
    */
   public static class Builder {
     private Long id;
@@ -39,12 +39,12 @@ public class PathBasedCacheDirective {
     private String pool;
 
     /**
-     * Builds a new PathBasedCacheDirective populated with the set properties.
+     * Builds a new CacheDirectiveInfo populated with the set properties.
      * 
-     * @return New PathBasedCacheDirective.
+     * @return New CacheDirectiveInfo.
      */
-    public PathBasedCacheDirective build() {
-      return new PathBasedCacheDirective(id, path, replication, pool);
+    public CacheDirectiveInfo build() {
+      return new CacheDirectiveInfo(id, path, replication, pool);
     }
 
     /**
@@ -55,9 +55,9 @@ public class PathBasedCacheDirective {
 
     /**
      * Creates a builder with all elements set to the same values as the
-     * given PathBasedCacheDirective.
+     * given CacheDirectiveInfo.
      */
-    public Builder(PathBasedCacheDirective directive) {
+    public Builder(CacheDirectiveInfo directive) {
       this.id = directive.getId();
       this.path = directive.getPath();
       this.replication = directive.getReplication();
@@ -114,7 +114,7 @@ public class PathBasedCacheDirective {
   private final Short replication;
   private final String pool;
 
-  PathBasedCacheDirective(Long id, Path path, Short replication, String pool) {
+  CacheDirectiveInfo(Long id, Path path, Short replication, String pool) {
     this.id = id;
     this.path = path;
     this.replication = replication;
@@ -148,7 +148,7 @@ public class PathBasedCacheDirective {
   public String getPool() {
     return pool;
   }
-
+  
   @Override
   public boolean equals(Object o) {
     if (o == null) {
@@ -157,7 +157,7 @@ public class PathBasedCacheDirective {
     if (getClass() != o.getClass()) {
       return false;
     }
-    PathBasedCacheDirective other = (PathBasedCacheDirective)o;
+    CacheDirectiveInfo other = (CacheDirectiveInfo)o;
     return new EqualsBuilder().append(getId(), other.getId()).
         append(getPath(), other.getPath()).
         append(getReplication(), other.getReplication()).

+ 125 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java

@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Describes a path-based cache directive.
+ */
+@InterfaceStability.Evolving
+@InterfaceAudience.Public
+public class CacheDirectiveStats {
+  public static class Builder {
+    private long bytesNeeded;
+    private long bytesCached;
+    private long filesAffected;
+
+    /**
+     * Builds a new CacheDirectiveStats populated with the set properties.
+     * 
+     * @return New CacheDirectiveStats.
+     */
+    public CacheDirectiveStats build() {
+      return new CacheDirectiveStats(bytesNeeded, bytesCached, filesAffected);
+    }
+
+    /**
+     * Creates an empty builder.
+     */
+    public Builder() {
+    }
+
+    /**
+     * Sets the bytes needed by this directive.
+     * 
+     * @param bytesNeeded The bytes needed.
+     * @return This builder, for call chaining.
+     */
+    public Builder setBytesNeeded(Long bytesNeeded) {
+      this.bytesNeeded = bytesNeeded;
+      return this;
+    }
+
+    /**
+     * Sets the bytes cached by this directive.
+     * 
+     * @param bytesCached The bytes cached.
+     * @return This builder, for call chaining.
+     */
+    public Builder setBytesCached(Long bytesCached) {
+      this.bytesCached = bytesCached;
+      return this;
+    }
+
+    /**
+     * Sets the files affected by this directive.
+     * 
+     * @param filesAffected The files affected.
+     * @return This builder, for call chaining.
+     */
+    public Builder setFilesAffected(Long filesAffected) {
+      this.filesAffected = filesAffected;
+      return this;
+    }
+  }
+
+  private final long bytesNeeded;
+  private final long bytesCached;
+  private final long filesAffected;
+
+  private CacheDirectiveStats(long bytesNeeded, long bytesCached,
+      long filesAffected) {
+    this.bytesNeeded = bytesNeeded;
+    this.bytesCached = bytesCached;
+    this.filesAffected = filesAffected;
+  }
+
+  /**
+   * @return The bytes needed.
+   */
+  public Long getBytesNeeded() {
+    return bytesNeeded;
+  }
+
+  /**
+   * @return The bytes cached.
+   */
+  public Long getBytesCached() {
+    return bytesCached;
+  }
+
+  /**
+   * @return The files affected.
+   */
+  public Long getFilesAffected() {
+    return filesAffected;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    builder.append("{");
+    builder.append("bytesNeeded: ").append(bytesNeeded);
+    builder.append(", ").append("bytesCached: ").append(bytesCached);
+    builder.append(", ").append("filesAffected: ").append(filesAffected);
+    builder.append("}");
+    return builder.toString();
+  }
+};

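As a quick illustration of the builder added above (the numbers are arbitrary), this mirrors how CacheDirective.toStats() in this patch assembles a snapshot before it is shipped to clients inside a CacheDirectiveEntry:

import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;

public class CacheDirectiveStatsSketch {
  public static void main(String[] args) {
    CacheDirectiveStats stats = new CacheDirectiveStats.Builder()
        .setBytesNeeded(1048576L)
        .setBytesCached(524288L)
        .setFilesAffected(3L)
        .build();
    // Prints: {bytesNeeded: 1048576, bytesCached: 524288, filesAffected: 3}
    System.out.println(stats);
  }
}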
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java

@@ -46,7 +46,7 @@ import org.xml.sax.SAXException;
  * This class is used in RPCs to create and modify cache pools.
  * It is serializable and can be stored in the edit log.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class CachePoolInfo {
   public static final Log LOG = LogFactory.getLog(CachePoolInfo.class);
@@ -225,4 +225,4 @@ public class CachePoolInfo {
         setMode(perm.getPermission()).
         setWeight(weight);
   }
-}
+}

+ 15 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -1097,49 +1097,49 @@ public interface ClientProtocol {
       String fromSnapshot, String toSnapshot) throws IOException;
 
   /**
-   * Add a PathBasedCache entry to the CacheManager.
+   * Add a CacheDirective to the CacheManager.
    * 
-   * @param directive A PathBasedCacheDirective to be added
-   * @return A PathBasedCacheDirective associated with the added directive
+   * @param directive A CacheDirectiveInfo to be added
+   * @return A CacheDirectiveInfo associated with the added directive
    * @throws IOException if the directive could not be added
    */
   @AtMostOnce
-  public long addPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException;
+  public long addCacheDirective(
+      CacheDirectiveInfo directive) throws IOException;
 
   /**
-   * Modify a PathBasedCache entry in the CacheManager.
+   * Modify a CacheDirective in the CacheManager.
    * 
    * @return directive The directive to modify.  Must contain 
    *                   a directive ID.
    * @throws IOException if the directive could not be modified
    */
   @AtMostOnce
-  public void modifyPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException;
+  public void modifyCacheDirective(
+      CacheDirectiveInfo directive) throws IOException;
 
   /**
-   * Remove a PathBasedCacheDirective from the CacheManager.
+   * Remove a CacheDirectiveInfo from the CacheManager.
    * 
-   * @param id of a PathBasedCacheDirective
+   * @param id of a CacheDirectiveInfo
    * @throws IOException if the cache directive could not be removed
    */
   @AtMostOnce
-  public void removePathBasedCacheDirective(long id) throws IOException;
+  public void removeCacheDirective(long id) throws IOException;
 
   /**
    * List the set of cached paths of a cache pool. Incrementally fetches results
    * from the server.
    * 
    * @param prevId The last listed entry ID, or -1 if this is the first call to
-   *               listPathBasedCacheDirectives.
+   *               listCacheDirectives.
    * @param filter Parameters to use to filter the list results, 
    *               or null to display all directives visible to us.
-   * @return A RemoteIterator which returns PathBasedCacheDirective objects.
+   * @return A RemoteIterator which returns CacheDirectiveInfo objects.
    */
   @Idempotent
-  public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(
-      long prevId, PathBasedCacheDirective filter) throws IOException;
+  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
+      long prevId, CacheDirectiveInfo filter) throws IOException;
 
   /**
    * Add a new cache pool.

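Taken together, the renamed methods give listing code the following shape. This is a hedged sketch rather than code from the patch: namenode stands for any ClientProtocol proxy, the pool name is invented, and it assumes CacheDirectiveInfo.Builder exposes setPool(), which the PBHelper conversion later in this patch relies on.

    // Walk every directive in a (hypothetical) pool; -1 means "start from the
    // beginning", per the prevId javadoc above.
    CacheDirectiveInfo filter =
        new CacheDirectiveInfo.Builder().setPool("testPool").build();
    RemoteIterator<CacheDirectiveEntry> it = namenode.listCacheDirectives(-1, filter);
    while (it.hasNext()) {
      CacheDirectiveEntry entry = it.next();
      System.out.println(entry.getInfo().getId() + " -> " + entry.getStats());
    }
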
+ 35 - 36
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -35,7 +36,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
@@ -44,8 +45,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlo
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
@@ -106,25 +107,25 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCa
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesElementProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyPathBasedCacheDirectiveRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyPathBasedCacheDirectiveResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDirectiveRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDirectiveResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
@@ -1035,12 +1036,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
-  public AddPathBasedCacheDirectiveResponseProto addPathBasedCacheDirective(
-      RpcController controller, AddPathBasedCacheDirectiveRequestProto request)
+  public AddCacheDirectiveResponseProto addCacheDirective(
+      RpcController controller, AddCacheDirectiveRequestProto request)
       throws ServiceException {
     try {
-      return AddPathBasedCacheDirectiveResponseProto.newBuilder().
-              setId(server.addPathBasedCacheDirective(
+      return AddCacheDirectiveResponseProto.newBuilder().
+              setId(server.addCacheDirective(
                   PBHelper.convert(request.getInfo()))).build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1048,26 +1049,26 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
-  public ModifyPathBasedCacheDirectiveResponseProto modifyPathBasedCacheDirective(
-      RpcController controller, ModifyPathBasedCacheDirectiveRequestProto request)
+  public ModifyCacheDirectiveResponseProto modifyCacheDirective(
+      RpcController controller, ModifyCacheDirectiveRequestProto request)
       throws ServiceException {
     try {
-      server.modifyPathBasedCacheDirective(
+      server.modifyCacheDirective(
           PBHelper.convert(request.getInfo()));
-      return ModifyPathBasedCacheDirectiveResponseProto.newBuilder().build();
+      return ModifyCacheDirectiveResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
   }
 
   @Override
-  public RemovePathBasedCacheDirectiveResponseProto
-      removePathBasedCacheDirective(RpcController controller,
-          RemovePathBasedCacheDirectiveRequestProto request)
+  public RemoveCacheDirectiveResponseProto
+      removeCacheDirective(RpcController controller,
+          RemoveCacheDirectiveRequestProto request)
               throws ServiceException {
     try {
-      server.removePathBasedCacheDirective(request.getId());
-      return RemovePathBasedCacheDirectiveResponseProto.
+      server.removeCacheDirective(request.getId());
+      return RemoveCacheDirectiveResponseProto.
           newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1075,28 +1076,26 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
-  public ListPathBasedCacheDirectivesResponseProto listPathBasedCacheDirectives(
-      RpcController controller, ListPathBasedCacheDirectivesRequestProto request)
+  public ListCacheDirectivesResponseProto listCacheDirectives(
+      RpcController controller, ListCacheDirectivesRequestProto request)
          throws ServiceException {
     try {
-      PathBasedCacheDirective filter =
+      CacheDirectiveInfo filter =
          PBHelper.convert(request.getFilter());
-      RemoteIterator<PathBasedCacheDirective> iter =
-         server.listPathBasedCacheDirectives(request.getPrevId(), filter);
-      ListPathBasedCacheDirectivesResponseProto.Builder builder =
-          ListPathBasedCacheDirectivesResponseProto.newBuilder();
+      RemoteIterator<CacheDirectiveEntry> iter =
+         server.listCacheDirectives(request.getPrevId(), filter);
+      ListCacheDirectivesResponseProto.Builder builder =
+          ListCacheDirectivesResponseProto.newBuilder();
       long prevId = 0;
       while (iter.hasNext()) {
-        PathBasedCacheDirective directive = iter.next();
-        builder.addElements(
-            ListPathBasedCacheDirectivesElementProto.newBuilder().
-                setInfo(PBHelper.convert(directive)));
-        prevId = directive.getId();
+        CacheDirectiveEntry entry = iter.next();
+        builder.addElements(PBHelper.convert(entry));
+        prevId = entry.getInfo().getId();
       }
       if (prevId == 0) {
         builder.setHasMore(false);
       } else {
-        iter = server.listPathBasedCacheDirectives(prevId, filter);
+        iter = server.listCacheDirectives(prevId, filter);
         builder.setHasMore(iter.hasNext());
       }
       return builder.build();

+ 47 - 49
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -32,11 +32,11 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -51,14 +51,13 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
@@ -100,16 +99,16 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCa
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyPathBasedCacheDirectiveRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDirectiveRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
@@ -146,7 +145,6 @@ import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRespons
 import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
 import org.apache.hadoop.security.token.Token;
 
-import com.google.common.primitives.Shorts;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.ServiceException;
 
@@ -1006,11 +1004,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public long addPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException {
+  public long addCacheDirective(
+      CacheDirectiveInfo directive) throws IOException {
     try {
-      return rpcProxy.addPathBasedCacheDirective(null, 
-              AddPathBasedCacheDirectiveRequestProto.newBuilder().
+      return rpcProxy.addCacheDirective(null, 
+              AddCacheDirectiveRequestProto.newBuilder().
                  setInfo(PBHelper.convert(directive)).build()).getId();
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
@@ -1018,11 +1016,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public void modifyPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException {
+  public void modifyCacheDirective(
+      CacheDirectiveInfo directive) throws IOException {
    try {
-      rpcProxy.modifyPathBasedCacheDirective(null,
-          ModifyPathBasedCacheDirectiveRequestProto.newBuilder().
+      rpcProxy.modifyCacheDirective(null,
+          ModifyCacheDirectiveRequestProto.newBuilder().
              setInfo(PBHelper.convert(directive)).build());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
@@ -1030,29 +1028,29 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public void removePathBasedCacheDirective(long id)
+  public void removeCacheDirective(long id)
      throws IOException {
    try {
-      rpcProxy.removePathBasedCacheDirective(null,
-          RemovePathBasedCacheDirectiveRequestProto.newBuilder().
+      rpcProxy.removeCacheDirective(null,
+          RemoveCacheDirectiveRequestProto.newBuilder().
              setId(id).build());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }
 
-  private static class BatchedPathBasedCacheEntries
-      implements BatchedEntries<PathBasedCacheDirective> {
-    private ListPathBasedCacheDirectivesResponseProto response;
+  private static class BatchedCacheEntries
+      implements BatchedEntries<CacheDirectiveEntry> {
+    private ListCacheDirectivesResponseProto response;
 
-    BatchedPathBasedCacheEntries(
-        ListPathBasedCacheDirectivesResponseProto response) {
+    BatchedCacheEntries(
+        ListCacheDirectivesResponseProto response) {
      this.response = response;
    }
 
    @Override
-    public PathBasedCacheDirective get(int i) {
-      return PBHelper.convert(response.getElements(i).getInfo());
+    public CacheDirectiveEntry get(int i) {
+      return PBHelper.convert(response.getElements(i));
    }
 
    @Override
@@ -1066,46 +1064,46 @@ public class ClientNamenodeProtocolTranslatorPB implements
    }
  }
 
-  private class PathBasedCacheEntriesIterator
-      extends BatchedRemoteIterator<Long, PathBasedCacheDirective> {
-    private final PathBasedCacheDirective filter;
+  private class CacheEntriesIterator
+    extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {
+      private final CacheDirectiveInfo filter;
 
-    public PathBasedCacheEntriesIterator(long prevKey,
-        PathBasedCacheDirective filter) {
+    public CacheEntriesIterator(long prevKey,
+        CacheDirectiveInfo filter) {
      super(prevKey);
      this.filter = filter;
    }
 
    @Override
-    public BatchedEntries<PathBasedCacheDirective> makeRequest(
+    public BatchedEntries<CacheDirectiveEntry> makeRequest(
        Long nextKey) throws IOException {
-      ListPathBasedCacheDirectivesResponseProto response;
+      ListCacheDirectivesResponseProto response;
      try {
-        response = rpcProxy.listPathBasedCacheDirectives(null,
-            ListPathBasedCacheDirectivesRequestProto.newBuilder().
+        response = rpcProxy.listCacheDirectives(null,
+            ListCacheDirectivesRequestProto.newBuilder().
                setPrevId(nextKey).
                setFilter(PBHelper.convert(filter)).
                build());
      } catch (ServiceException e) {
        throw ProtobufHelper.getRemoteException(e);
      }
-      return new BatchedPathBasedCacheEntries(response);
+      return new BatchedCacheEntries(response);
    }
 
    @Override
-    public Long elementToPrevKey(PathBasedCacheDirective element) {
-      return element.getId();
+    public Long elementToPrevKey(CacheDirectiveEntry element) {
+      return element.getInfo().getId();
    }
  }
 
  @Override
-  public RemoteIterator<PathBasedCacheDirective>
-      listPathBasedCacheDirectives(long prevId,
-          PathBasedCacheDirective filter) throws IOException {
+  public RemoteIterator<CacheDirectiveEntry>
+      listCacheDirectives(long prevId,
+          CacheDirectiveInfo filter) throws IOException {
    if (filter == null) {
-      filter = new PathBasedCacheDirective.Builder().build();
+      filter = new CacheDirectiveInfo.Builder().build();
    }
-    return new PathBasedCacheEntriesIterator(prevId, filter);
+    return new CacheEntriesIterator(prevId, filter);
  }
 
  @Override
@@ -1143,11 +1141,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
    }
  }
 
-  private static class BatchedPathDirectiveEntries
-      implements BatchedEntries<CachePoolInfo> {
-    private final ListCachePoolsResponseProto proto;
+  private static class BatchedCachePoolInfo
+    implements BatchedEntries<CachePoolInfo> {
+      private final ListCachePoolsResponseProto proto;
    
-    public BatchedPathDirectiveEntries(ListCachePoolsResponseProto proto) {
+    public BatchedCachePoolInfo(ListCachePoolsResponseProto proto) {
      this.proto = proto;
    }
      
@@ -1179,7 +1177,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
    public BatchedEntries<CachePoolInfo> makeRequest(String prevKey)
        throws IOException {
      try {
-        return new BatchedPathDirectiveEntries(
+        return new BatchedCachePoolInfo(
            rpcProxy.listCachePools(null, 
              ListCachePoolsRequestProto.newBuilder().
                setPrevPoolName(prevKey).build()));

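CacheEntriesIterator above only supplies two hooks to BatchedRemoteIterator: makeRequest(nextKey) fetches one batch, and elementToPrevKey(element) yields the id to resume from. A self-contained toy model of that resume loop (plain longs instead of CacheDirectiveEntry, a fixed array instead of the RPC; every name here is hypothetical):

    import java.util.ArrayList;
    import java.util.List;

    public class BatchedIterationSketch {
      private static final long[] ALL_IDS = {1, 2, 3, 4, 5, 6, 7}; // pretend server state
      private static final int BATCH = 3;

      // Stand-in for one listCacheDirectives RPC: ids strictly greater than prevId.
      static List<Long> makeRequest(long prevId) {
        List<Long> batch = new ArrayList<>();
        for (long id : ALL_IDS) {
          if (id > prevId && batch.size() < BATCH) {
            batch.add(id);
          }
        }
        return batch;
      }

      public static void main(String[] args) {
        long prevId = -1;                  // plays the role of super(prevKey)
        List<Long> batch;
        while (!(batch = makeRequest(prevId)).isEmpty()) {
          for (long id : batch) {
            System.out.println("directive " + id);
            prevId = id;                   // elementToPrevKey(element)
          }
        }
      }
    }
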
+ 54 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -37,11 +37,15 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -57,12 +61,15 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathBasedCacheDirectiveInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto;
@@ -1675,29 +1682,29 @@ public class PBHelper {
     return DataChecksum.Type.valueOf(type.getNumber());
   }
 
-  public static PathBasedCacheDirectiveInfoProto convert
-      (PathBasedCacheDirective directive) {
-    PathBasedCacheDirectiveInfoProto.Builder builder = 
-        PathBasedCacheDirectiveInfoProto.newBuilder();
-    if (directive.getId() != null) {
-      builder.setId(directive.getId());
+  public static CacheDirectiveInfoProto convert
+      (CacheDirectiveInfo info) {
+    CacheDirectiveInfoProto.Builder builder = 
+        CacheDirectiveInfoProto.newBuilder();
+    if (info.getId() != null) {
+      builder.setId(info.getId());
     }
-    if (directive.getPath() != null) {
-      builder.setPath(directive.getPath().toUri().getPath());
+    if (info.getPath() != null) {
+      builder.setPath(info.getPath().toUri().getPath());
     }
-    if (directive.getReplication() != null) {
-      builder.setReplication(directive.getReplication());
+    if (info.getReplication() != null) {
+      builder.setReplication(info.getReplication());
     }
-    if (directive.getPool() != null) {
-      builder.setPool(directive.getPool());
+    if (info.getPool() != null) {
+      builder.setPool(info.getPool());
    }
    return builder.build();
  }
 
-  public static PathBasedCacheDirective convert
-      (PathBasedCacheDirectiveInfoProto proto) {
-    PathBasedCacheDirective.Builder builder =
-        new PathBasedCacheDirective.Builder();
+  public static CacheDirectiveInfo convert
+      (CacheDirectiveInfoProto proto) {
+    CacheDirectiveInfo.Builder builder =
+        new CacheDirectiveInfo.Builder();
    if (proto.hasId()) {
      builder.setId(proto.getId());
    }
@@ -1714,6 +1721,37 @@ public class PBHelper {
    return builder.build();
  }
   
+  public static CacheDirectiveStatsProto convert(CacheDirectiveStats stats) {
+    CacheDirectiveStatsProto.Builder builder = 
+        CacheDirectiveStatsProto.newBuilder();
+    builder.setBytesNeeded(stats.getBytesNeeded());
+    builder.setBytesCached(stats.getBytesCached());
+    builder.setFilesAffected(stats.getFilesAffected());
+    return builder.build();
+  }
+  
+  public static CacheDirectiveStats convert(CacheDirectiveStatsProto proto) {
+    CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();
+    builder.setBytesNeeded(proto.getBytesNeeded());
+    builder.setBytesCached(proto.getBytesCached());
+    builder.setFilesAffected(proto.getFilesAffected());
+    return builder.build();
+  }
+
+  public static CacheDirectiveEntryProto convert(CacheDirectiveEntry entry) {
+    CacheDirectiveEntryProto.Builder builder = 
+        CacheDirectiveEntryProto.newBuilder();
+    builder.setInfo(PBHelper.convert(entry.getInfo()));
+    builder.setStats(PBHelper.convert(entry.getStats()));
+    return builder.build();
+  }
+  
+  public static CacheDirectiveEntry convert(CacheDirectiveEntryProto proto) {
+    CacheDirectiveInfo info = PBHelper.convert(proto.getInfo());
+    CacheDirectiveStats stats = PBHelper.convert(proto.getStats());
+    return new CacheDirectiveEntry(info, stats);
+  }
+
   public static CachePoolInfoProto convert(CachePoolInfo info) {
     CachePoolInfoProto.Builder builder = CachePoolInfoProto.newBuilder();
     builder.setPoolName(info.getPoolName());

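The four new helpers compose into a round trip between the client-side types and the wire format. A hedged sketch (path, pool, and byte counts are invented; it assumes the CacheDirectiveInfo and CacheDirectiveStats builders introduced elsewhere in this patch):

    CacheDirectiveInfo info = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/warm/data"))      // hypothetical path
        .setReplication((short) 2)
        .setPool("testPool")                  // hypothetical pool
        .build();
    CacheDirectiveStats stats = new CacheDirectiveStats.Builder()
        .setBytesNeeded(1024L).setBytesCached(512L).setFilesAffected(1L)
        .build();
    CacheDirectiveEntry entry = new CacheDirectiveEntry(info, stats);

    CacheDirectiveEntryProto proto = PBHelper.convert(entry);   // to wire format
    CacheDirectiveEntry back = PBHelper.convert(proto);         // and back again
    assert back.getStats().getBytesCached() == 512L;
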
+ 36 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java

@@ -32,7 +32,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirective;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.CacheManager;
@@ -197,12 +197,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     scannedBlocks = 0;
     namesystem.writeLock();
     try {
-      rescanPathBasedCacheEntries();
-    } finally {
-      namesystem.writeUnlock();
-    }
-    namesystem.writeLock();
-    try {
+      rescanCacheDirectives();
       rescanCachedBlockMap();
       blockManager.getDatanodeManager().resetLastCachingDirectiveSentTime();
     } finally {
@@ -211,15 +206,18 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
   }
 
   /**
-   * Scan all PathBasedCacheEntries.  Use the information to figure out
+   * Scan all CacheDirectives.  Use the information to figure out
    * what cache replication factor each block should have.
    *
    * @param mark       Whether the current scan is setting or clearing the mark
    */
-  private void rescanPathBasedCacheEntries() {
+  private void rescanCacheDirectives() {
     FSDirectory fsDir = namesystem.getFSDirectory();
-    for (PathBasedCacheEntry pce : cacheManager.getEntriesById().values()) {
+    for (CacheDirective pce : cacheManager.getEntriesById().values()) {
       scannedDirectives++;
+      pce.clearBytesNeeded();
+      pce.clearBytesCached();
+      pce.clearFilesAffected();
       String path = pce.getPath();
       INode node;
       try {
@@ -252,18 +250,24 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
   }
   
   /**
-   * Apply a PathBasedCacheEntry to a file.
+   * Apply a CacheDirective to a file.
    *
-   * @param pce       The PathBasedCacheEntry to apply.
+   * @param pce       The CacheDirective to apply.
    * @param file      The file.
    */
-  private void rescanFile(PathBasedCacheEntry pce, INodeFile file) {
+  private void rescanFile(CacheDirective pce, INodeFile file) {
+    pce.incrementFilesAffected();
     BlockInfo[] blockInfos = file.getBlocks();
+    long cachedTotal = 0;
+    long neededTotal = 0;
     for (BlockInfo blockInfo : blockInfos) {
       if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) {
         // We don't try to cache blocks that are under construction.
         continue;
       }
+      long neededByBlock = 
+         pce.getReplication() * blockInfo.getNumBytes();
+      neededTotal += neededByBlock;
       Block block = new Block(blockInfo.getBlockId());
       CachedBlock ncblock = new CachedBlock(block.getBlockId(),
           pce.getReplication(), mark);
@@ -271,17 +275,35 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
       if (ocblock == null) {
         cachedBlocks.put(ncblock);
       } else {
+        // Update bytesUsed using the current replication levels.
+        // Assumptions: we assume that all the blocks are the same length
+        // on each datanode.  We can assume this because we're only caching
+        // blocks in state COMMITTED.
+        // Note that if two directives are caching the same block(s), they will
+        // both get them added to their bytesCached.
+        List<DatanodeDescriptor> cachedOn =
+            ocblock.getDatanodes(Type.CACHED);
+        long cachedByBlock = Math.min(cachedOn.size(), pce.getReplication()) *
+            blockInfo.getNumBytes();
+        cachedTotal += cachedByBlock;
+
         if (mark != ocblock.getMark()) {
           // Mark hasn't been set in this scan, so update replication and mark.
           ocblock.setReplicationAndMark(pce.getReplication(), mark);
         } else {
           // Mark already set in this scan.  Set replication to highest value in
-          // any PathBasedCacheEntry that covers this file.
+          // any CacheDirective that covers this file.
           ocblock.setReplicationAndMark((short)Math.max(
               pce.getReplication(), ocblock.getReplication()), mark);
         }
       }
     }
+    pce.addBytesNeeded(neededTotal);
+    pce.addBytesCached(cachedTotal);
+    if (LOG.isTraceEnabled()) {
+      LOG.debug("Directive " + pce.getEntryId() + " is caching " +
+          file.getFullPathName() + ": " + cachedTotal + "/" + neededTotal);
+    }
   }
 
   /**

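The accounting added above is plain arithmetic per COMPLETE block: replication * blockSize is added to bytesNeeded, and min(replicas actually cached, wanted replication) * blockSize is added to bytesCached. A worked example with invented numbers:

    // One file, two COMPLETE 128 MB blocks, directive replication = 2.
    long blockSize = 128L * 1024 * 1024;
    short replication = 2;
    int cachedReplicasBlock1 = 2;   // fully cached
    int cachedReplicasBlock2 = 1;   // still being cached

    long bytesNeeded = 2 * replication * blockSize;              // 536870912 (512 MB)
    long bytesCached = Math.min(cachedReplicasBlock1, replication) * blockSize
                     + Math.min(cachedReplicasBlock2, replication) * blockSize;
    // bytesCached == 402653184 (384 MB): the second block counts only once.
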
+ 21 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java

@@ -529,6 +529,21 @@ class BPOfferService {
     }
   }
 
+  private String blockIdArrayToString(long ids[]) {
+    long maxNumberOfBlocksToLog = dn.getMaxNumberOfBlocksToLog();
+    StringBuilder bld = new StringBuilder();
+    String prefix = "";
+    for (int i = 0; i < ids.length; i++) {
+      if (i >= maxNumberOfBlocksToLog) {
+        bld.append("...");
+        break;
+      }
+      bld.append(prefix).append(ids[i]);
+      prefix = ", ";
+    }
+    return bld.toString();
+  }
+
   /**
    * This method should handle all commands from Active namenode except
    * DNA_REGISTER which should be handled earlier itself.
@@ -569,12 +584,16 @@ class BPOfferService {
       dn.metrics.incrBlocksRemoved(toDelete.length);
       break;
     case DatanodeProtocol.DNA_CACHE:
-      LOG.info("DatanodeCommand action: DNA_CACHE");
+      LOG.info("DatanodeCommand action: DNA_CACHE for " +
+        blockIdCmd.getBlockPoolId() + " of [" +
+          blockIdArrayToString(blockIdCmd.getBlockIds()) + "]");
       dn.getFSDataset().cache(blockIdCmd.getBlockPoolId(), blockIdCmd.getBlockIds());
       dn.metrics.incrBlocksCached(blockIdCmd.getBlockIds().length);
       break;
     case DatanodeProtocol.DNA_UNCACHE:
-      LOG.info("DatanodeCommand action: DNA_UNCACHE");
+      LOG.info("DatanodeCommand action: DNA_UNCACHE for " +
+        blockIdCmd.getBlockPoolId() + " of [" +
+          blockIdArrayToString(blockIdCmd.getBlockIds()) + "]");
       dn.getFSDataset().uncache(blockIdCmd.getBlockPoolId(), blockIdCmd.getBlockIds());
       dn.metrics.incrBlocksUncached(blockIdCmd.getBlockIds().length);
       break;

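The new helper keeps DNA_CACHE/DNA_UNCACHE log lines bounded. A standalone rendering of the same truncation logic, with the limit passed in explicitly so the behaviour is easy to see (class and method names here are made up):

    public class BlockIdLogSketch {
      static String format(long[] ids, long maxToLog) {
        StringBuilder bld = new StringBuilder();
        String prefix = "";
        for (int i = 0; i < ids.length; i++) {
          if (i >= maxToLog) {
            bld.append("...");
            break;
          }
          bld.append(prefix).append(ids[i]);
          prefix = ", ";
        }
        return bld.toString();
      }

      public static void main(String[] args) {
        // With a limit of 3, five block ids render as "1001, 1002, 1003..."
        System.out.println(format(new long[]{1001, 1002, 1003, 1004, 1005}, 3));
      }
    }
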
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java

@@ -532,7 +532,7 @@ class BPServiceActor implements Runnable {
       long sendCost = sendTime - createTime;
       dn.getMetrics().addCacheReport(sendCost);
       LOG.info("CacheReport of " + blockIds.size()
-          + " blocks took " + createCost + " msec to generate and "
+          + " block(s) took " + createCost + " msec to generate and "
           + sendCost + " msecs for RPC and NN processing");
     }
     return cmd;

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -206,6 +206,7 @@ public class DataNode extends Configured
   private SecureResources secureResources = null;
   private AbstractList<StorageLocation> dataDirs;
   private Configuration conf;
+  private final long maxNumberOfBlocksToLog;
 
   private final List<String> usersWithLocalPathAccess;
   private boolean connectToDnViaHostname;
@@ -221,6 +222,8 @@ public class DataNode extends Configured
            final AbstractList<StorageLocation> dataDirs,
            final SecureResources resources) throws IOException {
     super(conf);
+    this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
+        DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
 
     this.usersWithLocalPathAccess = Arrays.asList(
         conf.getTrimmedStrings(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
@@ -1010,6 +1013,10 @@ public class DataNode extends Configured
     }
   }
 
+  public long getMaxNumberOfBlocksToLog() {
+    return maxNumberOfBlocksToLog;
+  }
+
   @Override
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
       Token<BlockTokenIdentifier> token) throws IOException {

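The cap itself is an ordinary configuration value read once in the constructor. A hedged example of setting it programmatically, using the DFSConfigKeys constant referenced above rather than a literal key name:

    Configuration conf = new HdfsConfiguration();
    // Log at most 32 block ids per DNA_CACHE / DNA_UNCACHE command on this DataNode.
    conf.setLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY, 32);
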
+ 50 - 49
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java

@@ -48,11 +48,12 @@ import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CacheDirective;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
@@ -100,11 +101,11 @@ public final class CacheManager {
   /**
    * Cache entries, sorted by ID.
    *
-   * listPathBasedCacheDirectives relies on the ordering of elements in this map
+   * listCacheDirectives relies on the ordering of elements in this map
    * to track what has already been listed by the client.
    */
-  private final TreeMap<Long, PathBasedCacheEntry> entriesById =
-      new TreeMap<Long, PathBasedCacheEntry>();
+  private final TreeMap<Long, CacheDirective> entriesById =
+      new TreeMap<Long, CacheDirective>();
 
   /**
    * The entry ID to use for a new entry.  Entry IDs always increase, and are
@@ -115,8 +116,8 @@ public final class CacheManager {
   /**
    * Cache entries, sorted by path
    */
-  private final TreeMap<String, List<PathBasedCacheEntry>> entriesByPath =
-      new TreeMap<String, List<PathBasedCacheEntry>>();
+  private final TreeMap<String, List<CacheDirective>> entriesByPath =
+      new TreeMap<String, List<CacheDirective>>();
 
   /**
    * Cache pools, sorted by name.
@@ -236,7 +237,7 @@ public final class CacheManager {
     return active;
   }
 
-  public TreeMap<Long, PathBasedCacheEntry> getEntriesById() {
+  public TreeMap<Long, CacheDirective> getEntriesById() {
     assert namesystem.hasReadLock();
     return entriesById;
   }
@@ -264,7 +265,7 @@ public final class CacheManager {
     }
   }
 
-  private static String validatePoolName(PathBasedCacheDirective directive)
+  private static String validatePoolName(CacheDirectiveInfo directive)
       throws InvalidRequestException {
     String pool = directive.getPool();
     if (pool == null) {
@@ -276,7 +277,7 @@ public final class CacheManager {
     return pool;
   }
 
-  private static String validatePath(PathBasedCacheDirective directive)
+  private static String validatePath(CacheDirectiveInfo directive)
       throws InvalidRequestException {
     if (directive.getPath() == null) {
       throw new InvalidRequestException("No path specified.");
@@ -288,7 +289,7 @@ public final class CacheManager {
     return path;
   }
 
-  private static short validateReplication(PathBasedCacheDirective directive,
+  private static short validateReplication(CacheDirectiveInfo directive,
       short defaultValue) throws InvalidRequestException {
     short repl = (directive.getReplication() != null)
         ? directive.getReplication() : defaultValue;
@@ -300,16 +301,16 @@ public final class CacheManager {
   }
   }
 
 
   /**
   /**
-   * Get a PathBasedCacheEntry by ID, validating the ID and that the entry
+   * Get a CacheDirective by ID, validating the ID and that the entry
   * exists.
   */
-  private PathBasedCacheEntry getById(long id) throws InvalidRequestException {
+  private CacheDirective getById(long id) throws InvalidRequestException {
    // Check for invalid IDs.
    if (id <= 0) {
      throw new InvalidRequestException("Invalid negative ID.");
    }
    // Find the entry.
-    PathBasedCacheEntry entry = entriesById.get(id);
+    CacheDirective entry = entriesById.get(id);
    if (entry == null) {
      throw new InvalidRequestException("No directive with ID " + id
          + " found.");
@@ -331,22 +332,22 @@ public final class CacheManager {

  // RPC handlers

-  private void addInternal(PathBasedCacheEntry entry) {
+  private void addInternal(CacheDirective entry) {
    entriesById.put(entry.getEntryId(), entry);
    String path = entry.getPath();
-    List<PathBasedCacheEntry> entryList = entriesByPath.get(path);
+    List<CacheDirective> entryList = entriesByPath.get(path);
    if (entryList == null) {
-      entryList = new ArrayList<PathBasedCacheEntry>(1);
+      entryList = new ArrayList<CacheDirective>(1);
      entriesByPath.put(path, entryList);
    }
    entryList.add(entry);
  }

-  public PathBasedCacheDirective addDirective(
-      PathBasedCacheDirective directive, FSPermissionChecker pc)
+  public CacheDirectiveInfo addDirective(
+      CacheDirectiveInfo directive, FSPermissionChecker pc)
      throws IOException {
    assert namesystem.hasWriteLock();
-    PathBasedCacheEntry entry;
+    CacheDirective entry;
    try {
      CachePool pool = getCachePool(validatePoolName(directive));
      checkWritePermission(pc, pool);
@@ -372,7 +373,7 @@ public final class CacheManager {
        // Add a new entry with the next available ID.
        id = getNextEntryId();
      }
-      entry = new PathBasedCacheEntry(id, path, replication, pool);
+      entry = new CacheDirective(id, path, replication, pool);
      addInternal(entry);
    } catch (IOException e) {
      LOG.warn("addDirective of " + directive + " failed: ", e);
@@ -385,7 +386,7 @@ public final class CacheManager {
    return entry.toDirective();
  }

-  public void modifyDirective(PathBasedCacheDirective directive,
+  public void modifyDirective(CacheDirectiveInfo directive,
      FSPermissionChecker pc) throws IOException {
    assert namesystem.hasWriteLock();
    String idString =
@@ -397,7 +398,7 @@ public final class CacheManager {
      if (id == null) {
        throw new InvalidRequestException("Must supply an ID.");
      }
-      PathBasedCacheEntry prevEntry = getById(id);
+      CacheDirective prevEntry = getById(id);
      checkWritePermission(pc, prevEntry.getPool());
      String path = prevEntry.getPath();
      if (directive.getPath() != null) {
@@ -413,8 +414,8 @@ public final class CacheManager {
        checkWritePermission(pc, pool);
      }
      removeInternal(prevEntry);
-      PathBasedCacheEntry newEntry =
-          new PathBasedCacheEntry(id, path, replication, pool);
+      CacheDirective newEntry =
+          new CacheDirective(id, path, replication, pool);
      addInternal(newEntry);
    } catch (IOException e) {
      LOG.warn("modifyDirective of " + idString + " failed: ", e);
@@ -424,12 +425,12 @@ public final class CacheManager {
        directive + ".");
  }

-  public void removeInternal(PathBasedCacheEntry existing)
+  public void removeInternal(CacheDirective existing)
      throws InvalidRequestException {
    assert namesystem.hasWriteLock();
    // Remove the corresponding entry in entriesByPath.
    String path = existing.getPath();
-    List<PathBasedCacheEntry> entries = entriesByPath.get(path);
+    List<CacheDirective> entries = entriesByPath.get(path);
    if (entries == null || !entries.remove(existing)) {
      throw new InvalidRequestException("Failed to locate entry " +
          existing.getEntryId() + " by path " + existing.getPath());
@@ -444,7 +445,7 @@ public final class CacheManager {
      throws IOException {
    assert namesystem.hasWriteLock();
    try {
-      PathBasedCacheEntry existing = getById(id);
+      CacheDirective existing = getById(id);
      checkWritePermission(pc, existing.getPool());
      removeInternal(existing);
    } catch (IOException e) {
@@ -457,9 +458,9 @@ public final class CacheManager {
    LOG.info("removeDirective of " + id + " successful.");
  }

-  public BatchedListEntries<PathBasedCacheDirective> 
-        listPathBasedCacheDirectives(long prevId,
-            PathBasedCacheDirective filter,
+  public BatchedListEntries<CacheDirectiveEntry> 
+        listCacheDirectives(long prevId,
+            CacheDirectiveInfo filter,
            FSPermissionChecker pc) throws IOException {
    assert namesystem.hasReadLock();
    final int NUM_PRE_ALLOCATED_ENTRIES = 16;
@@ -473,23 +474,23 @@ public final class CacheManager {
    if (filter.getReplication() != null) {
      throw new IOException("Filtering by replication is unsupported.");
    }
-    ArrayList<PathBasedCacheDirective> replies =
-        new ArrayList<PathBasedCacheDirective>(NUM_PRE_ALLOCATED_ENTRIES);
+    ArrayList<CacheDirectiveEntry> replies =
+        new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
    int numReplies = 0;
-    SortedMap<Long, PathBasedCacheEntry> tailMap =
+    SortedMap<Long, CacheDirective> tailMap =
      entriesById.tailMap(prevId + 1);
-    for (Entry<Long, PathBasedCacheEntry> cur : tailMap.entrySet()) {
+    for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
      if (numReplies >= maxListCacheDirectivesNumResponses) {
-        return new BatchedListEntries<PathBasedCacheDirective>(replies, true);
+        return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
      }
-      PathBasedCacheEntry curEntry = cur.getValue();
-      PathBasedCacheDirective directive = cur.getValue().toDirective();
+      CacheDirective curEntry = cur.getValue();
+      CacheDirectiveInfo info = cur.getValue().toDirective();
      if (filter.getPool() != null && 
-          !directive.getPool().equals(filter.getPool())) {
+          !info.getPool().equals(filter.getPool())) {
        continue;
      }
      if (filterPath != null &&
-          !directive.getPath().toUri().getPath().equals(filterPath)) {
+          !info.getPath().toUri().getPath().equals(filterPath)) {
        continue;
      }
      boolean hasPermission = true;
@@ -501,11 +502,11 @@ public final class CacheManager {
        }
      }
      if (hasPermission) {
-        replies.add(cur.getValue().toDirective());
+        replies.add(new CacheDirectiveEntry(info, cur.getValue().toStats()));
        numReplies++;
      }
    }
-    return new BatchedListEntries<PathBasedCacheDirective>(replies, false);
+    return new BatchedListEntries<CacheDirectiveEntry>(replies, false);
  }

  /**
@@ -602,10 +603,10 @@ public final class CacheManager {
    // Remove entries using this pool
    // TODO: could optimize this somewhat to avoid the need to iterate
    // over all entries in entriesById
-    Iterator<Entry<Long, PathBasedCacheEntry>> iter = 
+    Iterator<Entry<Long, CacheDirective>> iter = 
        entriesById.entrySet().iterator();
    while (iter.hasNext()) {
-      Entry<Long, PathBasedCacheEntry> entry = iter.next();
+      Entry<Long, CacheDirective> entry = iter.next();
      if (entry.getValue().getPool() == pool) {
        entriesByPath.remove(entry.getValue().getPath());
        iter.remove();
@@ -789,7 +790,7 @@ public final class CacheManager {
    prog.setTotal(Phase.SAVING_CHECKPOINT, step, entriesById.size());
    Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
    out.writeInt(entriesById.size());
-    for (PathBasedCacheEntry entry: entriesById.values()) {
+    for (CacheDirective entry: entriesById.values()) {
      out.writeLong(entry.getEntryId());
      Text.writeString(out, entry.getPath());
      out.writeShort(entry.getReplication());
@@ -838,15 +839,15 @@ public final class CacheManager {
        throw new IOException("Entry refers to pool " + poolName +
            ", which does not exist.");
      }
-      PathBasedCacheEntry entry =
-          new PathBasedCacheEntry(entryId, path, replication, pool);
+      CacheDirective entry =
+          new CacheDirective(entryId, path, replication, pool);
      if (entriesById.put(entry.getEntryId(), entry) != null) {
        throw new IOException("An entry with ID " + entry.getEntryId() +
            " already exists");
      }
-      List<PathBasedCacheEntry> entries = entriesByPath.get(entry.getPath());
+      List<CacheDirective> entries = entriesByPath.get(entry.getPath());
      if (entries == null) {
-        entries = new LinkedList<PathBasedCacheEntry>();
+        entries = new LinkedList<CacheDirective>();
        entriesByPath.put(entry.getPath(), entries);
      }
      entries.add(entry);

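For orientation only (not part of the diff above): a minimal sketch of building the renamed CacheDirectiveInfo on the client side, using only the Builder methods that appear in this commit; the path and pool name below are hypothetical.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/warm/dataset"))   // hypothetical path
        .setReplication((short) 2)
        .setPool("research")                  // hypothetical pool name
        .build();
    // A filter for listCacheDirectives(prevId, filter, pc) can be built the
    // same way, e.g. new CacheDirectiveInfo.Builder().setPool("research").build().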
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java

@@ -47,7 +47,7 @@ public enum Content {
    }

    private Counts() {
-      super(Content.values());
+      super(Content.class);
    }
  }


+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -2407,8 +2407,9 @@ public class FSDirectory implements Closeable {
    if (dirNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) {
      throw new IllegalArgumentException("Cannot clear namespace quota on root.");
    } else { // a directory inode
-      long oldNsQuota = dirNode.getNsQuota();
-      long oldDsQuota = dirNode.getDsQuota();
+      final Quota.Counts oldQuota = dirNode.getQuotaCounts();
+      final long oldNsQuota = oldQuota.get(Quota.NAMESPACE);
+      final long oldDsQuota = oldQuota.get(Quota.DISKSPACE);
      if (nsQuota == HdfsConstants.QUOTA_DONT_SET) {
        nsQuota = oldNsQuota;
      }
@@ -2460,8 +2461,9 @@ public class FSDirectory implements Closeable {
    try {
      INodeDirectory dir = unprotectedSetQuota(src, nsQuota, dsQuota);
      if (dir != null) {
-        fsImage.getEditLog().logSetQuota(src, dir.getNsQuota(), 
-                                         dir.getDsQuota());
+        final Quota.Counts q = dir.getQuotaCounts();
+        fsImage.getEditLog().logSetQuota(src,
+            q.get(Quota.NAMESPACE), q.get(Quota.DISKSPACE));
      }
    } finally {
      writeUnlock();

+ 14 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -38,15 +38,15 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddPathBasedCacheDirectiveOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyPathBasedCacheDirectiveOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
@@ -63,7 +63,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDirectiveOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@@ -954,27 +954,27 @@ public class FSEditLog implements LogsPurgeable {
    logEdit(op);
  }
  
-  void logAddPathBasedCacheDirective(PathBasedCacheDirective directive,
+  void logAddCacheDirectiveInfo(CacheDirectiveInfo directive,
      boolean toLogRpcIds) {
-    AddPathBasedCacheDirectiveOp op =
-        AddPathBasedCacheDirectiveOp.getInstance(cache.get())
+    AddCacheDirectiveInfoOp op =
+        AddCacheDirectiveInfoOp.getInstance(cache.get())
            .setDirective(directive);
    logRpcIds(op, toLogRpcIds);
    logEdit(op);
  }

-  void logModifyPathBasedCacheDirective(
-      PathBasedCacheDirective directive, boolean toLogRpcIds) {
-    ModifyPathBasedCacheDirectiveOp op =
-        ModifyPathBasedCacheDirectiveOp.getInstance(
+  void logModifyCacheDirectiveInfo(
+      CacheDirectiveInfo directive, boolean toLogRpcIds) {
+    ModifyCacheDirectiveInfoOp op =
+        ModifyCacheDirectiveInfoOp.getInstance(
            cache.get()).setDirective(directive);
    logRpcIds(op, toLogRpcIds);
    logEdit(op);
  }

-  void logRemovePathBasedCacheDirective(Long id, boolean toLogRpcIds) {
-    RemovePathBasedCacheDirectiveOp op =
-        RemovePathBasedCacheDirectiveOp.getInstance(cache.get()).setId(id);
+  void logRemoveCacheDirectiveInfo(Long id, boolean toLogRpcIds) {
+    RemoveCacheDirectiveInfoOp op =
+        RemoveCacheDirectiveInfoOp.getInstance(cache.get()).setId(id);
    logRpcIds(op, toLogRpcIds);
    logEdit(op);
  }

+ 10 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -36,13 +36,13 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddPathBasedCacheDirectiveOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.BlockListUpdatingOp;
@@ -56,10 +56,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DisallowSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyPathBasedCacheDirectiveOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDirectiveOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@@ -639,8 +639,8 @@ public class FSEditLogLoader {
      break;
    }
    case OP_ADD_PATH_BASED_CACHE_DIRECTIVE: {
-      AddPathBasedCacheDirectiveOp addOp = (AddPathBasedCacheDirectiveOp) op;
-      PathBasedCacheDirective result = fsNamesys.
+      AddCacheDirectiveInfoOp addOp = (AddCacheDirectiveInfoOp) op;
+      CacheDirectiveInfo result = fsNamesys.
          getCacheManager().addDirective(addOp.directive, null);
      if (toAddRetryCache) {
        Long id = result.getId();
@@ -649,8 +649,8 @@ public class FSEditLogLoader {
      break;
    }
    case OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE: {
-      ModifyPathBasedCacheDirectiveOp modifyOp =
-          (ModifyPathBasedCacheDirectiveOp) op;
+      ModifyCacheDirectiveInfoOp modifyOp =
+          (ModifyCacheDirectiveInfoOp) op;
      fsNamesys.getCacheManager().modifyDirective(
          modifyOp.directive, null);
      if (toAddRetryCache) {
@@ -659,8 +659,8 @@ public class FSEditLogLoader {
      break;
    }
    case OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE: {
-      RemovePathBasedCacheDirectiveOp removeOp =
-          (RemovePathBasedCacheDirectiveOp) op;
+      RemoveCacheDirectiveInfoOp removeOp =
+          (RemoveCacheDirectiveInfoOp) op;
      fsNamesys.getCacheManager().removeDirective(removeOp.id, null);
      if (toAddRetryCache) {
        fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId);

+ 36 - 36
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -86,7 +86,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.util.XMLUtils;
 import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
@@ -166,11 +166,11 @@ public abstract class FSEditLogOp {
      inst.put(OP_SET_GENSTAMP_V2, new SetGenstampV2Op());
      inst.put(OP_ALLOCATE_BLOCK_ID, new AllocateBlockIdOp());
      inst.put(OP_ADD_PATH_BASED_CACHE_DIRECTIVE,
-          new AddPathBasedCacheDirectiveOp());
+          new AddCacheDirectiveInfoOp());
      inst.put(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE,
-          new ModifyPathBasedCacheDirectiveOp());
+          new ModifyCacheDirectiveInfoOp());
      inst.put(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE,
-          new RemovePathBasedCacheDirectiveOp());
+          new RemoveCacheDirectiveInfoOp());
      inst.put(OP_ADD_CACHE_POOL, new AddCachePoolOp());
      inst.put(OP_MODIFY_CACHE_POOL, new ModifyCachePoolOp());
      inst.put(OP_REMOVE_CACHE_POOL, new RemoveCachePoolOp());
@@ -2868,22 +2868,22 @@ public abstract class FSEditLogOp {

  /**
   * {@literal @AtMostOnce} for
-   * {@link ClientProtocol#addPathBasedCacheDirective}
+   * {@link ClientProtocol#addCacheDirective}
   */
-  static class AddPathBasedCacheDirectiveOp extends FSEditLogOp {
-    PathBasedCacheDirective directive;
+  static class AddCacheDirectiveInfoOp extends FSEditLogOp {
+    CacheDirectiveInfo directive;
 
-    public AddPathBasedCacheDirectiveOp() {
+    public AddCacheDirectiveInfoOp() {
      super(OP_ADD_PATH_BASED_CACHE_DIRECTIVE);
    }

-    static AddPathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) {
-      return (AddPathBasedCacheDirectiveOp) cache
+    static AddCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
+      return (AddCacheDirectiveInfoOp) cache
          .get(OP_ADD_PATH_BASED_CACHE_DIRECTIVE);
    }

-    public AddPathBasedCacheDirectiveOp setDirective(
-        PathBasedCacheDirective directive) {
+    public AddCacheDirectiveInfoOp setDirective(
+        CacheDirectiveInfo directive) {
      this.directive = directive;
      assert(directive.getId() != null);
      assert(directive.getPath() != null);
@@ -2898,7 +2898,7 @@ public abstract class FSEditLogOp {
      String path = FSImageSerialization.readString(in);
      short replication = FSImageSerialization.readShort(in);
      String pool = FSImageSerialization.readString(in);
-      directive = new PathBasedCacheDirective.Builder().
+      directive = new CacheDirectiveInfo.Builder().
          setId(id).
          setPath(new Path(path)).
          setReplication(replication).
@@ -2930,7 +2930,7 @@ public abstract class FSEditLogOp {

    @Override
    void fromXml(Stanza st) throws InvalidXmlException {
-      directive = new PathBasedCacheDirective.Builder().
+      directive = new CacheDirectiveInfo.Builder().
          setId(Long.parseLong(st.getValue("ID"))).
          setPath(new Path(st.getValue("PATH"))).
          setReplication(Short.parseShort(st.getValue("REPLICATION"))).
@@ -2942,7 +2942,7 @@ public abstract class FSEditLogOp {
    @Override
    public String toString() {
      StringBuilder builder = new StringBuilder();
-      builder.append("AddPathBasedCacheDirective [");
+      builder.append("AddCacheDirectiveInfo [");
      builder.append("id=" + directive.getId() + ",");
      builder.append("path=" + directive.getPath().toUri().getPath() + ",");
      builder.append("replication=" + directive.getReplication() + ",");
@@ -2955,22 +2955,22 @@ public abstract class FSEditLogOp {

  /**
   * {@literal @AtMostOnce} for
-   * {@link ClientProtocol#modifyPathBasedCacheDirective}
+   * {@link ClientProtocol#modifyCacheDirective}
   */
-  static class ModifyPathBasedCacheDirectiveOp extends FSEditLogOp {
-    PathBasedCacheDirective directive;
+  static class ModifyCacheDirectiveInfoOp extends FSEditLogOp {
+    CacheDirectiveInfo directive;
 
-    public ModifyPathBasedCacheDirectiveOp() {
+    public ModifyCacheDirectiveInfoOp() {
      super(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE);
    }

-    static ModifyPathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) {
-      return (ModifyPathBasedCacheDirectiveOp) cache
+    static ModifyCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
+      return (ModifyCacheDirectiveInfoOp) cache
          .get(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE);
    }

-    public ModifyPathBasedCacheDirectiveOp setDirective(
-        PathBasedCacheDirective directive) {
+    public ModifyCacheDirectiveInfoOp setDirective(
+        CacheDirectiveInfo directive) {
      this.directive = directive;
      assert(directive.getId() != null);
      return this;
@@ -2978,8 +2978,8 @@ public abstract class FSEditLogOp {

    @Override
    void readFields(DataInputStream in, int logVersion) throws IOException {
-      PathBasedCacheDirective.Builder builder =
-          new PathBasedCacheDirective.Builder();
+      CacheDirectiveInfo.Builder builder =
+          new CacheDirectiveInfo.Builder();
      builder.setId(FSImageSerialization.readLong(in));
      byte flags = in.readByte();
      if ((flags & 0x1) != 0) {
@@ -2993,7 +2993,7 @@ public abstract class FSEditLogOp {
      }
      if ((flags & ~0x7) != 0) {
        throw new IOException("unknown flags set in " +
-            "ModifyPathBasedCacheDirectiveOp: " + flags);
+            "ModifyCacheDirectiveInfoOp: " + flags);
      }
      this.directive = builder.build();
      readRpcIds(in, logVersion);
@@ -3041,8 +3041,8 @@ public abstract class FSEditLogOp {

    @Override
    void fromXml(Stanza st) throws InvalidXmlException {
-      PathBasedCacheDirective.Builder builder =
-          new PathBasedCacheDirective.Builder();
+      CacheDirectiveInfo.Builder builder =
+          new CacheDirectiveInfo.Builder();
      builder.setId(Long.parseLong(st.getValue("ID")));
      String path = st.getValueOrNull("PATH");
      if (path != null) {
@@ -3063,7 +3063,7 @@ public abstract class FSEditLogOp {
    @Override
    public String toString() {
      StringBuilder builder = new StringBuilder();
-      builder.append("ModifyPathBasedCacheDirectiveOp[");
+      builder.append("ModifyCacheDirectiveInfoOp[");
      builder.append("id=").append(directive.getId());
      if (directive.getPath() != null) {
        builder.append(",").append("path=").append(directive.getPath());
@@ -3083,21 +3083,21 @@ public abstract class FSEditLogOp {

  /**
   * {@literal @AtMostOnce} for
-   * {@link ClientProtocol#removePathBasedCacheDirective}
+   * {@link ClientProtocol#removeCacheDirective}
   */
-  static class RemovePathBasedCacheDirectiveOp extends FSEditLogOp {
+  static class RemoveCacheDirectiveInfoOp extends FSEditLogOp {
    long id;

-    public RemovePathBasedCacheDirectiveOp() {
+    public RemoveCacheDirectiveInfoOp() {
      super(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE);
    }

-    static RemovePathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) {
-      return (RemovePathBasedCacheDirectiveOp) cache
+    static RemoveCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
+      return (RemoveCacheDirectiveInfoOp) cache
          .get(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE);
    }

-    public RemovePathBasedCacheDirectiveOp setId(long id) {
+    public RemoveCacheDirectiveInfoOp setId(long id) {
      this.id = id;
      return this;
    }
@@ -3129,7 +3129,7 @@ public abstract class FSEditLogOp {
    @Override
    public String toString() {
      StringBuilder builder = new StringBuilder();
-      builder.append("RemovePathBasedCacheDirective [");
+      builder.append("RemoveCacheDirectiveInfo [");
      builder.append("id=" + Long.toString(id));
      appendRpcIdsToString(builder, rpcClientId, rpcCallId);
      builder.append("]");

+ 8 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -777,18 +777,22 @@ public class FSImage implements Closeable {
      
    if (dir.isQuotaSet()) {
      // check if quota is violated. It indicates a software bug.
+      final Quota.Counts q = dir.getQuotaCounts();
+
      final long namespace = counts.get(Quota.NAMESPACE) - parentNamespace;
-      if (Quota.isViolated(dir.getNsQuota(), namespace)) {
+      final long nsQuota = q.get(Quota.NAMESPACE);
+      if (Quota.isViolated(nsQuota, namespace)) {
        LOG.error("BUG: Namespace quota violation in image for "
            + dir.getFullPathName()
-            + " quota = " + dir.getNsQuota() + " < consumed = " + namespace);
+            + " quota = " + nsQuota + " < consumed = " + namespace);
      }

      final long diskspace = counts.get(Quota.DISKSPACE) - parentDiskspace;
-      if (Quota.isViolated(dir.getDsQuota(), diskspace)) {
+      final long dsQuota = q.get(Quota.DISKSPACE);
+      if (Quota.isViolated(dsQuota, diskspace)) {
        LOG.error("BUG: Diskspace quota violation in image for "
            + dir.getFullPathName()
-            + " quota = " + dir.getDsQuota() + " < consumed = " + diskspace);
+            + " quota = " + dsQuota + " < consumed = " + diskspace);
      }

      ((INodeDirectoryWithQuota)dir).setSpaceConsumed(namespace, diskspace);

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java

@@ -371,8 +371,9 @@ public class FSImageFormat {

  /** Update the root node's attributes */
  private void updateRootAttr(INodeWithAdditionalFields root) {
-    long nsQuota = root.getNsQuota();
-    long dsQuota = root.getDsQuota();
+    final Quota.Counts q = root.getQuotaCounts();
+    final long nsQuota = q.get(Quota.NAMESPACE);
+    final long dsQuota = q.get(Quota.DISKSPACE);
    FSDirectory fsDir = namesystem.dir;
    if (nsQuota != -1 || dsQuota != -1) {
      fsDir.rootDir.setQuota(nsQuota, dsQuota);

+ 9 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java

@@ -219,6 +219,12 @@ public class FSImageSerialization {
    out.writeLong(file.getPreferredBlockSize());
  }

+  private static void writeQuota(Quota.Counts quota, DataOutput out)
+      throws IOException {
+    out.writeLong(quota.get(Quota.NAMESPACE));
+    out.writeLong(quota.get(Quota.DISKSPACE));
+  }
+
  /**
   * Serialize a {@link INodeDirectory}
   * @param node The node to write
@@ -234,8 +240,8 @@ public class FSImageSerialization {
    out.writeLong(0);   // preferred block size
    out.writeInt(-1);   // # of blocks

-    out.writeLong(node.getNsQuota());
-    out.writeLong(node.getDsQuota());
+    writeQuota(node.getQuotaCounts(), out);
+
    if (node instanceof INodeDirectorySnapshottable) {
      out.writeBoolean(true);
    } else {
@@ -256,9 +262,7 @@ public class FSImageSerialization {
    writeLocalName(a, out);
    writePermissionStatus(a, out);
    out.writeLong(a.getModificationTime());
-
-    out.writeLong(a.getNsQuota());
-    out.writeLong(a.getDsQuota());
+    writeQuota(a.getQuotaCounts(), out);
  }

  /**

+ 22 - 21
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -152,7 +152,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -7056,8 +7057,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    }
  }

-  long addPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException {
+  long addCacheDirective(
+      CacheDirectiveInfo directive) throws IOException {
    checkOperation(OperationCategory.WRITE);
    final FSPermissionChecker pc = isPermissionEnabled ?
        getPermissionChecker() : null;
@@ -7073,15 +7074,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      checkOperation(OperationCategory.WRITE);
      if (isInSafeMode()) {
        throw new SafeModeException(
-            "Cannot add PathBasedCache directive", safeMode);
+            "Cannot add cache directive", safeMode);
      }
      if (directive.getId() != null) {
        throw new IOException("addDirective: you cannot specify an ID " +
            "for this operation.");
      }
-      PathBasedCacheDirective effectiveDirective = 
+      CacheDirectiveInfo effectiveDirective = 
          cacheManager.addDirective(directive, pc);
-      getEditLog().logAddPathBasedCacheDirective(effectiveDirective,
+      getEditLog().logAddCacheDirectiveInfo(effectiveDirective,
          cacheEntry != null);
      result = effectiveDirective.getId();
      success = true;
@@ -7091,15 +7092,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        getEditLog().logSync();
      }
      if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "addPathBasedCacheDirective", null, null, null);
+        logAuditEvent(success, "addCacheDirective", null, null, null);
      }
      RetryCache.setState(cacheEntry, success, result);
    }
    return result;
  }

-  void modifyPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException {
+  void modifyCacheDirective(
+      CacheDirectiveInfo directive) throws IOException {
    checkOperation(OperationCategory.WRITE);
    final FSPermissionChecker pc = isPermissionEnabled ?
        getPermissionChecker() : null;
@@ -7113,10 +7114,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      checkOperation(OperationCategory.WRITE);
      if (isInSafeMode()) {
        throw new SafeModeException(
-            "Cannot add PathBasedCache directive", safeMode);
+            "Cannot add cache directive", safeMode);
      }
      cacheManager.modifyDirective(directive, pc);
-      getEditLog().logModifyPathBasedCacheDirective(directive,
+      getEditLog().logModifyCacheDirectiveInfo(directive,
          cacheEntry != null);
      success = true;
    } finally {
@@ -7125,13 +7126,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        getEditLog().logSync();
      }
      if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "addPathBasedCacheDirective", null, null, null);
+        logAuditEvent(success, "addCacheDirective", null, null, null);
      }
      RetryCache.setState(cacheEntry, success);
    }
  }

-  void removePathBasedCacheDirective(Long id) throws IOException {
+  void removeCacheDirective(Long id) throws IOException {
    checkOperation(OperationCategory.WRITE);
    final FSPermissionChecker pc = isPermissionEnabled ?
        getPermissionChecker() : null;
@@ -7145,15 +7146,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      checkOperation(OperationCategory.WRITE);
      if (isInSafeMode()) {
        throw new SafeModeException(
-            "Cannot remove PathBasedCache directives", safeMode);
+            "Cannot remove cache directives", safeMode);
      }
      cacheManager.removeDirective(id, pc);
-      getEditLog().logRemovePathBasedCacheDirective(id, cacheEntry != null);
+      getEditLog().logRemoveCacheDirectiveInfo(id, cacheEntry != null);
      success = true;
    } finally {
      writeUnlock();
      if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "removePathBasedCacheDirective", null, null,
+        logAuditEvent(success, "removeCacheDirective", null, null,
            null);
      }
      RetryCache.setState(cacheEntry, success);
@@ -7161,23 +7162,23 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    getEditLog().logSync();
  }

-  BatchedListEntries<PathBasedCacheDirective> listPathBasedCacheDirectives(
-      long startId, PathBasedCacheDirective filter) throws IOException {
+  BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(
+      long startId, CacheDirectiveInfo filter) throws IOException {
    checkOperation(OperationCategory.READ);
    final FSPermissionChecker pc = isPermissionEnabled ?
        getPermissionChecker() : null;
-    BatchedListEntries<PathBasedCacheDirective> results;
+    BatchedListEntries<CacheDirectiveEntry> results;
    readLock();
    boolean success = false;
    try {
      checkOperation(OperationCategory.READ);
      results =
-          cacheManager.listPathBasedCacheDirectives(startId, filter, pc);
+          cacheManager.listCacheDirectives(startId, filter, pc);
      success = true;
    } finally {
      readUnlock();
      if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "listPathBasedCacheDirectives", null, null,
+        logAuditEvent(success, "listCacheDirectives", null, null,
            null);
      }
    }

+ 8 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -383,10 +383,11 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
  public final ContentSummary computeAndConvertContentSummary(
      ContentSummaryComputationContext summary) {
    Content.Counts counts = computeContentSummary(summary).getCounts();
+    final Quota.Counts q = getQuotaCounts();
    return new ContentSummary(counts.get(Content.LENGTH),
        counts.get(Content.FILE) + counts.get(Content.SYMLINK),
-        counts.get(Content.DIRECTORY), getNsQuota(),
-        counts.get(Content.DISKSPACE), getDsQuota());
+        counts.get(Content.DIRECTORY), q.get(Quota.NAMESPACE),
+        counts.get(Content.DISKSPACE), q.get(Quota.DISKSPACE));
  }

  /**
@@ -412,18 +413,15 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {

  /**
   * Get the quota set for this inode
-   * @return the quota if it is set; -1 otherwise
+   * @return the quota counts.  The count is -1 if it is not set.
   */
-  public long getNsQuota() {
-    return -1;
-  }
-
-  public long getDsQuota() {
-    return -1;
+  public Quota.Counts getQuotaCounts() {
+    return Quota.Counts.newInstance(-1, -1);
  }
  
  public final boolean isQuotaSet() {
-    return getNsQuota() >= 0 || getDsQuota() >= 0;
+    final Quota.Counts q = getQuotaCounts();
+    return q.get(Quota.NAMESPACE) >= 0 || q.get(Quota.DISKSPACE) >= 0;
  }
  
  /**

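For orientation only (not part of the diff above): the two quota getters collapse into a single accessor in this branch; a minimal sketch of reading both values through it, using only calls shown in this commit ('dir' stands for any INodeDirectory and is hypothetical here).

    final Quota.Counts q = dir.getQuotaCounts();
    final long nsQuota = q.get(Quota.NAMESPACE);  // -1 means the namespace quota is not set
    final long dsQuota = q.get(Quota.DISKSPACE);  // -1 means the diskspace quota is not set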
+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -612,8 +612,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
  @Override
  public boolean metadataEquals(INodeDirectoryAttributes other) {
    return other != null
-        && getNsQuota() == other.getNsQuota()
-        && getDsQuota() == other.getDsQuota()
+        && getQuotaCounts().equals(other.getQuotaCounts())
        && getPermissionLong() == other.getPermissionLong();
  }
  

+ 10 - 21
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java

@@ -27,9 +27,7 @@ import com.google.common.base.Preconditions;
 */
@InterfaceAudience.Private
public interface INodeDirectoryAttributes extends INodeAttributes {
-  public long getNsQuota();
-
-  public long getDsQuota();
+  public Quota.Counts getQuotaCounts();

  public boolean metadataEquals(INodeDirectoryAttributes other);
  
@@ -46,20 +44,14 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
    }

    @Override
-    public long getNsQuota() {
-      return -1;
-    }
-
-    @Override
-    public long getDsQuota() {
-      return -1;
+    public Quota.Counts getQuotaCounts() {
+      return Quota.Counts.newInstance(-1, -1);
    }

    @Override
    public boolean metadataEquals(INodeDirectoryAttributes other) {
      return other != null
-          && getNsQuota() == other.getNsQuota()
-          && getDsQuota() == other.getDsQuota()
+          && this.getQuotaCounts().equals(other.getQuotaCounts())
          && getPermissionLong() == other.getPermissionLong();
    }
  }
@@ -68,6 +60,7 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
    private final long nsQuota;
    private final long dsQuota;

+
    public CopyWithQuota(byte[] name, PermissionStatus permissions,
        long modificationTime, long nsQuota, long dsQuota) {
      super(name, permissions, modificationTime);
@@ -78,18 +71,14 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
    public CopyWithQuota(INodeDirectory dir) {
      super(dir);
      Preconditions.checkArgument(dir.isQuotaSet());
-      this.nsQuota = dir.getNsQuota();
-      this.dsQuota = dir.getDsQuota();
+      final Quota.Counts q = dir.getQuotaCounts();
+      this.nsQuota = q.get(Quota.NAMESPACE);
+      this.dsQuota = q.get(Quota.DISKSPACE);
    }
    
    @Override
-    public final long getNsQuota() {
-      return nsQuota;
-    }
-
-    @Override
-    public final long getDsQuota() {
-      return dsQuota;
+    public Quota.Counts getQuotaCounts() {
+      return Quota.Counts.newInstance(nsQuota, dsQuota);
     }
     }
   }
   }
 }
 }
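
The hunks above collapse the separate getNsQuota()/getDsQuota() accessors into a single Quota.Counts-valued getQuotaCounts(). A minimal, illustrative Java sketch of the new comparison, assuming hadoop-hdfs on the classpath (the quota values are made-up examples):

import org.apache.hadoop.hdfs.server.namenode.Quota;

public class QuotaCountsSketch {
  public static void main(String[] args) {
    // Quota.Counts.newInstance and get(...) are the calls used by this patch.
    Quota.Counts unset = Quota.Counts.newInstance(-1, -1);
    Quota.Counts set = Quota.Counts.newInstance(100, 1024 * 1024);
    // equals() now comes from EnumCounters (see the EnumCounters.java hunk below),
    // so one call replaces the old pair of long comparisons in metadataEquals().
    System.out.println(unset.equals(set));         // false
    System.out.println(set.get(Quota.NAMESPACE));  // 100
    System.out.println(set.get(Quota.DISKSPACE));  // 1048576
  }
}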

+ 9 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java

@@ -44,7 +44,7 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
   * @param dsQuota Diskspace quota to be assigned to this indoe
   * @param other The other inode from which all other properties are copied
   */
-  public INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
+  INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
       long nsQuota, long dsQuota) {
     super(other, adopt);
     final Quota.Counts counts = other.computeQuotaUsage();
@@ -54,6 +54,11 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
     this.dsQuota = dsQuota;
   }
   
+  public INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
+      Quota.Counts quota) {
+    this(other, adopt, quota.get(Quota.NAMESPACE), quota.get(Quota.DISKSPACE));
+  }
+
   /** constructor with no quota verification */
   INodeDirectoryWithQuota(long id, byte[] name, PermissionStatus permissions,
       long modificationTime, long nsQuota, long dsQuota) {
@@ -67,20 +72,9 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
     super(id, name, permissions, 0L);
   }
   
-  /** Get this directory's namespace quota
-   * @return this directory's namespace quota
-   */
-  @Override
-  public long getNsQuota() {
-    return nsQuota;
-  }
-  
-  /** Get this directory's diskspace quota
-   * @return this directory's diskspace quota
-   */
   @Override
-  public long getDsQuota() {
-    return dsQuota;
+  public Quota.Counts getQuotaCounts() {
+    return Quota.Counts.newInstance(nsQuota, dsQuota);
   }
   
   /** Set this directory's quota
@@ -120,7 +114,7 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
   }
   
   private void checkDiskspace(final long computed) {
-    if (-1 != getDsQuota() && diskspace != computed) {
+    if (-1 != getQuotaCounts().get(Quota.DISKSPACE) && diskspace != computed) {
       NameNode.LOG.error("BUG: Inconsistent diskspace for directory "
           + getFullPathName() + ". Cached = " + diskspace
           + " != Computed = " + computed);

+ 2 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java

@@ -295,15 +295,10 @@ public abstract class INodeReference extends INode {
   }
 
   @Override
-  public final long getNsQuota() {
-    return referred.getNsQuota();
+  public Quota.Counts getQuotaCounts() {
+    return referred.getQuotaCounts();
   }
 
-  @Override
-  public final long getDsQuota() {
-    return referred.getDsQuota();
-  }
-  
   @Override
   public final void clear() {
     super.clear();

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -804,6 +804,10 @@ public class NameNode implements NameNodeStatusMXBean {
     return httpServer.getHttpAddress();
   }
 
+  /**
+   * @return NameNode HTTPS address, used by the Web UI, image transfer,
+   *    and HTTP-based file system clients like Hftp and WebHDFS
+   */
   public InetSocketAddress getHttpsAddress() {
     return httpServer.getHttpsAddress();
   }

+ 23 - 22
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -61,7 +61,8 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -1236,52 +1237,52 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override
-  public long addPathBasedCacheDirective(
-      PathBasedCacheDirective path) throws IOException {
-    return namesystem.addPathBasedCacheDirective(path);
+  public long addCacheDirective(
+      CacheDirectiveInfo path) throws IOException {
+    return namesystem.addCacheDirective(path);
   }
 
   @Override
-  public void modifyPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException {
-    namesystem.modifyPathBasedCacheDirective(directive);
+  public void modifyCacheDirective(
+      CacheDirectiveInfo directive) throws IOException {
+    namesystem.modifyCacheDirective(directive);
   }
 
   @Override
-  public void removePathBasedCacheDirective(long id) throws IOException {
-    namesystem.removePathBasedCacheDirective(id);
+  public void removeCacheDirective(long id) throws IOException {
+    namesystem.removeCacheDirective(id);
   }
 
-  private class ServerSidePathBasedCacheEntriesIterator
-      extends BatchedRemoteIterator<Long, PathBasedCacheDirective> {
+  private class ServerSideCacheEntriesIterator 
+      extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {
 
-    private final PathBasedCacheDirective filter;
+    private final CacheDirectiveInfo filter;
     
-    public ServerSidePathBasedCacheEntriesIterator(Long firstKey, 
-        PathBasedCacheDirective filter) {
+    public ServerSideCacheEntriesIterator (Long firstKey, 
+        CacheDirectiveInfo filter) {
       super(firstKey);
       this.filter = filter;
     }
 
     @Override
-    public BatchedEntries<PathBasedCacheDirective> makeRequest(
+    public BatchedEntries<CacheDirectiveEntry> makeRequest(
         Long nextKey) throws IOException {
-      return namesystem.listPathBasedCacheDirectives(nextKey, filter);
+      return namesystem.listCacheDirectives(nextKey, filter);
     }
 
     @Override
-    public Long elementToPrevKey(PathBasedCacheDirective entry) {
-      return entry.getId();
+    public Long elementToPrevKey(CacheDirectiveEntry entry) {
+      return entry.getInfo().getId();
     }
   }
   
   @Override
-  public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(long prevId,
-      PathBasedCacheDirective filter) throws IOException {
+  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(long prevId,
+      CacheDirectiveInfo filter) throws IOException {
     if (filter == null) {
-      filter = new PathBasedCacheDirective.Builder().build();
+      filter = new CacheDirectiveInfo.Builder().build();
     }
-    return new ServerSidePathBasedCacheEntriesIterator(prevId, filter);
+    return new ServerSideCacheEntriesIterator(prevId, filter);
   }
 
   @Override

+ 1 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

@@ -23,7 +23,6 @@ import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.MemoryMXBean;
 import java.lang.management.MemoryUsage;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URLEncoder;
@@ -57,7 +56,6 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.Status;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -1087,7 +1085,7 @@ class NamenodeJspHelper {
           doc.endTag();
 
           doc.startTag("ds_quota");
-          doc.pcdata(""+inode.getDsQuota());
+          doc.pcdata(""+inode.getQuotaCounts().get(Quota.DISKSPACE));
           doc.endTag();
 
           doc.startTag("permission_status");

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java

@@ -41,7 +41,7 @@ public enum Quota {
     }
     
     Counts() {
-      super(Quota.values());
+      super(Quota.class);
     }
   }
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java

@@ -491,7 +491,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
 
   INodeDirectoryWithSnapshot(INodeDirectory that, boolean adopt,
       DirectoryDiffList diffs) {
-    super(that, adopt, that.getNsQuota(), that.getDsQuota());
+    super(that, adopt, that.getQuotaCounts());
     this.diffs = diffs != null? diffs: new DirectoryDiffList();
   }
 

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -68,6 +68,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.ParamFilter;
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -98,6 +99,7 @@ import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.Credentials;
@@ -214,7 +216,8 @@ public class NamenodeWebHdfsMethods {
     final Credentials c = DelegationTokenSecretManager.createCredentials(
         namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
     final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
-    t.setKind(WebHdfsFileSystem.TOKEN_KIND);
+    Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND : SWebHdfsFileSystem.TOKEN_KIND;
+    t.setKind(kind);
     return t;
   }
 

+ 69 - 52
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java

@@ -30,8 +30,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.tools.TableListing.Justification;
 import org.apache.hadoop.ipc.RemoteException;
@@ -120,7 +122,7 @@ public class CacheAdmin extends Configured implements Tool {
     int run(Configuration conf, List<String> args) throws IOException;
   }
 
-  private static class AddPathBasedCacheDirectiveCommand implements Command {
+  private static class AddCacheDirectiveInfoCommand implements Command {
     @Override
     public String getName() {
       return "-addDirective";
@@ -143,7 +145,7 @@ public class CacheAdmin extends Configured implements Tool {
           "added. You must have write permission on the cache pool "
           + "in order to add new directives.");
       return getShortUsage() + "\n" +
-        "Add a new PathBasedCache directive.\n\n" +
+        "Add a new cache directive.\n\n" +
         listing.toString();
     }
 
@@ -171,14 +173,14 @@ public class CacheAdmin extends Configured implements Tool {
       }
         
       DistributedFileSystem dfs = getDFS(conf);
-      PathBasedCacheDirective directive = new PathBasedCacheDirective.Builder().
+      CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder().
           setPath(new Path(path)).
           setReplication(replication).
           setPool(poolName).
           build();
       try {
-        long id = dfs.addPathBasedCacheDirective(directive);
-        System.out.println("Added PathBasedCache entry " + id);
+        long id = dfs.addCacheDirective(directive);
+        System.out.println("Added cache directive " + id);
       } catch (IOException e) {
         System.err.println(prettifyException(e));
         return 2;
@@ -188,7 +190,7 @@ public class CacheAdmin extends Configured implements Tool {
     }
   }
 
-  private static class RemovePathBasedCacheDirectiveCommand implements Command {
+  private static class RemoveCacheDirectiveInfoCommand implements Command {
     @Override
     public String getName() {
       return "-removeDirective";
@@ -205,7 +207,7 @@ public class CacheAdmin extends Configured implements Tool {
       listing.addRow("<id>", "The id of the cache directive to remove.  " + 
         "You must have write permission on the pool of the " +
         "directive in order to remove it.  To see a list " +
-        "of PathBasedCache directive IDs, use the -listDirectives command.");
+        "of cache directive IDs, use the -listDirectives command.");
       return getShortUsage() + "\n" +
         "Remove a cache directive.\n\n" +
         listing.toString();
@@ -238,8 +240,8 @@ public class CacheAdmin extends Configured implements Tool {
       }
       DistributedFileSystem dfs = getDFS(conf);
       try {
-        dfs.getClient().removePathBasedCacheDirective(id);
-        System.out.println("Removed PathBasedCache directive " + id);
+        dfs.getClient().removeCacheDirective(id);
+        System.out.println("Removed cached directive " + id);
       } catch (IOException e) {
         System.err.println(prettifyException(e));
         return 2;
@@ -248,7 +250,7 @@ public class CacheAdmin extends Configured implements Tool {
     }
   }
 
-  private static class ModifyPathBasedCacheDirectiveCommand implements Command {
+  private static class ModifyCacheDirectiveInfoCommand implements Command {
     @Override
     public String getName() {
       return "-modifyDirective";
@@ -273,14 +275,14 @@ public class CacheAdmin extends Configured implements Tool {
           "added. You must have write permission on the cache pool "
           + "in order to move a directive into it. (optional)");
       return getShortUsage() + "\n" +
-        "Modify a PathBasedCache directive.\n\n" +
+        "Modify a cache directive.\n\n" +
         listing.toString();
     }
 
     @Override
     public int run(Configuration conf, List<String> args) throws IOException {
-      PathBasedCacheDirective.Builder builder =
-        new PathBasedCacheDirective.Builder();
+      CacheDirectiveInfo.Builder builder =
+        new CacheDirectiveInfo.Builder();
       boolean modified = false;
       String idString = StringUtils.popOptionWithArgument("-id", args);
       if (idString == null) {
@@ -316,8 +318,8 @@ public class CacheAdmin extends Configured implements Tool {
       }
       DistributedFileSystem dfs = getDFS(conf);
       try {
-        dfs.modifyPathBasedCacheDirective(builder.build());
-        System.out.println("Modified PathBasedCache entry " + idString);
+        dfs.modifyCacheDirective(builder.build());
+        System.out.println("Modified cache directive " + idString);
       } catch (IOException e) {
         System.err.println(prettifyException(e));
         return 2;
@@ -326,7 +328,7 @@ public class CacheAdmin extends Configured implements Tool {
     }
   }
 
-  private static class RemovePathBasedCacheDirectivesCommand implements Command {
+  private static class RemoveCacheDirectiveInfosCommand implements Command {
     @Override
     public String getName() {
       return "-removeDirectives";
@@ -362,31 +364,31 @@ public class CacheAdmin extends Configured implements Tool {
         return 1;
       }
       DistributedFileSystem dfs = getDFS(conf);
-      RemoteIterator<PathBasedCacheDirective> iter =
-          dfs.listPathBasedCacheDirectives(
-              new PathBasedCacheDirective.Builder().
+      RemoteIterator<CacheDirectiveEntry> iter =
+          dfs.listCacheDirectives(
+              new CacheDirectiveInfo.Builder().
                   setPath(new Path(path)).build());
       int exitCode = 0;
       while (iter.hasNext()) {
-        PathBasedCacheDirective directive = iter.next();
+        CacheDirectiveEntry entry = iter.next();
         try {
-          dfs.removePathBasedCacheDirective(directive.getId());
-          System.out.println("Removed PathBasedCache directive " +
-              directive.getId());
+          dfs.removeCacheDirective(entry.getInfo().getId());
+          System.out.println("Removed cache directive " +
+              entry.getInfo().getId());
         } catch (IOException e) {
           System.err.println(prettifyException(e));
           exitCode = 2;
         }
       }
       if (exitCode == 0) {
-        System.out.println("Removed every PathBasedCache directive with path " +
+        System.out.println("Removed every cache directive with path " +
             path);
       }
       return exitCode;
     }
   }
 
-  private static class ListPathBasedCacheDirectiveCommand implements Command {
+  private static class ListCacheDirectiveInfoCommand implements Command {
     @Override
     public String getName() {
       return "-listDirectives";
@@ -394,27 +396,28 @@ public class CacheAdmin extends Configured implements Tool {
 
     @Override
     public String getShortUsage() {
-      return "[" + getName() + " [-path <path>] [-pool <pool>]]\n";
+      return "[" + getName() + " [-stats] [-path <path>] [-pool <pool>]]\n";
     }
 
     @Override
     public String getLongUsage() {
       TableListing listing = getOptionDescriptionListing();
       listing.addRow("<path>", "List only " +
-          "PathBasedCache directives with this path. " +
-          "Note that if there is a PathBasedCache directive for <path> " +
+          "cache directives with this path. " +
+          "Note that if there is a cache directive for <path> " +
           "in a cache pool that we don't have read access for, it " + 
           "will not be listed.");
       listing.addRow("<pool>", "List only path cache directives in that pool.");
+      listing.addRow("-stats", "List path-based cache directive statistics.");
       return getShortUsage() + "\n" +
-        "List PathBasedCache directives.\n\n" +
+        "List cache directives.\n\n" +
         listing.toString();
     }
 
     @Override
     public int run(Configuration conf, List<String> args) throws IOException {
-      PathBasedCacheDirective.Builder builder =
-          new PathBasedCacheDirective.Builder();
+      CacheDirectiveInfo.Builder builder =
+          new CacheDirectiveInfo.Builder();
       String pathFilter = StringUtils.popOptionWithArgument("-path", args);
       if (pathFilter != null) {
         builder.setPath(new Path(pathFilter));
@@ -423,28 +426,42 @@ public class CacheAdmin extends Configured implements Tool {
       if (poolFilter != null) {
         builder.setPool(poolFilter);
       }
+      boolean printStats = StringUtils.popOption("-stats", args);
       if (!args.isEmpty()) {
         System.err.println("Can't understand argument: " + args.get(0));
         return 1;
       }
-      TableListing tableListing = new TableListing.Builder().
-          addField("ID", Justification.LEFT).
+      TableListing.Builder tableBuilder = new TableListing.Builder().
+          addField("ID", Justification.RIGHT).
           addField("POOL", Justification.LEFT).
-          addField("REPLICATION", Justification.LEFT).
-          addField("PATH", Justification.LEFT).
-          build();
+          addField("REPLICATION", Justification.RIGHT).
+          addField("PATH", Justification.LEFT);
+      if (printStats) {
+        tableBuilder.addField("NEEDED", Justification.RIGHT).
+                    addField("CACHED", Justification.RIGHT).
+                    addField("FILES", Justification.RIGHT);
+      }
+      TableListing tableListing = tableBuilder.build();
+
       DistributedFileSystem dfs = getDFS(conf);
-      RemoteIterator<PathBasedCacheDirective> iter =
-          dfs.listPathBasedCacheDirectives(builder.build());
+      RemoteIterator<CacheDirectiveEntry> iter =
+          dfs.listCacheDirectives(builder.build());
       int numEntries = 0;
       while (iter.hasNext()) {
-        PathBasedCacheDirective directive = iter.next();
-        String row[] = new String[] {
-            "" + directive.getId(), directive.getPool(),
-            "" + directive.getReplication(),
-            directive.getPath().toUri().getPath(),
-        };
-        tableListing.addRow(row);
+        CacheDirectiveEntry entry = iter.next();
+        CacheDirectiveInfo directive = entry.getInfo();
+        CacheDirectiveStats stats = entry.getStats();
+        List<String> row = new LinkedList<String>();
+        row.add("" + directive.getId());
+        row.add(directive.getPool());
+        row.add("" + directive.getReplication());
+        row.add(directive.getPath().toUri().getPath());
+        if (printStats) {
+          row.add("" + stats.getBytesNeeded());
+          row.add("" + stats.getBytesCached());
+          row.add("" + stats.getFilesAffected());
+        }
+        tableListing.addRow(row.toArray(new String[0]));
         numEntries++;
       }
       System.out.print(String.format("Found %d entr%s\n",
@@ -734,7 +751,7 @@ public class CacheAdmin extends Configured implements Tool {
           addField("OWNER", Justification.LEFT).
           addField("GROUP", Justification.LEFT).
           addField("MODE", Justification.LEFT).
-          addField("WEIGHT", Justification.LEFT).
+          addField("WEIGHT", Justification.RIGHT).
           build();
       int numResults = 0;
       try {
@@ -824,11 +841,11 @@ public class CacheAdmin extends Configured implements Tool {
   }
 
   private static Command[] COMMANDS = {
-    new AddPathBasedCacheDirectiveCommand(),
-    new ModifyPathBasedCacheDirectiveCommand(),
-    new ListPathBasedCacheDirectiveCommand(),
-    new RemovePathBasedCacheDirectiveCommand(),
-    new RemovePathBasedCacheDirectivesCommand(),
+    new AddCacheDirectiveInfoCommand(),
+    new ModifyCacheDirectiveInfoCommand(),
+    new ListCacheDirectiveInfoCommand(),
+    new RemoveCacheDirectiveInfoCommand(),
+    new RemoveCacheDirectiveInfosCommand(),
     new AddCachePoolCommand(),
     new ModifyCachePoolCommand(),
     new RemoveCachePoolCommand(),
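
The renamed CacheAdmin commands above are thin wrappers over the correspondingly renamed DistributedFileSystem calls. A rough, illustrative sketch of driving that API directly, using only methods visible in this diff (the path /cached/data and the pool name pool1 are placeholder example values):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class CacheDirectiveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    // Roughly what "-addDirective -path /cached/data -pool pool1" does.
    long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/cached/data"))
        .setReplication((short) 1)
        .setPool("pool1")
        .build());
    System.out.println("Added cache directive " + id);

    // Roughly what "-listDirectives -pool pool1" does.
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setPool("pool1").build());
    while (it.hasNext()) {
      CacheDirectiveEntry entry = it.next();
      System.out.println(entry.getInfo().getId() + "\t" + entry.getInfo().getPath());
    }

    // Roughly what "-removeDirective <id>" does.
    dfs.removeCacheDirective(id);
  }
}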

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java

@@ -30,9 +30,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 * Example:
 * 
 * NAME   OWNER   GROUP   MODE       WEIGHT
- * pool1  andrew  andrew  rwxr-xr-x  100
- * pool2  andrew  andrew  rwxr-xr-x  100
- * pool3  andrew  andrew  rwxr-xr-x  100
+ * pool1  andrew  andrew  rwxr-xr-x     100
+ * pool2  andrew  andrew  rwxr-xr-x     100
+ * pool3  andrew  andrew  rwxr-xr-x     100
 * 
 */
 @InterfaceAudience.Private

+ 34 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.util;
 
+import java.util.Arrays;
 import java.util.HashMap;
 
 import com.google.common.base.Preconditions;
@@ -34,21 +35,19 @@ import com.google.common.base.Preconditions;
  * @param <E> the enum type
  */
 public class EnumCounters<E extends Enum<E>> {
-  /** An array of enum constants. */
-  private final E[] enumConstants;
+  /** The class of the enum. */
+  private final Class<E> enumClass;
   /** The counter array, counters[i] corresponds to the enumConstants[i]. */
   private final long[] counters;
 
   /**
    * Construct counters for the given enum constants.
-   * @param enumConstants an array of enum constants such that, 
-   *                      for all i, enumConstants[i].ordinal() == i.
+   * @param enumClass the enum class of the counters.
    */
-  public EnumCounters(final E[] enumConstants) {
-    for(int i = 0; i < enumConstants.length; i++) {
-      Preconditions.checkArgument(enumConstants[i].ordinal() == i);
-    }
-    this.enumConstants = enumConstants;
+  public EnumCounters(final Class<E> enumClass) {
+    final E[] enumConstants = enumClass.getEnumConstants();
+    Preconditions.checkNotNull(enumConstants);
+    this.enumClass = enumClass;
     this.counters = new long[enumConstants.length];
   }
   
@@ -69,6 +68,13 @@ public class EnumCounters<E extends Enum<E>> {
     counters[e.ordinal()] = value;
   }
 
+  /** Set this counters to that counters. */
+  public final void set(final EnumCounters<E> that) {
+    for(int i = 0; i < counters.length; i++) {
+      this.counters[i] = that.counters[i];
+    }
+  }
+
   /** Add the given value to counter e. */
   public final void add(final E e, final long value) {
     counters[e.ordinal()] += value;
@@ -86,15 +92,33 @@ public class EnumCounters<E extends Enum<E>> {
     counters[e.ordinal()] -= value;
   }
 
-  /** Subtract that counters from this counters. */
+  /** Subtract this counters from that counters. */
   public final void subtract(final EnumCounters<E> that) {
     for(int i = 0; i < counters.length; i++) {
       this.counters[i] -= that.counters[i];
     }
   }
 
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == this) {
+      return true;
+    } else if (obj == null || !(obj instanceof EnumCounters)) {
+      return false;
+    }
+    final EnumCounters<?> that = (EnumCounters<?>)obj;
+    return this.enumClass == that.enumClass
+        && Arrays.equals(this.counters, that.counters);
+  }
+
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(counters);
+  }
+
   @Override
   public String toString() {
+    final E[] enumConstants = enumClass.getEnumConstants();
     final StringBuilder b = new StringBuilder();
     for(int i = 0; i < counters.length; i++) {
       final String name = enumConstants[i].name();
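
The EnumCounters change above switches the constructor from an array of enum constants to the enum class itself and adds set(), equals(), and hashCode(). An illustrative sketch (the Op enum is a made-up example, and get(...) is assumed to behave as it does via Quota.Counts elsewhere in this changeset):

import org.apache.hadoop.hdfs.util.EnumCounters;

public class EnumCountersSketch {
  enum Op { READ, WRITE }

  public static void main(String[] args) {
    // Old callers passed Op.values(); the patch takes Op.class and derives the constants.
    EnumCounters<Op> a = new EnumCounters<Op>(Op.class);
    EnumCounters<Op> b = new EnumCounters<Op>(Op.class);
    a.add(Op.READ, 3);
    b.set(a);                           // copy method added by this patch
    System.out.println(a.equals(b));    // true: equals() compares enum class + counter array
    System.out.println(b.get(Op.READ)); // 3
  }
}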

+ 66 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java

@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.io.Text;
+
+public class SWebHdfsFileSystem extends WebHdfsFileSystem {
+
+  public static final Text TOKEN_KIND = new Text("SWEBHDFS delegation");
+  public static final String SCHEME = "swebhdfs";
+
+  @Override
+  public String getScheme() {
+    return SCHEME;
+  }
+
+  @Override
+  protected String getTransportScheme() {
+    return "https";
+  }
+
+  @Override
+  protected synchronized void initializeTokenAspect() {
+    tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
+  }
+
+  @Override
+  protected void initializeConnectionFactory(Configuration conf)
+      throws IOException {
+    connectionFactory = new URLConnectionFactory(
+        URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
+    try {
+      connectionFactory.setConnConfigurator(URLConnectionFactory
+          .newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
+              conf));
+    } catch (GeneralSecurityException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  protected int getDefaultPort() {
+    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
+        DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
+  }
+}
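
The new SWebHdfsFileSystem above is picked up through the FileSystem service registration and token handling changed elsewhere in this diff. A hedged usage sketch (nn.example.com:50470 is a placeholder HTTPS address, not something defined by this patch):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SWebHdfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The "swebhdfs" scheme resolves to SWebHdfsFileSystem via the
    // META-INF/services entry added further down in this change.
    FileSystem fs = FileSystem.get(URI.create("swebhdfs://nn.example.com:50470/"), conf);
    System.out.println(fs.getScheme());           // swebhdfs
    System.out.println(fs.exists(new Path("/"))); // true if the NameNode is reachable over HTTPS
  }
}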

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java

@@ -58,7 +58,8 @@ final class TokenAspect<T extends FileSystem & Renewable> {
     public boolean handleKind(Text kind) {
       return kind.equals(HftpFileSystem.TOKEN_KIND)
           || kind.equals(HsftpFileSystem.TOKEN_KIND)
-          || kind.equals(WebHdfsFileSystem.TOKEN_KIND);
+          || kind.equals(WebHdfsFileSystem.TOKEN_KIND)
+          || kind.equals(SWebHdfsFileSystem.TOKEN_KIND);
     }
 
     @Override
@@ -83,6 +84,8 @@ final class TokenAspect<T extends FileSystem & Renewable> {
         uri = DFSUtil.createUri(HsftpFileSystem.SCHEME, address);
       } else if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
         uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, address);
+      } else if (kind.equals(SWebHdfsFileSystem.TOKEN_KIND)) {
+        uri = DFSUtil.createUri(SWebHdfsFileSystem.SCHEME, address);
       } else {
         throw new IllegalArgumentException("Unsupported scheme");
       }

+ 33 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -56,7 +56,6 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
-import org.apache.hadoop.hdfs.web.TokenAspect.DTSelecorByKind;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
@@ -98,7 +97,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.collect.Lists;
 
@@ -118,8 +116,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   /** Delegation token kind */
   public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
-  protected TokenAspect<WebHdfsFileSystem> tokenAspect = new TokenAspect<WebHdfsFileSystem>(
-      this, TOKEN_KIND);
+  protected TokenAspect<WebHdfsFileSystem> tokenAspect;
 
   private UserGroupInformation ugi;
   private URI uri;
@@ -140,17 +137,44 @@ public class WebHdfsFileSystem extends FileSystem
     return SCHEME;
   }
 
+  /**
+   * return the underlying transport protocol (http / https).
+   */
+  protected String getTransportScheme() {
+    return "http";
+  }
+
+  /**
+   * Initialize tokenAspect. This function is intended to
+   * be overridden by SWebHdfsFileSystem.
+   */
+  protected synchronized void initializeTokenAspect() {
+    tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
+  }
+
+  /**
+   * Initialize connectionFactory. This function is intended to
+   * be overridden by SWebHdfsFileSystem.
+   */
+  protected void initializeConnectionFactory(Configuration conf)
+      throws IOException {
+    connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+  }
+
   @Override
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
+    initializeTokenAspect();
+    initializeConnectionFactory(conf);
+
     ugi = UserGroupInformation.getCurrentUser();
 
     try {
       this.uri = new URI(uri.getScheme(), uri.getAuthority(), null,
           null, null);
-      this.nnAddrs = DFSUtil.resolve(this.uri, getDefaultPort(), conf);
+      this.nnAddrs = DFSUtil.resolveWebHdfsUri(this.uri, conf);
     } catch (URISyntaxException e) {
       throw new IllegalArgumentException(e);
     }
@@ -342,7 +366,7 @@ public class WebHdfsFileSystem extends FileSystem
    */
   private URL getNamenodeURL(String path, String query) throws IOException {
     InetSocketAddress nnAddr = getCurrentNNAddr();
-    final URL url = new URL("http", nnAddr.getHostName(),
+    final URL url = new URL(getTransportScheme(), nnAddr.getHostName(),
           nnAddr.getPort(), path + '?' + query);
     if (LOG.isTraceEnabled()) {
       LOG.trace("url=" + url);
@@ -840,7 +864,9 @@ public class WebHdfsFileSystem extends FileSystem
   @Override
   public void close() throws IOException {
     super.close();
-    tokenAspect.removeRenewAction();
+    synchronized (this) {
+      tokenAspect.removeRenewAction();
+    }
   }
 
   class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {

+ 30 - 23
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto

@@ -364,46 +364,53 @@ message IsFileClosedResponseProto {
   required bool result = 1;
 }
 
-message PathBasedCacheDirectiveInfoProto {
+message CacheDirectiveInfoProto {
   optional int64 id = 1;
   optional string path = 2;
   optional uint32 replication = 3;
   optional string pool = 4;
 }
 
-message AddPathBasedCacheDirectiveRequestProto {
-  required PathBasedCacheDirectiveInfoProto info = 1;
+message CacheDirectiveStatsProto {
+  required int64 bytesNeeded = 1;
+  required int64 bytesCached = 2;
+  required int64 filesAffected = 3;
 }
 
-message AddPathBasedCacheDirectiveResponseProto {
+message AddCacheDirectiveRequestProto {
+  required CacheDirectiveInfoProto info = 1;
+}
+
+message AddCacheDirectiveResponseProto {
   required int64 id = 1;
 }
 
-message ModifyPathBasedCacheDirectiveRequestProto {
-  required PathBasedCacheDirectiveInfoProto info = 1;
+message ModifyCacheDirectiveRequestProto {
+  required CacheDirectiveInfoProto info = 1;
 }
 
-message ModifyPathBasedCacheDirectiveResponseProto {
+message ModifyCacheDirectiveResponseProto {
 }
 
-message RemovePathBasedCacheDirectiveRequestProto {
+message RemoveCacheDirectiveRequestProto {
   required int64 id = 1;
 }
 
-message RemovePathBasedCacheDirectiveResponseProto {
+message RemoveCacheDirectiveResponseProto {
 }
 
-message ListPathBasedCacheDirectivesRequestProto {
+message ListCacheDirectivesRequestProto {
   required int64 prevId = 1;
-  required PathBasedCacheDirectiveInfoProto filter = 2;
+  required CacheDirectiveInfoProto filter = 2;
 }
 
-message ListPathBasedCacheDirectivesElementProto {
-  required PathBasedCacheDirectiveInfoProto info = 1;
+message CacheDirectiveEntryProto {
+  required CacheDirectiveInfoProto info = 1;
+  required CacheDirectiveStatsProto stats = 2;
 }
 
-message ListPathBasedCacheDirectivesResponseProto {
-  repeated ListPathBasedCacheDirectivesElementProto elements = 1;
+message ListCacheDirectivesResponseProto {
+  repeated CacheDirectiveEntryProto elements = 1;
   required bool hasMore = 2;
 }
 
@@ -631,14 +638,14 @@ service ClientNamenodeProtocol {
       returns(ListCorruptFileBlocksResponseProto);
   rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);
   rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto);
-  rpc addPathBasedCacheDirective(AddPathBasedCacheDirectiveRequestProto)
-      returns (AddPathBasedCacheDirectiveResponseProto);
-  rpc modifyPathBasedCacheDirective(ModifyPathBasedCacheDirectiveRequestProto)
-      returns (ModifyPathBasedCacheDirectiveResponseProto);
-  rpc removePathBasedCacheDirective(RemovePathBasedCacheDirectiveRequestProto)
-      returns (RemovePathBasedCacheDirectiveResponseProto);
-  rpc listPathBasedCacheDirectives(ListPathBasedCacheDirectivesRequestProto)
-      returns (ListPathBasedCacheDirectivesResponseProto);
+  rpc addCacheDirective(AddCacheDirectiveRequestProto)
+      returns (AddCacheDirectiveResponseProto);
+  rpc modifyCacheDirective(ModifyCacheDirectiveRequestProto)
+      returns (ModifyCacheDirectiveResponseProto);
+  rpc removeCacheDirective(RemoveCacheDirectiveRequestProto)
+      returns (RemoveCacheDirectiveResponseProto);
+  rpc listCacheDirectives(ListCacheDirectivesRequestProto)
+      returns (ListCacheDirectivesResponseProto);
   rpc addCachePool(AddCachePoolRequestProto)
       returns(AddCachePoolResponseProto);
   rpc modifyCachePool(ModifyCachePoolRequestProto)

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem

@@ -17,3 +17,4 @@ org.apache.hadoop.hdfs.DistributedFileSystem
 org.apache.hadoop.hdfs.web.HftpFileSystem
 org.apache.hadoop.hdfs.web.HsftpFileSystem
 org.apache.hadoop.hdfs.web.WebHdfsFileSystem
+org.apache.hadoop.hdfs.web.SWebHdfsFileSystem

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -1179,7 +1179,7 @@
 
 <property>
   <name>dfs.webhdfs.enabled</name>
-  <value>false</value>
+  <value>true</value>
   <description>
     Enable WebHDFS (REST API) in Namenodes and Datanodes.
   </description>
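
With the hdfs-default.xml change above, WebHDFS is on by default. A small, illustrative sketch of reading and pinning the flag through DFSConfigKeys (the key name is taken from this diff; the explicit setBoolean mirrors what the tests below still do):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class WebHdfsEnabledSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Tests may still set the value explicitly, as TestSymlinkHdfs does below.
    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
    System.out.println(conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, false));
  }
}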

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm

@@ -118,7 +118,7 @@ Centralized Cache Management in HDFS
 
   Usage: <<<hdfs cacheadmin -addDirective -path <path> -replication <replication> -pool <pool-name> >>>
 
-  Add a new PathBasedCache directive.
+  Add a new cache directive.
 
 *--+--+
 \<path\> | A path to cache. The path can be a directory or a file.
@@ -135,7 +135,7 @@
   Remove a cache directive.
 
 *--+--+
-\<id\> | The id of the cache directive to remove.  You must have write permission on the pool of the directive in order to remove it.  To see a list of PathBasedCache directive IDs, use the -listDirectives command.
+\<id\> | The id of the cache directive to remove.  You must have write permission on the pool of the directive in order to remove it.  To see a list of cachedirective IDs, use the -listDirectives command.
 *--+--+
 
 *** {removeDirectives}
@@ -152,10 +152,10 @@
 
   Usage: <<<hdfs cacheadmin -listDirectives [-path <path>] [-pool <pool>] >>>
 
-  List PathBasedCache directives.
+  List cache directives.
 
 *--+--+
-\<path\> | List only PathBasedCache directives with this path. Note that if there is a PathBasedCache directive for <path> in a cache pool that we don't have read access for, it will not be listed.
+\<path\> | List only cache directives with this path. Note that if there is a cache directive for <path> in a cache pool that we don't have read access for, it will not be listed.
 *--+--+
 \<pool\> | List only path cache directives in that pool.
 *--+--+

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java

@@ -89,7 +89,7 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(FsPermission.UMASK_LABEL, "000");
     cluster = new MiniDFSCluster.Builder(conf).build();
-    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
     dfs = cluster.getFileSystem();
   }
 

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -1038,20 +1038,20 @@ public class DFSTestUtil {
     // OP_MODIFY_CACHE_POOL
     filesystem.modifyCachePool(new CachePoolInfo("pool1").setWeight(99));
     // OP_ADD_PATH_BASED_CACHE_DIRECTIVE
-    long id = filesystem.addPathBasedCacheDirective(
-        new PathBasedCacheDirective.Builder().
+    long id = filesystem.addCacheDirective(
+        new CacheDirectiveInfo.Builder().
             setPath(new Path("/path")).
             setReplication((short)1).
             setPool("pool1").
             build());
     // OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE
-    filesystem.modifyPathBasedCacheDirective(
-        new PathBasedCacheDirective.Builder().
+    filesystem.modifyCacheDirective(
+        new CacheDirectiveInfo.Builder().
             setId(id).
             setReplication((short)2).
             build());
     // OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE
-    filesystem.removePathBasedCacheDirective(id);
+    filesystem.removeCacheDirective(id);
     // OP_REMOVE_CACHE_POOL
     filesystem.removeCachePool("pool1");
   }

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -73,6 +73,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
@@ -886,8 +887,8 @@ public class TestDFSClientRetries {
     try {
       cluster.waitActive();
       final DistributedFileSystem dfs = cluster.getFileSystem();
-      final FileSystem fs = isWebHDFS?
-          WebHdfsTestUtil.getWebHdfsFileSystem(conf): dfs;
+      final FileSystem fs = isWebHDFS ? WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsFileSystem.SCHEME) : dfs;
       final URI uri = dfs.getUri();
       assertTrue(HdfsUtils.isHealthy(uri));
 
@@ -1091,7 +1092,7 @@ public class TestDFSClientRetries {
     final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
         username, new String[]{"supergroup"});
 
-    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf)
+    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME)
         : DFSTestUtil.getFileSystemAs(ugi, conf);
   }
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java

@@ -556,7 +556,7 @@ public class TestDFSUtil {
     Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
 
     Map<String, Map<String, InetSocketAddress>> map =
-        DFSUtil.getHaNnHttpAddresses(conf);
+        DFSUtil.getHaNnWebHdfsAddresses(conf, "webhdfs");
 
     assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
     assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
@@ -574,7 +574,7 @@
     Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
     URI uri = new URI("webhdfs://ns1");
     assertTrue(HAUtil.isLogicalUri(conf, uri));
-    InetSocketAddress[] addrs = DFSUtil.resolve(uri, DEFAULT_PORT, conf);
+    InetSocketAddress[] addrs = DFSUtil.resolveWebHdfsUri(uri, conf);
     assertArrayEquals(new InetSocketAddress[] {
       new InetSocketAddress(NS1_NN1_HOST, DEFAULT_PORT),
       new InetSocketAddress(NS1_NN2_HOST, DEFAULT_PORT),

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java

@@ -147,7 +147,7 @@ public class TestDelegationTokenForProxyUser {
   public void testWebHdfsDoAs() throws Exception {
   public void testWebHdfsDoAs() throws Exception {
     WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
     WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
     WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
     WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
-    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config);
+    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config, WebHdfsFileSystem.SCHEME);
     
     
     final Path root = new Path("/");
     final Path root = new Path("/");
     cluster.getFileSystem().setPermission(root, new FsPermission((short)0777));
     cluster.getFileSystem().setPermission(root, new FsPermission((short)0777));

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java

@@ -554,7 +554,7 @@ public abstract class FSImageTestUtil {
    * get NameSpace quota.
    * get NameSpace quota.
    */
    */
   public static long getNSQuota(FSNamesystem ns) {
   public static long getNSQuota(FSNamesystem ns) {
-    return ns.dir.rootDir.getNsQuota();
+    return ns.dir.rootDir.getQuotaCounts().get(Quota.NAMESPACE);
   }
   }
   
   
   public static void assertNNFilesMatch(MiniDFSCluster cluster) throws Exception {
   public static void assertNNFilesMatch(MiniDFSCluster cluster) throws Exception {

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java

@@ -42,7 +42,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -241,20 +241,20 @@ public class OfflineEditsViewerHelper {
         .setMode(new FsPermission((short)0700))
         .setMode(new FsPermission((short)0700))
         .setWeight(1989));
         .setWeight(1989));
     // OP_ADD_PATH_BASED_CACHE_DIRECTIVE 33
     // OP_ADD_PATH_BASED_CACHE_DIRECTIVE 33
-    long id = dfs.addPathBasedCacheDirective(
-        new PathBasedCacheDirective.Builder().
+    long id = dfs.addCacheDirective(
+        new CacheDirectiveInfo.Builder().
             setPath(new Path("/bar")).
             setPath(new Path("/bar")).
             setReplication((short)1).
             setReplication((short)1).
             setPool(pool).
             setPool(pool).
             build());
             build());
     // OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE 38
     // OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE 38
-    dfs.modifyPathBasedCacheDirective(
-        new PathBasedCacheDirective.Builder().
+    dfs.modifyCacheDirective(
+        new CacheDirectiveInfo.Builder().
             setId(id).
             setId(id).
             setPath(new Path("/bar2")).
             setPath(new Path("/bar2")).
             build());
             build());
     // OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE 34
     // OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE 34
-    dfs.removePathBasedCacheDirective(id);
+    dfs.removeCacheDirective(id);
     // OP_REMOVE_CACHE_POOL 37
     // OP_REMOVE_CACHE_POOL 37
     dfs.removeCachePool(pool);
     dfs.removeCachePool(pool);
     // sync to disk, otherwise we parse partial edits
     // sync to disk, otherwise we parse partial edits

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java

@@ -163,7 +163,7 @@ public class TestAuditLogs {
 
 
     setupAuditLogs();
     setupAuditLogs();
 
 
-    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
     InputStream istream = webfs.open(file);
     InputStream istream = webfs.open(file);
     int val = istream.read();
     int val = istream.read();
     istream.close();
     istream.close();
@@ -182,7 +182,7 @@ public class TestAuditLogs {
 
 
     setupAuditLogs();
     setupAuditLogs();
 
 
-    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
     FileStatus st = webfs.getFileStatus(file);
     FileStatus st = webfs.getFileStatus(file);
 
 
     verifyAuditLogs(true);
     verifyAuditLogs(true);
@@ -222,7 +222,7 @@ public class TestAuditLogs {
 
 
     setupAuditLogs();
     setupAuditLogs();
     try {
     try {
-      WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+      WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
       InputStream istream = webfs.open(file);
       InputStream istream = webfs.open(file);
       int val = istream.read();
       int val = istream.read();
       fail("open+read must not succeed, got " + val);
       fail("open+read must not succeed, got " + val);

+ 144 - 79
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java → hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java

@@ -31,7 +31,6 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.junit.Assert.fail;
 
 
 import java.io.IOException;
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Iterator;
@@ -54,8 +53,11 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -66,14 +68,17 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.GSet;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
 import org.junit.After;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.Test;
 
 
 import com.google.common.base.Supplier;
 import com.google.common.base.Supplier;
 
 
-public class TestPathBasedCacheRequests {
-  static final Log LOG = LogFactory.getLog(TestPathBasedCacheRequests.class);
+public class TestCacheDirectives {
+  static final Log LOG = LogFactory.getLog(TestCacheDirectives.class);
 
 
   private static final UserGroupInformation unprivilegedUser =
   private static final UserGroupInformation unprivilegedUser =
       UserGroupInformation.createRemoteUser("unprivilegedUser");
       UserGroupInformation.createRemoteUser("unprivilegedUser");
@@ -100,6 +105,7 @@ public class TestPathBasedCacheRequests {
     proto = cluster.getNameNodeRpc();
     proto = cluster.getNameNodeRpc();
     prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
     prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
     NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
     NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
+    LogManager.getLogger(CacheReplicationMonitor.class).setLevel(Level.TRACE);
   }
   }
 
 
   @After
   @After
@@ -307,24 +313,25 @@ public class TestPathBasedCacheRequests {
   }
   }
 
 
   private static void validateListAll(
   private static void validateListAll(
-      RemoteIterator<PathBasedCacheDirective> iter,
+      RemoteIterator<CacheDirectiveEntry> iter,
       Long... ids) throws Exception {
       Long... ids) throws Exception {
     for (Long id: ids) {
     for (Long id: ids) {
       assertTrue("Unexpectedly few elements", iter.hasNext());
       assertTrue("Unexpectedly few elements", iter.hasNext());
-      assertEquals("Unexpected directive ID", id, iter.next().getId());
+      assertEquals("Unexpected directive ID", id,
+          iter.next().getInfo().getId());
     }
     }
     assertFalse("Unexpectedly many list elements", iter.hasNext());
     assertFalse("Unexpectedly many list elements", iter.hasNext());
   }
   }
 
 
   private static long addAsUnprivileged(
   private static long addAsUnprivileged(
-      final PathBasedCacheDirective directive) throws Exception {
+      final CacheDirectiveInfo directive) throws Exception {
     return unprivilegedUser
     return unprivilegedUser
         .doAs(new PrivilegedExceptionAction<Long>() {
         .doAs(new PrivilegedExceptionAction<Long>() {
           @Override
           @Override
           public Long run() throws IOException {
           public Long run() throws IOException {
             DistributedFileSystem myDfs =
             DistributedFileSystem myDfs =
                 (DistributedFileSystem) FileSystem.get(conf);
                 (DistributedFileSystem) FileSystem.get(conf);
-            return myDfs.addPathBasedCacheDirective(directive);
+            return myDfs.addCacheDirective(directive);
           }
           }
         });
         });
   }
   }
@@ -340,15 +347,15 @@ public class TestPathBasedCacheRequests {
     proto.addCachePool(new CachePoolInfo("pool4").
     proto.addCachePool(new CachePoolInfo("pool4").
         setMode(new FsPermission((short)0)));
         setMode(new FsPermission((short)0)));
 
 
-    PathBasedCacheDirective alpha = new PathBasedCacheDirective.Builder().
+    CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder().
         setPath(new Path("/alpha")).
         setPath(new Path("/alpha")).
         setPool("pool1").
         setPool("pool1").
         build();
         build();
-    PathBasedCacheDirective beta = new PathBasedCacheDirective.Builder().
+    CacheDirectiveInfo beta = new CacheDirectiveInfo.Builder().
         setPath(new Path("/beta")).
         setPath(new Path("/beta")).
         setPool("pool2").
         setPool("pool2").
         build();
         build();
-    PathBasedCacheDirective delta = new PathBasedCacheDirective.Builder().
+    CacheDirectiveInfo delta = new CacheDirectiveInfo.Builder().
         setPath(new Path("/delta")).
         setPath(new Path("/delta")).
         setPool("pool1").
         setPool("pool1").
         build();
         build();
@@ -356,12 +363,12 @@ public class TestPathBasedCacheRequests {
     long alphaId = addAsUnprivileged(alpha);
     long alphaId = addAsUnprivileged(alpha);
     long alphaId2 = addAsUnprivileged(alpha);
     long alphaId2 = addAsUnprivileged(alpha);
     assertFalse("Expected to get unique directives when re-adding an "
     assertFalse("Expected to get unique directives when re-adding an "
-        + "existing PathBasedCacheDirective",
+        + "existing CacheDirectiveInfo",
         alphaId == alphaId2);
         alphaId == alphaId2);
     long betaId = addAsUnprivileged(beta);
     long betaId = addAsUnprivileged(beta);
 
 
     try {
     try {
-      addAsUnprivileged(new PathBasedCacheDirective.Builder().
+      addAsUnprivileged(new CacheDirectiveInfo.Builder().
           setPath(new Path("/unicorn")).
           setPath(new Path("/unicorn")).
           setPool("no_such_pool").
           setPool("no_such_pool").
           build());
           build());
@@ -371,7 +378,7 @@ public class TestPathBasedCacheRequests {
     }
     }
 
 
     try {
     try {
-      addAsUnprivileged(new PathBasedCacheDirective.Builder().
+      addAsUnprivileged(new CacheDirectiveInfo.Builder().
           setPath(new Path("/blackhole")).
           setPath(new Path("/blackhole")).
           setPool("pool4").
           setPool("pool4").
           build());
           build());
@@ -383,7 +390,7 @@ public class TestPathBasedCacheRequests {
     }
     }
 
 
     try {
     try {
-      addAsUnprivileged(new PathBasedCacheDirective.Builder().
+      addAsUnprivileged(new CacheDirectiveInfo.Builder().
           setPath(new Path("/illegal:path/")).
           setPath(new Path("/illegal:path/")).
           setPool("pool1").
           setPool("pool1").
           build());
           build());
@@ -394,12 +401,12 @@ public class TestPathBasedCacheRequests {
     }
     }
 
 
     try {
     try {
-      addAsUnprivileged(new PathBasedCacheDirective.Builder().
+      addAsUnprivileged(new CacheDirectiveInfo.Builder().
           setPath(new Path("/emptypoolname")).
           setPath(new Path("/emptypoolname")).
           setReplication((short)1).
           setReplication((short)1).
           setPool("").
           setPool("").
           build());
           build());
-      fail("expected an error when adding a PathBasedCache " +
+      fail("expected an error when adding a cache " +
           "directive with an empty pool name.");
           "directive with an empty pool name.");
     } catch (InvalidRequestException e) {
     } catch (InvalidRequestException e) {
       GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
       GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
@@ -410,75 +417,75 @@ public class TestPathBasedCacheRequests {
     // We expect the following to succeed, because DistributedFileSystem
     // We expect the following to succeed, because DistributedFileSystem
     // qualifies the path.
     // qualifies the path.
     long relativeId = addAsUnprivileged(
     long relativeId = addAsUnprivileged(
-        new PathBasedCacheDirective.Builder().
+        new CacheDirectiveInfo.Builder().
             setPath(new Path("relative")).
             setPath(new Path("relative")).
             setPool("pool1").
             setPool("pool1").
             build());
             build());
 
 
-    RemoteIterator<PathBasedCacheDirective> iter;
-    iter = dfs.listPathBasedCacheDirectives(null);
+    RemoteIterator<CacheDirectiveEntry> iter;
+    iter = dfs.listCacheDirectives(null);
     validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId );
     validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId );
-    iter = dfs.listPathBasedCacheDirectives(
-        new PathBasedCacheDirective.Builder().setPool("pool3").build());
+    iter = dfs.listCacheDirectives(
+        new CacheDirectiveInfo.Builder().setPool("pool3").build());
     assertFalse(iter.hasNext());
     assertFalse(iter.hasNext());
-    iter = dfs.listPathBasedCacheDirectives(
-        new PathBasedCacheDirective.Builder().setPool("pool1").build());
+    iter = dfs.listCacheDirectives(
+        new CacheDirectiveInfo.Builder().setPool("pool1").build());
     validateListAll(iter, alphaId, alphaId2, deltaId, relativeId );
     validateListAll(iter, alphaId, alphaId2, deltaId, relativeId );
-    iter = dfs.listPathBasedCacheDirectives(
-        new PathBasedCacheDirective.Builder().setPool("pool2").build());
+    iter = dfs.listCacheDirectives(
+        new CacheDirectiveInfo.Builder().setPool("pool2").build());
     validateListAll(iter, betaId);
     validateListAll(iter, betaId);
 
 
-    dfs.removePathBasedCacheDirective(betaId);
-    iter = dfs.listPathBasedCacheDirectives(
-        new PathBasedCacheDirective.Builder().setPool("pool2").build());
+    dfs.removeCacheDirective(betaId);
+    iter = dfs.listCacheDirectives(
+        new CacheDirectiveInfo.Builder().setPool("pool2").build());
     assertFalse(iter.hasNext());
     assertFalse(iter.hasNext());
 
 
     try {
     try {
-      dfs.removePathBasedCacheDirective(betaId);
+      dfs.removeCacheDirective(betaId);
       fail("expected an error when removing a non-existent ID");
       fail("expected an error when removing a non-existent ID");
     } catch (InvalidRequestException e) {
     } catch (InvalidRequestException e) {
       GenericTestUtils.assertExceptionContains("No directive with ID", e);
       GenericTestUtils.assertExceptionContains("No directive with ID", e);
     }
     }
 
 
     try {
     try {
-      proto.removePathBasedCacheDirective(-42l);
+      proto.removeCacheDirective(-42l);
       fail("expected an error when removing a negative ID");
       fail("expected an error when removing a negative ID");
     } catch (InvalidRequestException e) {
     } catch (InvalidRequestException e) {
       GenericTestUtils.assertExceptionContains(
       GenericTestUtils.assertExceptionContains(
           "Invalid negative ID", e);
           "Invalid negative ID", e);
     }
     }
     try {
     try {
-      proto.removePathBasedCacheDirective(43l);
+      proto.removeCacheDirective(43l);
       fail("expected an error when removing a non-existent ID");
       fail("expected an error when removing a non-existent ID");
     } catch (InvalidRequestException e) {
     } catch (InvalidRequestException e) {
       GenericTestUtils.assertExceptionContains("No directive with ID", e);
       GenericTestUtils.assertExceptionContains("No directive with ID", e);
     }
     }
 
 
-    dfs.removePathBasedCacheDirective(alphaId);
-    dfs.removePathBasedCacheDirective(alphaId2);
-    dfs.removePathBasedCacheDirective(deltaId);
+    dfs.removeCacheDirective(alphaId);
+    dfs.removeCacheDirective(alphaId2);
+    dfs.removeCacheDirective(deltaId);
 
 
-    dfs.modifyPathBasedCacheDirective(new PathBasedCacheDirective.Builder().
+    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().
         setId(relativeId).
         setId(relativeId).
         setReplication((short)555).
         setReplication((short)555).
         build());
         build());
-    iter = dfs.listPathBasedCacheDirectives(null);
+    iter = dfs.listCacheDirectives(null);
     assertTrue(iter.hasNext());
     assertTrue(iter.hasNext());
-    PathBasedCacheDirective modified = iter.next();
+    CacheDirectiveInfo modified = iter.next().getInfo();
     assertEquals(relativeId, modified.getId().longValue());
     assertEquals(relativeId, modified.getId().longValue());
     assertEquals((short)555, modified.getReplication().shortValue());
     assertEquals((short)555, modified.getReplication().shortValue());
-    dfs.removePathBasedCacheDirective(relativeId);
-    iter = dfs.listPathBasedCacheDirectives(null);
+    dfs.removeCacheDirective(relativeId);
+    iter = dfs.listCacheDirectives(null);
     assertFalse(iter.hasNext());
     assertFalse(iter.hasNext());
 
 
     // Verify that PBCDs with path "." work correctly
     // Verify that PBCDs with path "." work correctly
-    PathBasedCacheDirective directive =
-        new PathBasedCacheDirective.Builder().setPath(new Path("."))
+    CacheDirectiveInfo directive =
+        new CacheDirectiveInfo.Builder().setPath(new Path("."))
             .setPool("pool1").build();
             .setPool("pool1").build();
-    long id = dfs.addPathBasedCacheDirective(directive);
-    dfs.modifyPathBasedCacheDirective(new PathBasedCacheDirective.Builder(
+    long id = dfs.addCacheDirective(directive);
+    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(
         directive).setId(id).setReplication((short)2).build());
         directive).setId(id).setReplication((short)2).build());
-    dfs.removePathBasedCacheDirective(id);
+    dfs.removeCacheDirective(id);
   }
   }
 
 
   @Test(timeout=60000)
   @Test(timeout=60000)
@@ -514,15 +521,15 @@ public class TestPathBasedCacheRequests {
     String entryPrefix = "/party-";
     String entryPrefix = "/party-";
     long prevId = -1;
     long prevId = -1;
     for (int i=0; i<numEntries; i++) {
     for (int i=0; i<numEntries; i++) {
-      prevId = dfs.addPathBasedCacheDirective(
-          new PathBasedCacheDirective.Builder().
+      prevId = dfs.addCacheDirective(
+          new CacheDirectiveInfo.Builder().
             setPath(new Path(entryPrefix + i)).setPool(pool).build());
             setPath(new Path(entryPrefix + i)).setPool(pool).build());
     }
     }
-    RemoteIterator<PathBasedCacheDirective> dit
-        = dfs.listPathBasedCacheDirectives(null);
+    RemoteIterator<CacheDirectiveEntry> dit
+        = dfs.listCacheDirectives(null);
     for (int i=0; i<numEntries; i++) {
     for (int i=0; i<numEntries; i++) {
       assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
       assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
-      PathBasedCacheDirective cd = dit.next();
+      CacheDirectiveInfo cd = dit.next().getInfo();
       assertEquals(i+1, cd.getId().longValue());
       assertEquals(i+1, cd.getId().longValue());
       assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
       assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
       assertEquals(pool, cd.getPool());
       assertEquals(pool, cd.getPool());
@@ -543,18 +550,18 @@ public class TestPathBasedCacheRequests {
     assertEquals(weight, (int)info.getWeight());
     assertEquals(weight, (int)info.getWeight());
     assertFalse("Unexpected # of cache pools found", pit.hasNext());
     assertFalse("Unexpected # of cache pools found", pit.hasNext());
   
   
-    dit = dfs.listPathBasedCacheDirectives(null);
+    dit = dfs.listCacheDirectives(null);
     for (int i=0; i<numEntries; i++) {
     for (int i=0; i<numEntries; i++) {
       assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
       assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
-      PathBasedCacheDirective cd = dit.next();
+      CacheDirectiveInfo cd = dit.next().getInfo();
       assertEquals(i+1, cd.getId().longValue());
       assertEquals(i+1, cd.getId().longValue());
       assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
       assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
       assertEquals(pool, cd.getPool());
       assertEquals(pool, cd.getPool());
     }
     }
     assertFalse("Unexpected # of cache directives found", dit.hasNext());
     assertFalse("Unexpected # of cache directives found", dit.hasNext());
 
 
-    long nextId = dfs.addPathBasedCacheDirective(
-          new PathBasedCacheDirective.Builder().
+    long nextId = dfs.addCacheDirective(
+          new CacheDirectiveInfo.Builder().
             setPath(new Path("/foobar")).setPool(pool).build());
             setPath(new Path("/foobar")).setPool(pool).build());
     assertEquals(prevId + 1, nextId);
     assertEquals(prevId + 1, nextId);
   }
   }
@@ -686,22 +693,22 @@ public class TestPathBasedCacheRequests {
       // Cache and check each path in sequence
       // Cache and check each path in sequence
       int expected = 0;
       int expected = 0;
       for (int i=0; i<numFiles; i++) {
       for (int i=0; i<numFiles; i++) {
-        PathBasedCacheDirective directive =
-            new PathBasedCacheDirective.Builder().
+        CacheDirectiveInfo directive =
+            new CacheDirectiveInfo.Builder().
               setPath(new Path(paths.get(i))).
               setPath(new Path(paths.get(i))).
               setPool(pool).
               setPool(pool).
               build();
               build();
-        nnRpc.addPathBasedCacheDirective(directive);
+        nnRpc.addCacheDirective(directive);
         expected += numBlocksPerFile;
         expected += numBlocksPerFile;
         waitForCachedBlocks(namenode, expected, expected,
         waitForCachedBlocks(namenode, expected, expected,
             "testWaitForCachedReplicas:1");
             "testWaitForCachedReplicas:1");
       }
       }
       // Uncache and check each path in sequence
       // Uncache and check each path in sequence
-      RemoteIterator<PathBasedCacheDirective> entries =
-          nnRpc.listPathBasedCacheDirectives(0, null);
+      RemoteIterator<CacheDirectiveEntry> entries =
+          nnRpc.listCacheDirectives(0, null);
       for (int i=0; i<numFiles; i++) {
       for (int i=0; i<numFiles; i++) {
-        PathBasedCacheDirective directive = entries.next();
-        nnRpc.removePathBasedCacheDirective(directive.getId());
+        CacheDirectiveEntry entry = entries.next();
+        nnRpc.removeCacheDirective(entry.getInfo().getId());
         expected -= numBlocksPerFile;
         expected -= numBlocksPerFile;
         waitForCachedBlocks(namenode, expected, expected,
         waitForCachedBlocks(namenode, expected, expected,
             "testWaitForCachedReplicas:2");
             "testWaitForCachedReplicas:2");
@@ -712,7 +719,7 @@ public class TestPathBasedCacheRequests {
   }
   }
 
 
   @Test(timeout=120000)
   @Test(timeout=120000)
-  public void testAddingPathBasedCacheDirectivesWhenCachingIsDisabled()
+  public void testAddingCacheDirectiveInfosWhenCachingIsDisabled()
       throws Exception {
       throws Exception {
     HdfsConfiguration conf = createCachingConf();
     HdfsConfiguration conf = createCachingConf();
     conf.setBoolean(DFS_NAMENODE_CACHING_ENABLED_KEY, false);
     conf.setBoolean(DFS_NAMENODE_CACHING_ENABLED_KEY, false);
@@ -738,22 +745,22 @@ public class TestPathBasedCacheRequests {
       }
       }
       // Check the initial statistics at the namenode
       // Check the initial statistics at the namenode
       waitForCachedBlocks(namenode, 0, 0,
       waitForCachedBlocks(namenode, 0, 0,
-          "testAddingPathBasedCacheDirectivesWhenCachingIsDisabled:0");
+          "testAddingCacheDirectiveInfosWhenCachingIsDisabled:0");
       // Cache and check each path in sequence
       // Cache and check each path in sequence
       int expected = 0;
       int expected = 0;
       for (int i=0; i<numFiles; i++) {
       for (int i=0; i<numFiles; i++) {
-        PathBasedCacheDirective directive =
-            new PathBasedCacheDirective.Builder().
+        CacheDirectiveInfo directive =
+            new CacheDirectiveInfo.Builder().
               setPath(new Path(paths.get(i))).
               setPath(new Path(paths.get(i))).
               setPool(pool).
               setPool(pool).
               build();
               build();
-        dfs.addPathBasedCacheDirective(directive);
+        dfs.addCacheDirective(directive);
         waitForCachedBlocks(namenode, expected, 0,
         waitForCachedBlocks(namenode, expected, 0,
-          "testAddingPathBasedCacheDirectivesWhenCachingIsDisabled:1");
+          "testAddingCacheDirectiveInfosWhenCachingIsDisabled:1");
       }
       }
       Thread.sleep(20000);
       Thread.sleep(20000);
       waitForCachedBlocks(namenode, expected, 0,
       waitForCachedBlocks(namenode, expected, 0,
-          "testAddingPathBasedCacheDirectivesWhenCachingIsDisabled:2");
+          "testAddingCacheDirectiveInfosWhenCachingIsDisabled:2");
     } finally {
     } finally {
       cluster.shutdown();
       cluster.shutdown();
     }
     }
@@ -788,18 +795,76 @@ public class TestPathBasedCacheRequests {
       waitForCachedBlocks(namenode, 0, 0,
       waitForCachedBlocks(namenode, 0, 0,
           "testWaitForCachedReplicasInDirectory:0");
           "testWaitForCachedReplicasInDirectory:0");
       // cache entire directory
       // cache entire directory
-      long id = dfs.addPathBasedCacheDirective(
-            new PathBasedCacheDirective.Builder().
+      long id = dfs.addCacheDirective(
+            new CacheDirectiveInfo.Builder().
               setPath(new Path("/foo")).
               setPath(new Path("/foo")).
               setReplication((short)2).
               setReplication((short)2).
               setPool(pool).
               setPool(pool).
               build());
               build());
       waitForCachedBlocks(namenode, 4, 8,
       waitForCachedBlocks(namenode, 4, 8,
           "testWaitForCachedReplicasInDirectory:1");
           "testWaitForCachedReplicasInDirectory:1");
+      // Verify that listDirectives gives the stats we want.
+      RemoteIterator<CacheDirectiveEntry> iter =
+        dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().
+            setPath(new Path("/foo")).
+            build());
+      CacheDirectiveEntry entry = iter.next();
+      CacheDirectiveStats stats = entry.getStats();
+      Assert.assertEquals(Long.valueOf(2),
+          stats.getFilesAffected());
+      Assert.assertEquals(Long.valueOf(
+          2 * numBlocksPerFile * BLOCK_SIZE * 2),
+          stats.getBytesNeeded());
+      Assert.assertEquals(Long.valueOf(
+          2 * numBlocksPerFile * BLOCK_SIZE * 2),
+          stats.getBytesCached());
+      
+      long id2 = dfs.addCacheDirective(
+            new CacheDirectiveInfo.Builder().
+              setPath(new Path("/foo/bar")).
+              setReplication((short)4).
+              setPool(pool).
+              build());
+      // wait for an additional 2 cached replicas to come up
+      waitForCachedBlocks(namenode, 4, 10,
+          "testWaitForCachedReplicasInDirectory:2");
+      // the directory directive's stats are unchanged
+      iter = dfs.listCacheDirectives(
+          new CacheDirectiveInfo.Builder().
+            setPath(new Path("/foo")).
+            build());
+      entry = iter.next();
+      stats = entry.getStats();
+      Assert.assertEquals(Long.valueOf(2),
+          stats.getFilesAffected());
+      Assert.assertEquals(Long.valueOf(
+          2 * numBlocksPerFile * BLOCK_SIZE * 2),
+          stats.getBytesNeeded());
+      Assert.assertEquals(Long.valueOf(
+          2 * numBlocksPerFile * BLOCK_SIZE * 2),
+          stats.getBytesCached());
+      // verify /foo/bar's stats
+      iter = dfs.listCacheDirectives(
+          new CacheDirectiveInfo.Builder().
+            setPath(new Path("/foo/bar")).
+            build());
+      entry = iter.next();
+      stats = entry.getStats();
+      Assert.assertEquals(Long.valueOf(1),
+          stats.getFilesAffected());
+      Assert.assertEquals(Long.valueOf(
+          4 * numBlocksPerFile * BLOCK_SIZE),
+          stats.getBytesNeeded());
+      // only 3 because the file only has 3 replicas, not 4 as requested.
+      Assert.assertEquals(Long.valueOf(
+          3 * numBlocksPerFile * BLOCK_SIZE),
+          stats.getBytesCached());
+      
       // remove and watch numCached go to 0
       // remove and watch numCached go to 0
-      dfs.removePathBasedCacheDirective(id);
+      dfs.removeCacheDirective(id);
+      dfs.removeCacheDirective(id2);
       waitForCachedBlocks(namenode, 0, 0,
       waitForCachedBlocks(namenode, 0, 0,
-          "testWaitForCachedReplicasInDirectory:2");
+          "testWaitForCachedReplicasInDirectory:3");
     } finally {
     } finally {
       cluster.shutdown();
       cluster.shutdown();
     }
     }
@@ -839,8 +904,8 @@ public class TestPathBasedCacheRequests {
       waitForCachedBlocks(namenode, 0, 0, "testReplicationFactor:0");
       waitForCachedBlocks(namenode, 0, 0, "testReplicationFactor:0");
       checkNumCachedReplicas(dfs, paths, 0, 0);
       checkNumCachedReplicas(dfs, paths, 0, 0);
       // cache directory
       // cache directory
-      long id = dfs.addPathBasedCacheDirective(
-          new PathBasedCacheDirective.Builder().
+      long id = dfs.addCacheDirective(
+          new CacheDirectiveInfo.Builder().
             setPath(new Path("/foo")).
             setPath(new Path("/foo")).
             setReplication((short)1).
             setReplication((short)1).
             setPool(pool).
             setPool(pool).
@@ -849,8 +914,8 @@ public class TestPathBasedCacheRequests {
       checkNumCachedReplicas(dfs, paths, 4, 4);
       checkNumCachedReplicas(dfs, paths, 4, 4);
       // step up the replication factor
       // step up the replication factor
       for (int i=2; i<=3; i++) {
       for (int i=2; i<=3; i++) {
-        dfs.modifyPathBasedCacheDirective(
-            new PathBasedCacheDirective.Builder().
+        dfs.modifyCacheDirective(
+            new CacheDirectiveInfo.Builder().
             setId(id).
             setId(id).
             setReplication((short)i).
             setReplication((short)i).
             build());
             build());
@@ -859,8 +924,8 @@ public class TestPathBasedCacheRequests {
       }
       }
       // step it down
       // step it down
       for (int i=2; i>=1; i--) {
       for (int i=2; i>=1; i--) {
-        dfs.modifyPathBasedCacheDirective(
-            new PathBasedCacheDirective.Builder().
+        dfs.modifyCacheDirective(
+            new CacheDirectiveInfo.Builder().
             setId(id).
             setId(id).
             setReplication((short)i).
             setReplication((short)i).
             build());
             build());
@@ -868,7 +933,7 @@ public class TestPathBasedCacheRequests {
         checkNumCachedReplicas(dfs, paths, 4, 4*i);
         checkNumCachedReplicas(dfs, paths, 4, 4*i);
       }
       }
       // remove and watch numCached go to 0
       // remove and watch numCached go to 0
-      dfs.removePathBasedCacheDirective(id);
+      dfs.removeCacheDirective(id);
       waitForCachedBlocks(namenode, 0, 0, "testReplicationFactor:4");
       waitForCachedBlocks(namenode, 0, 0, "testReplicationFactor:4");
       checkNumCachedReplicas(dfs, paths, 0, 0);
       checkNumCachedReplicas(dfs, paths, 0, 0);
     } finally {
     } finally {

+ 44 - 43
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java

@@ -61,7 +61,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -737,14 +738,14 @@ public class TestRetryCacheWithHA {
     }
     }
   }
   }
   
   
-  /** addPathBasedCacheDirective */
-  class AddPathBasedCacheDirectiveOp extends AtMostOnceOp {
-    private PathBasedCacheDirective directive;
+  /** addCacheDirective */
+  class AddCacheDirectiveInfoOp extends AtMostOnceOp {
+    private CacheDirectiveInfo directive;
     private Long result;
     private Long result;
 
 
-    AddPathBasedCacheDirectiveOp(DFSClient client,
-        PathBasedCacheDirective directive) {
-      super("addPathBasedCacheDirective", client);
+    AddCacheDirectiveInfoOp(DFSClient client,
+        CacheDirectiveInfo directive) {
+      super("addCacheDirective", client);
       this.directive = directive;
       this.directive = directive;
     }
     }
 
 
@@ -755,15 +756,15 @@ public class TestRetryCacheWithHA {
 
 
     @Override
     @Override
     void invoke() throws Exception {
     void invoke() throws Exception {
-      result = client.addPathBasedCacheDirective(directive);
+      result = client.addCacheDirective(directive);
     }
     }
 
 
     @Override
     @Override
     boolean checkNamenodeBeforeReturn() throws Exception {
     boolean checkNamenodeBeforeReturn() throws Exception {
       for (int i = 0; i < CHECKTIMES; i++) {
       for (int i = 0; i < CHECKTIMES; i++) {
-        RemoteIterator<PathBasedCacheDirective> iter =
-            dfs.listPathBasedCacheDirectives(
-                new PathBasedCacheDirective.Builder().
+        RemoteIterator<CacheDirectiveEntry> iter =
+            dfs.listCacheDirectives(
+                new CacheDirectiveInfo.Builder().
                     setPool(directive.getPool()).
                     setPool(directive.getPool()).
                     setPath(directive.getPath()).
                     setPath(directive.getPath()).
                     build());
                     build());
@@ -781,15 +782,15 @@ public class TestRetryCacheWithHA {
     }
     }
   }
   }
 
 
-  /** modifyPathBasedCacheDirective */
-  class ModifyPathBasedCacheDirectiveOp extends AtMostOnceOp {
-    private final PathBasedCacheDirective directive;
+  /** modifyCacheDirective */
+  class ModifyCacheDirectiveInfoOp extends AtMostOnceOp {
+    private final CacheDirectiveInfo directive;
     private final short newReplication;
     private final short newReplication;
     private long id;
     private long id;
 
 
-    ModifyPathBasedCacheDirectiveOp(DFSClient client,
-        PathBasedCacheDirective directive, short newReplication) {
-      super("modifyPathBasedCacheDirective", client);
+    ModifyCacheDirectiveInfoOp(DFSClient client,
+        CacheDirectiveInfo directive, short newReplication) {
+      super("modifyCacheDirective", client);
       this.directive = directive;
       this.directive = directive;
       this.newReplication = newReplication;
       this.newReplication = newReplication;
     }
     }
@@ -797,13 +798,13 @@ public class TestRetryCacheWithHA {
     @Override
     @Override
     void prepare() throws Exception {
     void prepare() throws Exception {
       dfs.addCachePool(new CachePoolInfo(directive.getPool()));
       dfs.addCachePool(new CachePoolInfo(directive.getPool()));
-      id = client.addPathBasedCacheDirective(directive);
+      id = client.addCacheDirective(directive);
     }
     }
 
 
     @Override
     @Override
     void invoke() throws Exception {
     void invoke() throws Exception {
-      client.modifyPathBasedCacheDirective(
-          new PathBasedCacheDirective.Builder().
+      client.modifyCacheDirective(
+          new CacheDirectiveInfo.Builder().
               setId(id).
               setId(id).
               setReplication(newReplication).
               setReplication(newReplication).
               build());
               build());
@@ -812,14 +813,14 @@ public class TestRetryCacheWithHA {
     @Override
     @Override
     boolean checkNamenodeBeforeReturn() throws Exception {
     boolean checkNamenodeBeforeReturn() throws Exception {
       for (int i = 0; i < CHECKTIMES; i++) {
       for (int i = 0; i < CHECKTIMES; i++) {
-        RemoteIterator<PathBasedCacheDirective> iter =
-            dfs.listPathBasedCacheDirectives(
-                new PathBasedCacheDirective.Builder().
+        RemoteIterator<CacheDirectiveEntry> iter =
+            dfs.listCacheDirectives(
+                new CacheDirectiveInfo.Builder().
                     setPool(directive.getPool()).
                     setPool(directive.getPool()).
                     setPath(directive.getPath()).
                     setPath(directive.getPath()).
                     build());
                     build());
         while (iter.hasNext()) {
         while (iter.hasNext()) {
-          PathBasedCacheDirective result = iter.next();
+          CacheDirectiveInfo result = iter.next().getInfo();
           if ((result.getId() == id) &&
           if ((result.getId() == id) &&
               (result.getReplication().shortValue() == newReplication)) {
               (result.getReplication().shortValue() == newReplication)) {
             return true;
             return true;
@@ -836,15 +837,15 @@ public class TestRetryCacheWithHA {
     }
     }
   }
   }
 
 
-  /** removePathBasedCacheDirective */
-  class RemovePathBasedCacheDirectiveOp extends AtMostOnceOp {
-    private PathBasedCacheDirective directive;
+  /** removeCacheDirective */
+  class RemoveCacheDirectiveInfoOp extends AtMostOnceOp {
+    private CacheDirectiveInfo directive;
     private long id;
     private long id;
 
 
-    RemovePathBasedCacheDirectiveOp(DFSClient client, String pool,
+    RemoveCacheDirectiveInfoOp(DFSClient client, String pool,
         String path) {
         String path) {
-      super("removePathBasedCacheDirective", client);
-      this.directive = new PathBasedCacheDirective.Builder().
+      super("removeCacheDirective", client);
+      this.directive = new CacheDirectiveInfo.Builder().
           setPool(pool).
           setPool(pool).
           setPath(new Path(path)).
           setPath(new Path(path)).
           build();
           build();
@@ -853,20 +854,20 @@ public class TestRetryCacheWithHA {
     @Override
     @Override
     void prepare() throws Exception {
     void prepare() throws Exception {
       dfs.addCachePool(new CachePoolInfo(directive.getPool()));
       dfs.addCachePool(new CachePoolInfo(directive.getPool()));
-      id = dfs.addPathBasedCacheDirective(directive);
+      id = dfs.addCacheDirective(directive);
     }
     }
 
 
     @Override
     @Override
     void invoke() throws Exception {
     void invoke() throws Exception {
-      client.removePathBasedCacheDirective(id);
+      client.removeCacheDirective(id);
     }
     }
 
 
     @Override
     @Override
     boolean checkNamenodeBeforeReturn() throws Exception {
     boolean checkNamenodeBeforeReturn() throws Exception {
       for (int i = 0; i < CHECKTIMES; i++) {
       for (int i = 0; i < CHECKTIMES; i++) {
-        RemoteIterator<PathBasedCacheDirective> iter =
-            dfs.listPathBasedCacheDirectives(
-                new PathBasedCacheDirective.Builder().
+        RemoteIterator<CacheDirectiveEntry> iter =
+            dfs.listCacheDirectives(
+                new CacheDirectiveInfo.Builder().
                   setPool(directive.getPool()).
                   setPool(directive.getPool()).
                   setPath(directive.getPath()).
                   setPath(directive.getPath()).
                   build());
                   build());
@@ -1072,10 +1073,10 @@ public class TestRetryCacheWithHA {
   }
   }
   
   
   @Test (timeout=60000)
   @Test (timeout=60000)
-  public void testAddPathBasedCacheDirective() throws Exception {
+  public void testAddCacheDirectiveInfo() throws Exception {
     DFSClient client = genClientWithDummyHandler();
     DFSClient client = genClientWithDummyHandler();
-    AtMostOnceOp op = new AddPathBasedCacheDirectiveOp(client, 
-        new PathBasedCacheDirective.Builder().
+    AtMostOnceOp op = new AddCacheDirectiveInfoOp(client, 
+        new CacheDirectiveInfo.Builder().
             setPool("pool").
             setPool("pool").
             setPath(new Path("/path")).
             setPath(new Path("/path")).
             build());
             build());
@@ -1083,10 +1084,10 @@ public class TestRetryCacheWithHA {
   }
   }
 
 
   @Test (timeout=60000)
   @Test (timeout=60000)
-  public void testModifyPathBasedCacheDirective() throws Exception {
+  public void testModifyCacheDirectiveInfo() throws Exception {
     DFSClient client = genClientWithDummyHandler();
     DFSClient client = genClientWithDummyHandler();
-    AtMostOnceOp op = new ModifyPathBasedCacheDirectiveOp(client, 
-        new PathBasedCacheDirective.Builder().
+    AtMostOnceOp op = new ModifyCacheDirectiveInfoOp(client, 
+        new CacheDirectiveInfo.Builder().
             setPool("pool").
             setPool("pool").
             setPath(new Path("/path")).
             setPath(new Path("/path")).
             setReplication((short)1).build(),
             setReplication((short)1).build(),
@@ -1095,9 +1096,9 @@ public class TestRetryCacheWithHA {
   }
   }
 
 
   @Test (timeout=60000)
   @Test (timeout=60000)
-  public void testRemovePathBasedCacheDescriptor() throws Exception {
+  public void testRemoveCacheDescriptor() throws Exception {
     DFSClient client = genClientWithDummyHandler();
     DFSClient client = genClientWithDummyHandler();
-    AtMostOnceOp op = new RemovePathBasedCacheDirectiveOp(client, "pool",
+    AtMostOnceOp op = new RemoveCacheDirectiveInfoOp(client, "pool",
         "/path");
         "/path");
     testClientRetryWithFailover(op);
     testClientRetryWithFailover(op);
   }
   }

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java

@@ -65,6 +65,7 @@ public class TestHttpsFileSystem {
     cluster.getFileSystem().create(new Path("/test")).close();
     cluster.getFileSystem().create(new Path("/test")).close();
     InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
     InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
     nnAddr = addr.getHostName() + ":" + addr.getPort();
     nnAddr = addr.getHostName() + ":" + addr.getPort();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
   }
   }
 
 
   @AfterClass
   @AfterClass
@@ -80,4 +81,15 @@ public class TestHttpsFileSystem {
     Assert.assertTrue(fs.exists(new Path("/test")));
     Assert.assertTrue(fs.exists(new Path("/test")));
     fs.close();
     fs.close();
   }
   }
+
+  @Test
+  public void testSWebHdfsFileSystem() throws Exception {
+    FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, "swebhdfs");
+    final Path f = new Path("/testswebhdfs");
+    FSDataOutputStream os = fs.create(f);
+    os.write(23);
+    os.close();
+    Assert.assertTrue(fs.exists(f));
+    fs.close();
+  }
 }
 }

+ 20 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -101,7 +102,7 @@ public class TestWebHDFS {
     try {
     try {
       cluster.waitActive();
       cluster.waitActive();
 
 
-      final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+      final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
       final Path dir = new Path("/test/largeFile");
       final Path dir = new Path("/test/largeFile");
       Assert.assertTrue(fs.mkdirs(dir));
       Assert.assertTrue(fs.mkdirs(dir));
 
 
@@ -229,9 +230,9 @@ public class TestWebHDFS {
         new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
         new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
     try {
       cluster.waitActive();
       cluster.waitActive();
-      WebHdfsTestUtil.getWebHdfsFileSystem(conf).setPermission(
-          new Path("/"),
-          new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
+      WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME)
+          .setPermission(new Path("/"),
+              new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
 
 
       // trick the NN into not believing it's not the superuser so we can
       // trick the NN into not believing it's not the superuser so we can
       // tell if the correct user is used by listStatus
       // tell if the correct user is used by listStatus
@@ -243,8 +244,9 @@ public class TestWebHDFS {
         .doAs(new PrivilegedExceptionAction<Void>() {
         .doAs(new PrivilegedExceptionAction<Void>() {
           @Override
           @Override
           public Void run() throws IOException, URISyntaxException {
           public Void run() throws IOException, URISyntaxException {
-            FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
-            Path d = new Path("/my-dir");
+              FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+                  WebHdfsFileSystem.SCHEME);
+              Path d = new Path("/my-dir");
             Assert.assertTrue(fs.mkdirs(d));
             Assert.assertTrue(fs.mkdirs(d));
             for (int i=0; i < listLimit*3; i++) {
             for (int i=0; i < listLimit*3; i++) {
               Path p = new Path(d, "file-"+i);
               Path p = new Path(d, "file-"+i);
@@ -258,4 +260,16 @@ public class TestWebHDFS {
       cluster.shutdown();
       cluster.shutdown();
     }
     }
   }
   }
+
+  /**
+   * WebHdfs should be enabled by default after HDFS-5532
+   * 
+   * @throws Exception
+   */
+  @Test
+  public void testWebHdfsEnabledByDefault() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    Assert.assertTrue(conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
+        false));
+  }
 }
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java

@@ -82,7 +82,7 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
     final UserGroupInformation current = UserGroupInformation.getCurrentUser();
     final UserGroupInformation current = UserGroupInformation.getCurrentUser();
     ugi = UserGroupInformation.createUserForTesting(
     ugi = UserGroupInformation.createUserForTesting(
         current.getShortUserName() + "x", new String[]{"user"});
         current.getShortUserName() + "x", new String[]{"user"});
-    fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf);
+    fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME);
     defaultWorkingDirectory = fs.getWorkingDirectory().toUri().getPath();
     defaultWorkingDirectory = fs.getWorkingDirectory().toUri().getPath();
   }
   }
 
 

+ 6 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java

@@ -18,35 +18,32 @@
 
 
 package org.apache.hadoop.hdfs.web;
 package org.apache.hadoop.hdfs.web;
 
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 
 import java.io.BufferedReader;
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.OutputStream;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.ServerSocket;
 import java.net.Socket;
 import java.net.Socket;
-import java.net.SocketAddress;
 import java.net.SocketTimeoutException;
 import java.net.SocketTimeoutException;
 import java.nio.channels.SocketChannel;
 import java.nio.channels.SocketChannel;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.List;
 
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 
 
 /**
 /**
  * This test suite checks that WebHdfsFileSystem sets connection timeouts and
  * This test suite checks that WebHdfsFileSystem sets connection timeouts and
@@ -77,7 +74,7 @@ public class TestWebHdfsTimeouts {
     serverSocket = new ServerSocket(0, CONNECTION_BACKLOG);
     serverSocket = new ServerSocket(0, CONNECTION_BACKLOG);
     nnHttpAddress = new InetSocketAddress("localhost", serverSocket.getLocalPort());
     nnHttpAddress = new InetSocketAddress("localhost", serverSocket.getLocalPort());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:" + serverSocket.getLocalPort());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:" + serverSocket.getLocalPort());
-    fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+    fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
     fs.connectionFactory = connectionFactory;
     fs.connectionFactory = connectionFactory;
     clients = new ArrayList<SocketChannel>();
     clients = new ArrayList<SocketChannel>();
     serverThread = null;
     serverThread = null;

+ 22 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java

@@ -46,20 +46,36 @@ public class WebHdfsTestUtil {
     return conf;
   }
 
-  public static WebHdfsFileSystem getWebHdfsFileSystem(final Configuration conf
-      ) throws IOException, URISyntaxException {
-    final String uri = WebHdfsFileSystem.SCHEME  + "://"
-        + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+  public static WebHdfsFileSystem getWebHdfsFileSystem(
+      final Configuration conf, String scheme) throws IOException,
+      URISyntaxException {
+    final String uri;
+
+    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
+      uri = WebHdfsFileSystem.SCHEME + "://"
+          + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
+      uri = SWebHdfsFileSystem.SCHEME + "://"
+          + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
+    } else {
+      throw new IllegalArgumentException("unknown scheme:" + scheme);
+    }
     return (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
   }
 
   public static WebHdfsFileSystem getWebHdfsFileSystemAs(
-      final UserGroupInformation ugi, final Configuration conf
+  final UserGroupInformation ugi, final Configuration conf
+  ) throws IOException, InterruptedException {
+    return getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME);
+  }
+
+  public static WebHdfsFileSystem getWebHdfsFileSystemAs(
+      final UserGroupInformation ugi, final Configuration conf, String scheme
       ) throws IOException, InterruptedException {
     return ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
       @Override
       public WebHdfsFileSystem run() throws Exception {
-        return getWebHdfsFileSystem(conf);
+        return getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
       }
     });
   }

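The scheme argument above selects which NameNode address key the test helper resolves: webhdfs:// is built from dfs.namenode.http-address and swebhdfs:// from dfs.namenode.https-address. A minimal usage sketch, not part of the commit; the class name, address values, and variable names are illustrative, and the configuration is assumed to point at a running NameNode (in the actual tests it comes from the cluster or server under test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;

public class WebHdfsSchemeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed endpoints; real tests fill these in from the running cluster.
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:50070");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:50470");

    // webhdfs:// client, resolved from the HTTP address.
    WebHdfsFileSystem webhdfs =
        WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
    // swebhdfs:// client, resolved from the HTTPS address.
    WebHdfsFileSystem swebhdfs =
        WebHdfsTestUtil.getWebHdfsFileSystem(conf, SWebHdfsFileSystem.SCHEME);
    System.out.println(webhdfs.getUri() + " / " + swebhdfs.getUri());
  }
}

Any other scheme string falls through to the IllegalArgumentException branch, so a caller cannot silently end up with a misconfigured filesystem.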
+ 16 - 16
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml

@@ -90,7 +90,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>poolparty  bob    bobgroup  rwxrwxrwx  51</expected-output>
+          <expected-output>poolparty  bob    bobgroup  rwxrwxrwx       51</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -129,11 +129,11 @@
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>bar   alice  alicegroup  rwxr-xr-x  100    </expected-output>
+          <expected-output>bar   alice  alicegroup  rwxr-xr-x      100</expected-output>
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>foo   bob    bob         rw-rw-r--  100    </expected-output>
+          <expected-output>foo   bob    bob         rw-rw-r--      100</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -156,7 +156,7 @@
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>foo   bob    bob    rw-rw-r--  100    </expected-output>
+          <expected-output>foo   bob    bob    rw-rw-r--      100</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -180,15 +180,15 @@
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>1   pool1  1            /foo</expected-output>
+          <expected-output>  1 pool1             1 /foo</expected-output>
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>2   pool1  1            /bar</expected-output>
+          <expected-output>  2 pool1             1 /bar</expected-output>
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>3   pool1  2            /baz</expected-output>
+          <expected-output>  3 pool1             2 /baz</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -234,11 +234,11 @@
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>8   pool2  1            /baz</expected-output>
+          <expected-output>  8 pool2             1 /baz</expected-output>
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>9   pool2  1            /buz</expected-output>
+          <expected-output>  9 pool2             1 /buz</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -265,11 +265,11 @@
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>10  pool1  1            /foo</expected-output>
+          <expected-output> 10 pool1             1 /foo</expected-output>
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>12  pool2  1            /foo</expected-output>
+          <expected-output> 12 pool2             1 /foo</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -296,7 +296,7 @@
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>16  pool2  1            /foo</expected-output>
+          <expected-output> 16 pool2             1 /foo</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -320,7 +320,7 @@
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>19  pool1  1            /bar</expected-output>
+          <expected-output> 19 pool1             1 /bar</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -349,11 +349,11 @@
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>22  pool1  1            /bar</expected-output>
+          <expected-output> 22 pool1             1 /bar</expected-output>
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>24  pool2  1            /bar</expected-output>
+          <expected-output> 24 pool2             1 /bar</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -379,7 +379,7 @@
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>25  pool1  1            /bar3</expected-output>
+          <expected-output> 25 pool1             1 /bar3</expected-output>
         </comparator>
       </comparators>
     </test>

+ 3 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -223,6 +223,9 @@ Release 2.3.0 - UNRELEASED
     MAPREDUCE-5625. TestFixedLengthInputFormat fails in jdk7 environment
     (Mariappan Asokan via jeagles)
 
+    MAPREDUCE-5631. TestJobEndNotifier.testNotifyRetries fails with Should
+    have taken more than 5 seconds in jdk7 (Jonathan Eagles via jlowe)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 17 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java

@@ -38,6 +38,7 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobContext;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
@@ -160,8 +161,13 @@ public class TestJobEndNotifier extends JobEndNotifier {
   //Check retries happen as intended
   @Test
   public void testNotifyRetries() throws InterruptedException {
-    Configuration conf = new Configuration();
+    JobConf conf = new JobConf();
+    conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, "0");
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "1");
     conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL, "http://nonexistent");
+    conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "5000");
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, "5000");
+
     JobReport jobReport = mock(JobReport.class);
 
     long startTime = System.currentTimeMillis();
@@ -170,7 +176,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
     this.notify(jobReport);
     long endTime = System.currentTimeMillis();
     Assert.assertEquals("Only 1 try was expected but was : "
-      + this.notificationCount, this.notificationCount, 1);
+      + this.notificationCount, 1, this.notificationCount);
     Assert.assertTrue("Should have taken more than 5 seconds it took "
       + (endTime - startTime), endTime - startTime > 5000);
 
@@ -185,7 +191,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
     this.notify(jobReport);
     endTime = System.currentTimeMillis();
     Assert.assertEquals("Only 3 retries were expected but was : "
-      + this.notificationCount, this.notificationCount, 3);
+      + this.notificationCount, 3, this.notificationCount);
     Assert.assertTrue("Should have taken more than 9 seconds it took "
       + (endTime - startTime), endTime - startTime > 9000);
 
@@ -198,14 +204,14 @@ public class TestJobEndNotifier extends JobEndNotifier {
     MRApp app = spy(new MRAppWithCustomContainerAllocator(
         2, 2, true, this.getClass().getName(), true, 2, true));
     doNothing().when(app).sysexit();
-    Configuration conf = new Configuration();
+    JobConf conf = new JobConf();
     conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
         JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
     JobImpl job = (JobImpl)app.submit(conf);
     app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
     // Unregistration succeeds: successfullyUnregistered is set
     app.shutDownJob();
-    Assert.assertEquals(true, app.isLastAMRetry());
+    Assert.assertTrue(app.isLastAMRetry());
     Assert.assertEquals(1, JobEndServlet.calledTimes);
     Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",
         JobEndServlet.requestUri.getQuery());
@@ -221,7 +227,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
     MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
         this.getClass().getName(), true, 1, false));
     doNothing().when(app).sysexit();
-    Configuration conf = new Configuration();
+    JobConf conf = new JobConf();
     conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
         JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
     JobImpl job = (JobImpl)app.submit(conf);
@@ -234,10 +240,10 @@ public class TestJobEndNotifier extends JobEndNotifier {
     app.shutDownJob();
     // Not the last AM attempt. So user should that the job is still running.
     app.waitForState(job, JobState.RUNNING);
-    Assert.assertEquals(false, app.isLastAMRetry());
+    Assert.assertFalse(app.isLastAMRetry());
     Assert.assertEquals(0, JobEndServlet.calledTimes);
-    Assert.assertEquals(null, JobEndServlet.requestUri);
-    Assert.assertEquals(null, JobEndServlet.foundJobState);
+    Assert.assertNull(JobEndServlet.requestUri);
+    Assert.assertNull(JobEndServlet.foundJobState);
     server.stop();
   }
 
@@ -248,7 +254,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
     MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
         this.getClass().getName(), true, 2, false));
     doNothing().when(app).sysexit();
-    Configuration conf = new Configuration();
+    JobConf conf = new JobConf();
     conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
         JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
     JobImpl job = (JobImpl)app.submit(conf);
@@ -259,7 +265,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
     // Now shutdown. User should see FAILED state.
     // Unregistration fails: isLastAMRetry is recalculated, this is
     app.shutDownJob();
-    Assert.assertEquals(true, app.isLastAMRetry());
+    Assert.assertTrue(app.isLastAMRetry());
     Assert.assertEquals(1, JobEndServlet.calledTimes);
     Assert.assertEquals("jobid=" + job.getID() + "&status=FAILED",
         JobEndServlet.requestUri.getQuery());

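The notification knobs set at the top of testNotifyRetries are what make the elapsed-time assertions deterministic: with the number of attempts and the retry interval pinned, a notification posted to an unreachable URL must spend at least the configured interval before giving up. A hedged sketch of setting the same properties on a job's configuration (values are illustrative; the precise interplay of the attempt and retry counters is defined by JobEndNotifier itself):

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class JobEndNotificationConfSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Where the notifier posts the final job status; $jobId and $jobStatus
    // are substituted at notification time.
    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL,
        "http://example.com/jobend?jobid=$jobId&status=$jobStatus");
    // How many times the notifier may try, and how long it waits in between.
    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "1");
    conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, "0");
    conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "5000");
    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, "5000");
  }
}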
+ 1 - 1
hadoop-project/pom.xml

@@ -589,7 +589,7 @@
       <dependency>
         <groupId>commons-lang</groupId>
         <artifactId>commons-lang</artifactId>
-        <version>2.5</version>
+        <version>2.6</version>
       </dependency>
       <dependency>
         <groupId>commons-collections</groupId>

+ 10 - 0
hadoop-yarn-project/CHANGES.txt

@@ -114,6 +114,9 @@ Release 2.3.0 - UNRELEASED
     YARN-584. In scheduler web UIs, queues unexpand on refresh. (Harshit
     Daga via Sandy Ryza)
 
+    YARN-1303. Fixed DistributedShell to not fail with multiple commands separated
+    by a semi-colon as shell-command. (Xuan Gong via vinodkv)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -160,6 +163,13 @@ Release 2.3.0 - UNRELEASED
     process same allocate request twice resulting in additional containers
     getting allocated. (Omkar Vinit Joshi via bikas)
 
+    YARN-1425. TestRMRestart fails because MockRM.waitForState(AttemptId) uses
+    current attempt instead of the attempt passed as argument (Omkar Vinit
+    Joshi via bikas)
+
+    YARN-1053. Diagnostic message from ContainerExitEvent is ignored in
+    ContainerImpl (Omkar Vinit Joshi via bikas)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 16 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java

@@ -19,8 +19,10 @@
 package org.apache.hadoop.yarn.applications.distributedshell;
 
 import java.io.BufferedReader;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
-import java.io.InputStreamReader;
 import java.io.StringReader;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -218,6 +220,8 @@ public class ApplicationMaster {
   // Hardcoded path to shell script in launch container's local env
   private final String ExecShellStringPath = "ExecShellScript.sh";
 
+  private final String shellCommandPath = "shellCommands";
+
   private volatile boolean done;
   private volatile boolean success;
 
@@ -300,8 +304,6 @@ public class ApplicationMaster {
     Options opts = new Options();
     opts.addOption("app_attempt_id", true,
         "App Attempt ID. Not to be used unless for testing purposes");
-    opts.addOption("shell_command", true,
-        "Shell command to be executed by the Application Master");
     opts.addOption("shell_script", true,
         "Location of the shell script to be executed");
     opts.addOption("shell_args", true, "Command line args for the shell script");
@@ -372,11 +374,20 @@ public class ApplicationMaster {
         + appAttemptID.getApplicationId().getClusterTimestamp()
         + ", attemptId=" + appAttemptID.getAttemptId());
 
-    if (!cliParser.hasOption("shell_command")) {
+    File shellCommandFile = new File(shellCommandPath);
+    if (!shellCommandFile.exists()) {
       throw new IllegalArgumentException(
           "No shell command specified to be executed by application master");
     }
-    shellCommand = cliParser.getOptionValue("shell_command");
+    FileInputStream fs = null;
+    DataInputStream ds = null;
+    try {
+      ds = new DataInputStream(new FileInputStream(shellCommandFile));
+      shellCommand = ds.readUTF();
+    } finally {
+      org.apache.commons.io.IOUtils.closeQuietly(ds);
+      org.apache.commons.io.IOUtils.closeQuietly(fs);
+    }
 
     if (cliParser.hasOption("shell_args")) {
       shellArgs = cliParser.getOptionValue("shell_args");

+ 26 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java

@@ -32,14 +32,17 @@ import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
+import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -162,6 +165,7 @@ public class Client {
   // Command line options
   private Options opts;
 
+  private final String shellCommandPath = "shellCommands";
   /**
    * @param args Command line arguments 
    */
@@ -483,6 +487,27 @@ public class Client {
       hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
     }
 
+    if (!shellCommand.isEmpty()) {
+      String shellCommandSuffix =
+          appName + "/" + appId.getId() + "/" + shellCommandPath;
+      Path shellCommandDst =
+          new Path(fs.getHomeDirectory(), shellCommandSuffix);
+      FSDataOutputStream ostream = null;
+      try {
+        ostream = FileSystem
+            .create(fs, shellCommandDst, new FsPermission((short) 0710));
+        ostream.writeUTF(shellCommand);
+      } finally {
+        IOUtils.closeQuietly(ostream);
+      }
+      FileStatus scFileStatus = fs.getFileStatus(shellCommandDst);
+      LocalResource scRsrc =
+          LocalResource.newInstance(
+              ConverterUtils.getYarnUrlFromURI(shellCommandDst.toUri()),
+              LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
+              scFileStatus.getLen(), scFileStatus.getModificationTime());
+      localResources.put(shellCommandPath, scRsrc);
+    }
     // Set local resource info into app master container launch context
     amContainer.setLocalResources(localResources);
 
@@ -541,9 +566,7 @@
     vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
     vargs.add("--num_containers " + String.valueOf(numContainers));
     vargs.add("--priority " + String.valueOf(shellCmdPriority));
-    if (!shellCommand.isEmpty()) {
-      vargs.add("--shell_command " + shellCommand + "");
-    }
+
     if (!shellArgs.isEmpty()) {
       vargs.add("--shell_args " + shellArgs + "");
     }

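Taken together with the ApplicationMaster change above, the contract is: the Client writes the shell command with writeUTF into a small HDFS file, registers that file as an APPLICATION-visibility LocalResource under the link name "shellCommands", and the NodeManager localizes it into the container's working directory, where the ApplicationMaster reads it back with readUTF. A self-contained sketch of the same write/read pairing against the local filesystem (not part of the commit; the path and command string are illustrative):

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class ShellCommandRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The local filesystem stands in for HDFS in this sketch.
    FileSystem fs = FileSystem.getLocal(conf);
    Path dst = new Path(System.getProperty("java.io.tmpdir"), "shellCommands");

    // Writer side, mirroring the Client: writeUTF the whole command string.
    FSDataOutputStream out = null;
    try {
      out = FileSystem.create(fs, dst, new FsPermission((short) 0710));
      out.writeUTF("echo output_ignored;echo output_expected");
    } finally {
      IOUtils.closeQuietly(out);
    }

    // Reader side, mirroring the ApplicationMaster: readUTF recovers the
    // string, semicolons and all, with no shell quoting involved.
    FSDataInputStream in = null;
    try {
      in = fs.open(dst);
      System.out.println(in.readUTF());
    } finally {
      IOUtils.closeQuietly(in);
    }
  }
}

Passing the command through a localized file rather than a --shell_command argument is what lets YARN-1303 handle semicolon-separated commands: the string never goes through another round of command-line parsing.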
+ 95 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java

@@ -18,12 +18,15 @@
 
 package org.apache.hadoop.yarn.applications.distributedshell;
 
+import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
+import java.io.FileReader;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.URL;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -170,6 +173,39 @@ public class TestDistributedShell {
 
   }
 
+  @Test(timeout=90000)
+  public void testDSShellWithCommands() throws Exception {
+
+    String[] args = {
+        "--jar",
+        APPMASTER_JAR,
+        "--num_containers",
+        "2",
+        "--shell_command",
+        "\"echo output_ignored;echo output_expected\"",
+        "--master_memory",
+        "512",
+        "--master_vcores",
+        "2",
+        "--container_memory",
+        "128",
+        "--container_vcores",
+        "1"
+    };
+
+    LOG.info("Initializing DS Client");
+    final Client client =
+        new Client(new Configuration(yarnCluster.getConfig()));
+    boolean initSuccess = client.init(args);
+    Assert.assertTrue(initSuccess);
+    LOG.info("Running DS Client");
+    boolean result = client.run();
+    LOG.info("Client run completed. Result=" + result);
+    List<String> expectedContent = new ArrayList<String>();
+    expectedContent.add("output_expected");
+    verifyContainerLog(2, expectedContent, false, "");
+  }
+
   @Test(timeout=90000)
   public void testDSShellWithInvalidArgs() throws Exception {
     Client client = new Client(new Configuration(yarnCluster.getConfig()));
@@ -332,5 +368,64 @@ public class TestDistributedShell {
     LOG.info("Running DS Client");
     Assert.assertTrue(client.run());
   }
+
+  private int verifyContainerLog(int containerNum,
+      List<String> expectedContent, boolean count, String expectedWord) {
+    File logFolder =
+        new File(yarnCluster.getNodeManager(0).getConfig()
+            .get(YarnConfiguration.NM_LOG_DIRS,
+                YarnConfiguration.DEFAULT_NM_LOG_DIRS));
+
+    File[] listOfFiles = logFolder.listFiles();
+    int currentContainerLogFileIndex = -1;
+    for (int i = listOfFiles.length - 1; i >= 0; i--) {
+      if (listOfFiles[i].listFiles().length == containerNum + 1) {
+        currentContainerLogFileIndex = i;
+        break;
+      }
+    }
+    Assert.assertTrue(currentContainerLogFileIndex != -1);
+    File[] containerFiles =
+        listOfFiles[currentContainerLogFileIndex].listFiles();
+
+    int numOfWords = 0;
+    for (int i = 0; i < containerFiles.length; i++) {
+      for (File output : containerFiles[i].listFiles()) {
+        if (output.getName().trim().contains("stdout")) {
+          BufferedReader br = null;
+          try {
+
+            String sCurrentLine;
+
+            br = new BufferedReader(new FileReader(output));
+            int numOfline = 0;
+            while ((sCurrentLine = br.readLine()) != null) {
+              if (count) {
+                if (sCurrentLine.contains(expectedWord)) {
+                  numOfWords++;
+                }
+              } else if (output.getName().trim().equals("stdout")){
+                Assert.assertEquals("The current is" + sCurrentLine,
+                    expectedContent.get(numOfline), sCurrentLine.trim());
+                numOfline++;
+              }
+            }
+
+          } catch (IOException e) {
+            e.printStackTrace();
+          } finally {
+            try {
+              if (br != null)
+                br.close();
+            } catch (IOException ex) {
+              ex.printStackTrace();
+            }
+          }
+        }
+      }
+    }
+    return numOfWords;
+  }
+
 }
 

+ 8 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java

@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
 
-import java.io.IOException;
 import java.net.URISyntaxException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -687,6 +686,10 @@ public class ContainerImpl implements Container {
     public void transition(ContainerImpl container, ContainerEvent event) {
       ContainerExitEvent exitEvent = (ContainerExitEvent) event;
       container.exitCode = exitEvent.getExitCode();
+      if (exitEvent.getDiagnosticInfo() != null) {
+        container.diagnostics.append(exitEvent.getDiagnosticInfo())
+          .append('\n');
+      }
 
       // TODO: Add containerWorkDir to the deletion service.
       // TODO: Add containerOuputDir to the deletion service.
@@ -806,6 +809,10 @@ public class ContainerImpl implements Container {
     public void transition(ContainerImpl container, ContainerEvent event) {
       ContainerExitEvent exitEvent = (ContainerExitEvent) event;
       container.exitCode = exitEvent.getExitCode();
+      if (exitEvent.getDiagnosticInfo() != null) {
+        container.diagnostics.append(exitEvent.getDiagnosticInfo())
+          .append('\n');
+      }
 
       // The process/process-grp is killed. Decrement reference counts and
       // cleanup resources

+ 11 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
@@ -844,9 +845,13 @@ public class TestContainer {
     }
 
     public void containerFailed(int exitCode) {
+      String diagnosticMsg = "Container completed with exit code " + exitCode;
       c.handle(new ContainerExitEvent(cId,
           ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, exitCode,
-          "Container completed with exit code " + exitCode));
+          diagnosticMsg));
+      ContainerStatus containerStatus = c.cloneAndGetContainerStatus();
+      assert containerStatus.getDiagnostics().contains(diagnosticMsg);
+      assert containerStatus.getExitStatus() == exitCode;
       drainDispatcherEvents();
     }
 
@@ -857,9 +862,13 @@ public class TestContainer {
 
     public void containerKilledOnRequest() {
       int exitCode = ExitCode.FORCE_KILLED.getExitCode();
+      String diagnosticMsg = "Container completed with exit code " + exitCode;
       c.handle(new ContainerExitEvent(cId,
          ContainerEventType.CONTAINER_KILLED_ON_REQUEST, exitCode,
-          "Container completed with exit code " + exitCode));
+          diagnosticMsg));
+      ContainerStatus containerStatus = c.cloneAndGetContainerStatus();
+      assert containerStatus.getDiagnostics().contains(diagnosticMsg);
+      assert containerStatus.getExitStatus() == exitCode; 
       drainDispatcherEvents();
     }
     

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java

@@ -107,7 +107,7 @@ public class MockRM extends ResourceManager {
       throws Exception {
     RMApp app = getRMContext().getRMApps().get(attemptId.getApplicationId());
     Assert.assertNotNull("app shouldn't be null", app);
-    RMAppAttempt attempt = app.getCurrentAppAttempt();
+    RMAppAttempt attempt = app.getRMAppAttempt(attemptId);
     int timeoutSecs = 0;
     while (!finalState.equals(attempt.getAppAttemptState()) && timeoutSecs++ < 40) {
       System.out.println("AppAttempt : " + attemptId 

+ 10 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java

@@ -487,6 +487,8 @@ public class TestRMRestart {
     Assert.assertEquals(2, rmApp.getAppAttempts().size());
     // am1 attempt should be in FAILED state where as am2 attempt should be in
     // LAUNCHED state
+    rm2.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.FAILED);
+    rm2.waitForState(am2.getApplicationAttemptId(), RMAppAttemptState.LAUNCHED);
     Assert.assertEquals(RMAppAttemptState.FAILED,
         rmApp.getAppAttempts().get(am1.getApplicationAttemptId())
             .getAppAttemptState());
@@ -524,14 +526,17 @@
     Assert.assertEquals(3, rmApp.getAppAttempts().size());
     // am1 and am2 attempts should be in FAILED state where as am3 should be
     // in LAUNCHED state
+    rm3.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.FAILED);
+    rm3.waitForState(am2.getApplicationAttemptId(), RMAppAttemptState.FAILED);
+    ApplicationAttemptId latestAppAttemptId =
+        rmApp.getCurrentAppAttempt().getAppAttemptId();
+    rm3.waitForState(latestAppAttemptId, RMAppAttemptState.LAUNCHED);
     Assert.assertEquals(RMAppAttemptState.FAILED,
         rmApp.getAppAttempts().get(am1.getApplicationAttemptId())
             .getAppAttemptState());
     Assert.assertEquals(RMAppAttemptState.FAILED,
         rmApp.getAppAttempts().get(am2.getApplicationAttemptId())
             .getAppAttemptState());
-    ApplicationAttemptId latestAppAttemptId =
-        rmApp.getCurrentAppAttempt().getAppAttemptId();
     Assert.assertEquals(RMAppAttemptState.LAUNCHED,rmApp.getAppAttempts()
         .get(latestAppAttemptId).getAppAttemptState());
     
@@ -562,6 +567,7 @@
     rm4.waitForState(rmApp.getApplicationId(), RMAppState.ACCEPTED);
     Assert.assertEquals(4, rmApp.getAppAttempts().size());
     Assert.assertEquals(RMAppState.ACCEPTED, rmApp.getState());
+    rm4.waitForState(latestAppAttemptId, RMAppAttemptState.SCHEDULED);
     Assert.assertEquals(RMAppAttemptState.SCHEDULED, rmApp.getAppAttempts()
         .get(latestAppAttemptId).getAppAttemptState());
     
@@ -571,6 +577,8 @@
     rm4.waitForState(app2.getApplicationId(), RMAppState.ACCEPTED);
     Assert.assertEquals(RMAppState.ACCEPTED, app2.getState());
     Assert.assertEquals(1, app2.getAppAttempts().size());
+    rm4.waitForState(app2.getCurrentAppAttempt().getAppAttemptId(),
+        RMAppAttemptState.SCHEDULED);
     Assert.assertEquals(RMAppAttemptState.SCHEDULED, app2
         .getCurrentAppAttempt().getAppAttemptState());