Browse Source

Merging r1544666 through r1547120 from trunk to branch HDFS-2832

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1547122 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal 11 years ago
parent
commit
18159be495
100 changed files with 2730 additions and 1568 deletions
  1. 11 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 8 35
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  3. 299 209
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  4. 8 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
  5. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
  6. 5 70
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
  7. 5 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java
  8. 1 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java
  9. 2 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java
  10. 17 12
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
  11. 3 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java
  12. 26 20
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
  13. 3 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java
  14. 57 61
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
  15. 4 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java
  16. 8 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
  17. 0 95
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapInterface.java
  18. 1 2
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java
  19. 66 42
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java
  20. 3 3
      hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java
  21. 32 19
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
  22. 8 0
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
  23. 47 5
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
  24. 83 9
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
  25. 42 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  26. 5 0
      hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
  27. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  28. 80 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  29. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  30. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
  31. 105 23
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java
  32. 141 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
  33. 31 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
  34. 45 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java
  35. 5 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
  36. 87 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
  37. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  38. 7 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  39. 16 15
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  40. 59 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  41. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
  42. 9 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
  43. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
  44. 6 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
  45. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java
  46. 12 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
  47. 21 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
  48. 6 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
  49. 17 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
  50. 20 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  51. 61 20
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
  52. 9 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
  53. 7 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
  54. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
  55. 180 146
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
  56. 42 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
  57. 48 92
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
  58. 29 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
  59. 3 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
  60. 24 40
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  61. 5 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  62. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  63. 51 18
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
  64. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
  65. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
  66. 15 14
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
  67. 14 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  68. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
  69. 58 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
  70. 119 26
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
  71. 32 42
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
  72. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
  73. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  74. 32 26
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
  75. 10 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
  76. 6 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  77. 30 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
  78. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
  79. 11 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  80. 19 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
  81. 92 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java
  82. 35 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
  83. 0 227
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
  84. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
  85. 12 17
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
  86. 104 13
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
  87. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
  88. 2 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
  89. 72 22
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
  90. 32 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
  91. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
  92. 5 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
  93. 1 12
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java
  94. 0 18
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
  95. 36 22
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
  96. 4 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  97. 16 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
  98. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
  99. 13 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
  100. 41 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java

+ 11 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -388,6 +388,8 @@ Release 2.3.0 - UNRELEASED
     HADOOP-10111. Allow DU to be initialized with an initial value (Kihwal Lee
     via jeagles)
 
+    HADOOP-10126. LightWeightGSet log message is confusing. (Vinay via suresh)
+
   OPTIMIZATIONS
 
     HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
@@ -450,6 +452,9 @@ Release 2.3.0 - UNRELEASED
     HADOOP-10107. Server.getNumOpenConnections may throw NPE. (Kihwal Lee via
     jing9)
 
+    HADOOP-10135 writes to swift fs over partition size leave temp files and
+    empty output file (David Dobbins via stevel)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -467,6 +472,9 @@ Release 2.2.1 - UNRELEASED
     HADOOP-9623 Update jets3t dependency to 0.9.0.  (Amandeep Khurana via Colin
     Patrick McCabe)
 
+    HADOOP-10132. RPC#stopProxy() should log the class of proxy when IllegalArgumentException 
+    is encountered (Ted yu via umamahesh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -508,6 +516,9 @@ Release 2.2.1 - UNRELEASED
     HADOOP-9114. After defined the dfs.checksum.type as the NULL, write file and hflush will 
     through java.lang.ArrayIndexOutOfBoundsException (Sathish via umamahesh)
 
+    HADOOP-10130. RawLocalFS::LocalFSFileInputStream.pread does not track
+    FS::Statistics (Binglin Chang via Colin Patrick McCabe)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

+ 8 - 35
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -83,39 +83,6 @@ public class RawLocalFileSystem extends FileSystem {
     setConf(conf);
   }
   
-  class TrackingFileInputStream extends FileInputStream {
-    public TrackingFileInputStream(File f) throws IOException {
-      super(f);
-    }
-    
-    @Override
-    public int read() throws IOException {
-      int result = super.read();
-      if (result != -1) {
-        statistics.incrementBytesRead(1);
-      }
-      return result;
-    }
-    
-    @Override
-    public int read(byte[] data) throws IOException {
-      int result = super.read(data);
-      if (result != -1) {
-        statistics.incrementBytesRead(result);
-      }
-      return result;
-    }
-    
-    @Override
-    public int read(byte[] data, int offset, int length) throws IOException {
-      int result = super.read(data, offset, length);
-      if (result != -1) {
-        statistics.incrementBytesRead(result);
-      }
-      return result;
-    }
-  }
-
   /*******************************************************
    * For open()'s FSInputStream.
    *******************************************************/
@@ -124,7 +91,7 @@ public class RawLocalFileSystem extends FileSystem {
     private long position;
 
     public LocalFSFileInputStream(Path f) throws IOException {
-      this.fis = new TrackingFileInputStream(pathToFile(f));
+      fis = new FileInputStream(pathToFile(f));
     }
     
     @Override
@@ -159,6 +126,7 @@ public class RawLocalFileSystem extends FileSystem {
         int value = fis.read();
         if (value >= 0) {
           this.position++;
+          statistics.incrementBytesRead(1);
         }
         return value;
       } catch (IOException e) {                 // unexpected exception
@@ -172,6 +140,7 @@ public class RawLocalFileSystem extends FileSystem {
         int value = fis.read(b, off, len);
         if (value > 0) {
           this.position += value;
+          statistics.incrementBytesRead(value);
         }
         return value;
       } catch (IOException e) {                 // unexpected exception
@@ -184,7 +153,11 @@ public class RawLocalFileSystem extends FileSystem {
       throws IOException {
       ByteBuffer bb = ByteBuffer.wrap(b, off, len);
       try {
-        return fis.getChannel().read(bb, position);
+        int value = fis.getChannel().read(bb, position);
+        if (value > 0) {
+          statistics.incrementBytesRead(value);
+        }
+        return value;
       } catch (IOException e) {
         throw new FSError(e);
       }

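The hunk above removes the TrackingFileInputStream wrapper and counts bytes directly in LocalFSFileInputStream, so positional reads (pread) now update FileSystem.Statistics as well (HADOOP-10130). A minimal sketch, assuming a local path and sizes chosen only for illustration, of how the counter can be observed through the local file system:

```java
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class LocalReadStatsSketch {
  public static void main(String[] args) throws Exception {
    LocalFileSystem local = FileSystem.getLocal(new Configuration());
    FileSystem raw = local.getRawFileSystem();          // RawLocalFileSystem from the hunk above
    Path p = new Path("/tmp/stats-demo.bin");           // illustrative path

    try (OutputStream out = raw.create(p, true)) {      // write 1 KB so there is data to read back
      out.write(new byte[1024]);
    }

    byte[] buf = new byte[1024];
    try (FSDataInputStream in = raw.open(p)) {
      in.read(buf, 0, buf.length);                      // sequential read
      in.read(0, buf, 0, buf.length);                   // positional read (pread)
    }

    // Both reads are now reflected in the per-scheme counter; the
    // TestLocalFsFCStatistics change further down asserts 2 * blockSize for
    // the same reason.
    FileSystem.Statistics stats = FileSystem.getStatistics("file", raw.getClass());
    System.out.println("bytes read = " + stats.getBytesRead());
  }
}
```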
+ 299 - 209
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -19,12 +19,13 @@ package org.apache.hadoop.http;
 
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.PrintWriter;
 import java.io.InterruptedIOException;
+import java.io.PrintWriter;
 import java.net.BindException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.net.URL;
-import java.security.GeneralSecurityException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Enumeration;
@@ -32,7 +33,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import javax.net.ssl.SSLServerSocketFactory;
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
@@ -60,7 +60,6 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Shell;
 import org.mortbay.io.Buffer;
@@ -71,8 +70,8 @@ import org.mortbay.jetty.RequestLog;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.handler.ContextHandler;
 import org.mortbay.jetty.handler.ContextHandlerCollection;
-import org.mortbay.jetty.handler.RequestLogHandler;
 import org.mortbay.jetty.handler.HandlerCollection;
+import org.mortbay.jetty.handler.RequestLogHandler;
 import org.mortbay.jetty.nio.SelectChannelConnector;
 import org.mortbay.jetty.security.SslSocketConnector;
 import org.mortbay.jetty.servlet.Context;
@@ -86,6 +85,7 @@ import org.mortbay.thread.QueuedThreadPool;
 import org.mortbay.util.MultiException;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import com.sun.jersey.spi.container.servlet.ServletContainer;
 
 /**
@@ -114,11 +114,25 @@ public class HttpServer implements FilterContainer {
 
 
   public static final String BIND_ADDRESS = "bind.address";
 
-  private AccessControlList adminsAcl;
+  private final AccessControlList adminsAcl;
 
 
-  private SSLFactory sslFactory;
   protected final Server webServer;
-  protected final Connector listener;
+
+  private static class ListenerInfo {
+    /**
+     * Boolean flag to determine whether the HTTP server should clean up the
+     * listener in stop().
+     */
+    private final boolean isManaged;
+    private final Connector listener;
+    private ListenerInfo(boolean isManaged, Connector listener) {
+      this.isManaged = isManaged;
+      this.listener = listener;
+    }
+  }
+
+  private final List<ListenerInfo> listeners = Lists.newArrayList();
+
   protected final WebAppContext webAppContext;
   protected final boolean findPort;
   protected final Map<Context, Boolean> defaultContexts =
@@ -127,34 +141,111 @@ public class HttpServer implements FilterContainer {
   static final String STATE_DESCRIPTION_ALIVE = " - alive";
   static final String STATE_DESCRIPTION_NOT_LIVE = " - not live";
 
-  private final boolean listenerStartedExternally;
-  
   /**
    * Class to construct instances of HTTP server with specific options.
    */
   public static class Builder {
-    String name;
-    String bindAddress;
-    Integer port;
-    Boolean findPort;
-    Configuration conf;
-    Connector connector;
-    String[] pathSpecs;
-    AccessControlList adminsAcl;
-    boolean securityEnabled = false;
-    String usernameConfKey = null;
-    String keytabConfKey = null;
-    
+    private ArrayList<URI> endpoints = Lists.newArrayList();
+    private Connector connector;
+    private String name;
+    private Configuration conf;
+    private String[] pathSpecs;
+    private AccessControlList adminsAcl;
+    private boolean securityEnabled = false;
+    private String usernameConfKey;
+    private String keytabConfKey;
+    private boolean needsClientAuth;
+    private String trustStore;
+    private String trustStorePassword;
+    private String trustStoreType;
+
+    private String keyStore;
+    private String keyStorePassword;
+    private String keyStoreType;
+
+    // The -keypass option in keytool
+    private String keyPassword;
+
+    @Deprecated
+    private String bindAddress;
+    @Deprecated
+    private int port = -1;
+
+    private boolean findPort;
+
+    private String hostName;
+
     public Builder setName(String name){
       this.name = name;
       return this;
     }
+
+    /**
+     * Add an endpoint that the HTTP server should listen to.
+     *
+     * @param endpoint
+     *          the endpoint of that the HTTP server should listen to. The
+     *          scheme specifies the protocol (i.e. HTTP / HTTPS), the host
+     *          specifies the binding address, and the port specifies the
+     *          listening port. Unspecified or zero port means that the server
+     *          can listen to any port.
+     */
+    public Builder addEndpoint(URI endpoint) {
+      endpoints.add(endpoint);
+      return this;
+    }
+
+    /**
+     * Set the hostname of the http server. The host name is used to resolve the
+     * _HOST field in Kerberos principals. The hostname of the first listener
+     * will be used if the name is unspecified.
+     */
+    public Builder hostName(String hostName) {
+      this.hostName = hostName;
+      return this;
+    }
     
     
+    public Builder trustStore(String location, String password, String type) {
+      this.trustStore = location;
+      this.trustStorePassword = password;
+      this.trustStoreType = type;
+      return this;
+    }
+
+    public Builder keyStore(String location, String password, String type) {
+      this.keyStore = location;
+      this.keyStorePassword = password;
+      this.keyStoreType = type;
+      return this;
+    }
+
+    public Builder keyPassword(String password) {
+      this.keyPassword = password;
+      return this;
+    }
+
+    /**
+     * Specify whether the server should authorize the client in SSL
+     * connections.
+     */
+    public Builder needsClientAuth(boolean value) {
+      this.needsClientAuth = value;
+      return this;
+    }
+
+    /**
+     * Use addEndpoint() instead.
+     */
+    @Deprecated
     public Builder setBindAddress(String bindAddress){
       this.bindAddress = bindAddress;
       return this;
     }
-    
+
+    /**
+     * Use addEndpoint() instead.
+     */
+    @Deprecated
     public Builder setPort(int port) {
       this.port = port;
       return this;
@@ -204,25 +295,70 @@ public class HttpServer implements FilterContainer {
       if (this.name == null) {
         throw new HadoopIllegalArgumentException("name is not set");
       }
-      if (this.bindAddress == null) {
-        throw new HadoopIllegalArgumentException("bindAddress is not set");
+
+      // Make the behavior compatible with deprecated interfaces
+      if (bindAddress != null && port != -1) {
+        try {
+          endpoints.add(0, new URI("http", "", bindAddress, port, "", "", ""));
+        } catch (URISyntaxException e) {
+          throw new HadoopIllegalArgumentException("Invalid endpoint: "+ e);
+        }
       }
-      if (this.port == null) {
-        throw new HadoopIllegalArgumentException("port is not set");
+
+      if (endpoints.size() == 0) {
+        throw new HadoopIllegalArgumentException("No endpoints specified");
       }
-      if (this.findPort == null) {
-        throw new HadoopIllegalArgumentException("findPort is not set");
+
+      if (hostName == null) {
+        hostName = endpoints.get(0).getHost();
       }
       
       if (this.conf == null) {
         conf = new Configuration();
       }
       
-      HttpServer server = new HttpServer(this.name, this.bindAddress, this.port,
-      this.findPort, this.conf, this.adminsAcl, this.connector, this.pathSpecs);
+      HttpServer server = new HttpServer(this);
+
       if (this.securityEnabled) {
-        server.initSpnego(this.conf, this.usernameConfKey, this.keytabConfKey);
+        server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
+      }
+
+      if (connector != null) {
+        server.addUnmanagedListener(connector);
       }
+
+      for (URI ep : endpoints) {
+        Connector listener = null;
+        String scheme = ep.getScheme();
+        if ("http".equals(scheme)) {
+          listener = HttpServer.createDefaultChannelConnector();
+        } else if ("https".equals(scheme)) {
+          SslSocketConnector c = new SslSocketConnector();
+          c.setNeedClientAuth(needsClientAuth);
+          c.setKeyPassword(keyPassword);
+
+          if (keyStore != null) {
+            c.setKeystore(keyStore);
+            c.setKeystoreType(keyStoreType);
+            c.setPassword(keyStorePassword);
+          }
+
+          if (trustStore != null) {
+            c.setTruststore(trustStore);
+            c.setTruststoreType(trustStoreType);
+            c.setTrustPassword(trustStorePassword);
+          }
+          listener = c;
+
+        } else {
+          throw new HadoopIllegalArgumentException(
+              "unknown scheme for endpoint:" + ep);
+        }
+        listener.setHost(ep.getHost());
+        listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
+        server.addManagedListener(listener);
+      }
+      server.loadListeners();
       return server;
     }
   }
@@ -233,7 +369,7 @@ public class HttpServer implements FilterContainer {
       ) throws IOException {
     this(name, bindAddress, port, findPort, new Configuration());
   }
-  
+
   @Deprecated
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf, Connector connector) throws IOException {
@@ -314,51 +450,39 @@ public class HttpServer implements FilterContainer {
    * @param pathSpecs Path specifications that this httpserver will be serving. 
    *        These will be added to any filters.
    */
+  @Deprecated
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf, AccessControlList adminsAcl, 
       Connector connector, String[] pathSpecs) throws IOException {
-    webServer = new Server();
-    this.findPort = findPort;
-    this.adminsAcl = adminsAcl;
-    
-    if(connector == null) {
-      listenerStartedExternally = false;
-      if (HttpConfig.isSecure()) {
-        sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
-        try {
-          sslFactory.init();
-        } catch (GeneralSecurityException ex) {
-          throw new IOException(ex);
-        }
-        SslSocketConnector sslListener = new SslSocketConnector() {
-          @Override
-          protected SSLServerSocketFactory createFactory() throws Exception {
-            return sslFactory.createSSLServerSocketFactory();
-          }
-        };
-        listener = sslListener;
-      } else {
-        listener = createBaseListener(conf);
-      }
-      listener.setHost(bindAddress);
-      listener.setPort(port);
-      LOG.info("SSL is enabled on " + toString());
-    } else {
-      listenerStartedExternally = true;
-      listener = connector;
-    }
-    
-    webServer.addConnector(listener);
+    this(new Builder().setName(name)
+        .addEndpoint(URI.create("http://" + bindAddress + ":" + port))
+        .setFindPort(findPort).setConf(conf).setACL(adminsAcl)
+        .setConnector(connector).setPathSpec(pathSpecs));
+  }
+
+  private HttpServer(final Builder b) throws IOException {
+    final String appDir = getWebAppsPath(b.name);
+    this.webServer = new Server();
+    this.adminsAcl = b.adminsAcl;
+    this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir);
+    this.findPort = b.findPort;
+    initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs);
+  }
+
+  private void initializeWebServer(String name, String hostName,
+      Configuration conf, String[] pathSpecs)
+      throws FileNotFoundException, IOException {
+
+    Preconditions.checkNotNull(webAppContext);
 
 
     int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1);
     // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the
     // default value (currently 250).
-    QueuedThreadPool threadPool = maxThreads == -1 ?
-        new QueuedThreadPool() : new QueuedThreadPool(maxThreads);
+    QueuedThreadPool threadPool = maxThreads == -1 ? new QueuedThreadPool()
+        : new QueuedThreadPool(maxThreads);
     threadPool.setDaemon(true);
     webServer.setThreadPool(threadPool);
 
-    final String appDir = getWebAppsPath(name);
     ContextHandlerCollection contexts = new ContextHandlerCollection();
     RequestLog requestLog = HttpRequestLog.getRequestLog(name);
 
@@ -366,30 +490,24 @@ public class HttpServer implements FilterContainer {
       RequestLogHandler requestLogHandler = new RequestLogHandler();
       requestLogHandler.setRequestLog(requestLog);
       HandlerCollection handlers = new HandlerCollection();
-      handlers.setHandlers(new Handler[] {requestLogHandler, contexts});
+      handlers.setHandlers(new Handler[] { requestLogHandler, contexts });
       webServer.setHandler(handlers);
-    }
-    else {
+    } else {
       webServer.setHandler(contexts);
     }
 
-    webAppContext = new WebAppContext();
-    webAppContext.setDisplayName(name);
-    webAppContext.setContextPath("/");
-    webAppContext.setWar(appDir + "/" + name);
-    webAppContext.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
-    webAppContext.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
-    addNoCacheFilter(webAppContext);
+    final String appDir = getWebAppsPath(name);
+
     webServer.addHandler(webAppContext);
 
     addDefaultApps(contexts, appDir, conf);
-        
+
     addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
-    final FilterInitializer[] initializers = getFilterInitializers(conf); 
+    final FilterInitializer[] initializers = getFilterInitializers(conf);
     if (initializers != null) {
       conf = new Configuration(conf);
-      conf.set(BIND_ADDRESS, bindAddress);
-      for(FilterInitializer c : initializers) {
+      conf.set(BIND_ADDRESS, hostName);
+      for (FilterInitializer c : initializers) {
         c.initFilter(this, conf);
       }
     }
@@ -404,10 +522,29 @@ public class HttpServer implements FilterContainer {
     }
   }
 
-  @SuppressWarnings("unchecked")
-  private void addNoCacheFilter(WebAppContext ctxt) {
-    defineFilter(ctxt, NO_CACHE_FILTER,
-      NoCacheFilter.class.getName(), Collections.EMPTY_MAP, new String[] { "/*"});
+  private void addUnmanagedListener(Connector connector) {
+    listeners.add(new ListenerInfo(false, connector));
+  }
+
+  private void addManagedListener(Connector connector) {
+    listeners.add(new ListenerInfo(true, connector));
+  }
+
+  private static WebAppContext createWebAppContext(String name,
+      Configuration conf, AccessControlList adminsAcl, final String appDir) {
+    WebAppContext ctx = new WebAppContext();
+    ctx.setDisplayName(name);
+    ctx.setContextPath("/");
+    ctx.setWar(appDir + "/" + name);
+    ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
+    ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
+    addNoCacheFilter(ctx);
+    return ctx;
+  }
+
+  private static void addNoCacheFilter(WebAppContext ctxt) {
+    defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(),
+        Collections.<String, String> emptyMap(), new String[] { "/*" });
   }
 
   /**
@@ -651,7 +788,7 @@ public class HttpServer implements FilterContainer {
   /**
    * Define a filter for a context and set up default url mappings.
    */
-  public void defineFilter(Context ctx, String name,
+  public static void defineFilter(Context ctx, String name,
       String classname, Map<String,String> parameters, String[] urls) {
 
     FilterHolder holder = new FilterHolder();
@@ -715,93 +852,47 @@ public class HttpServer implements FilterContainer {
    * Get the port that the server is on
    * @return the port
    */
+  @Deprecated
   public int getPort() {
     return webServer.getConnectors()[0].getLocalPort();
   }
 
   /**
-   * Get the port that corresponds to a particular connector. In the case of
-   * HDFS, the second connector corresponds to the HTTPS connector.
+   * Get the address that corresponds to a particular connector.
    *
-   * @return the corresponding port for the connector, or -1 if there's no such
-   *         connector.
+   * @return the corresponding address for the connector, or null if there's no
+   *         such connector or the connector is not bounded.
    */
-  public int getConnectorPort(int index) {
+  public InetSocketAddress getConnectorAddress(int index) {
     Preconditions.checkArgument(index >= 0);
-    return index < webServer.getConnectors().length ?
-        webServer.getConnectors()[index].getLocalPort() : -1;
+    if (index > webServer.getConnectors().length)
+      return null;
+
+    Connector c = webServer.getConnectors()[index];
+    if (c.getLocalPort() == -1) {
+      // The connector is not bounded
+      return null;
+    }
+
+    return new InetSocketAddress(c.getHost(), c.getLocalPort());
   }
 
   /**
    * Set the min, max number of worker threads (simultaneous connections).
    */
   public void setThreads(int min, int max) {
-    QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool() ;
+    QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool();
     pool.setMinThreads(min);
     pool.setMaxThreads(max);
   }
 
-  /**
-   * Configure an ssl listener on the server.
-   * @param addr address to listen on
-   * @param keystore location of the keystore
-   * @param storPass password for the keystore
-   * @param keyPass password for the key
-   * @deprecated Use {@link #addSslListener(InetSocketAddress, Configuration, boolean)}
-   */
-  @Deprecated
-  public void addSslListener(InetSocketAddress addr, String keystore,
-      String storPass, String keyPass) throws IOException {
-    if (webServer.isStarted()) {
-      throw new IOException("Failed to add ssl listener");
-    }
-    SslSocketConnector sslListener = new SslSocketConnector();
-    sslListener.setHost(addr.getHostName());
-    sslListener.setPort(addr.getPort());
-    sslListener.setKeystore(keystore);
-    sslListener.setPassword(storPass);
-    sslListener.setKeyPassword(keyPass);
-    webServer.addConnector(sslListener);
-  }
-
-  /**
-   * Configure an ssl listener on the server.
-   * @param addr address to listen on
-   * @param sslConf conf to retrieve ssl options
-   * @param needCertsAuth whether x509 certificate authentication is required
-   */
-  public void addSslListener(InetSocketAddress addr, Configuration sslConf,
-      boolean needCertsAuth) throws IOException {
-    if (webServer.isStarted()) {
-      throw new IOException("Failed to add ssl listener");
-    }
-    if (needCertsAuth) {
-      // setting up SSL truststore for authenticating clients
-      System.setProperty("javax.net.ssl.trustStore", sslConf.get(
-          "ssl.server.truststore.location", ""));
-      System.setProperty("javax.net.ssl.trustStorePassword", sslConf.get(
-          "ssl.server.truststore.password", ""));
-      System.setProperty("javax.net.ssl.trustStoreType", sslConf.get(
-          "ssl.server.truststore.type", "jks"));
-    }
-    SslSocketConnector sslListener = new SslSocketConnector();
-    sslListener.setHost(addr.getHostName());
-    sslListener.setPort(addr.getPort());
-    sslListener.setKeystore(sslConf.get("ssl.server.keystore.location"));
-    sslListener.setPassword(sslConf.get("ssl.server.keystore.password", ""));
-    sslListener.setKeyPassword(sslConf.get("ssl.server.keystore.keypassword", ""));
-    sslListener.setKeystoreType(sslConf.get("ssl.server.keystore.type", "jks"));
-    sslListener.setNeedClientAuth(needCertsAuth);
-    webServer.addConnector(sslListener);
-  }
-  
-  protected void initSpnego(Configuration conf,
+  private void initSpnego(Configuration conf, String hostName,
       String usernameConfKey, String keytabConfKey) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
     String principalInConf = conf.get(usernameConfKey);
     if (principalInConf != null && !principalInConf.isEmpty()) {
-      params.put("kerberos.principal",
-                 SecurityUtil.getServerPrincipal(principalInConf, listener.getHost()));
+      params.put("kerberos.principal", SecurityUtil.getServerPrincipal(
+          principalInConf, hostName));
     }
     String httpKeytab = conf.get(keytabConfKey);
     if (httpKeytab != null && !httpKeytab.isEmpty()) {
@@ -819,8 +910,7 @@ public class HttpServer implements FilterContainer {
   public void start() throws IOException {
     try {
       try {
-        openListener();
-        LOG.info("Jetty bound to port " + listener.getLocalPort());
+        openListeners();
         webServer.start();
       } catch (IOException ex) {
         LOG.info("HttpServer.start() threw a non Bind IOException", ex);
@@ -856,50 +946,45 @@ public class HttpServer implements FilterContainer {
     }
   }
 
+  private void loadListeners() {
+    for (ListenerInfo li : listeners) {
+      webServer.addConnector(li.listener);
+    }
+  }
+
   /**
    * Open the main listener for the server
    * @throws Exception
    */
-  void openListener() throws Exception {
-    if (listener.getLocalPort() != -1) { // it's already bound
-      return;
-    }
-    if (listenerStartedExternally) { // Expect that listener was started securely
-      throw new Exception("Expected webserver's listener to be started " +
-          "previously but wasn't");
-    }
-    int port = listener.getPort();
-    while (true) {
-      // jetty has a bug where you can't reopen a listener that previously
-      // failed to open w/o issuing a close first, even if the port is changed
-      try {
-        listener.close();
-        listener.open();
-        break;
-      } catch (BindException ex) {
-        if (port == 0 || !findPort) {
-          BindException be = new BindException(
-              "Port in use: " + listener.getHost() + ":" + listener.getPort());
-          be.initCause(ex);
-          throw be;
+  void openListeners() throws Exception {
+    for (ListenerInfo li : listeners) {
+      Connector listener = li.listener;
+      if (!li.isManaged || li.listener.getLocalPort() != -1) {
+        // This listener is either started externally or has been bound
+        continue;
+      }
+      int port = listener.getPort();
+      while (true) {
+        // jetty has a bug where you can't reopen a listener that previously
+        // failed to open w/o issuing a close first, even if the port is changed
+        try {
+          listener.close();
+          listener.open();
+          LOG.info("Jetty bound to port " + listener.getLocalPort());
+          break;
+        } catch (BindException ex) {
+          if (port == 0 || !findPort) {
+            BindException be = new BindException("Port in use: "
+                + listener.getHost() + ":" + listener.getPort());
+            be.initCause(ex);
+            throw be;
+          }
         }
+        // try the next port number
+        listener.setPort(++port);
+        Thread.sleep(100);
       }
-      // try the next port number
-      listener.setPort(++port);
-      Thread.sleep(100);
-    }
-  }
-  
-  /**
-   * Return the bind address of the listener.
-   * @return InetSocketAddress of the listener
-   */
-  public InetSocketAddress getListenerAddress() {
-    int port = listener.getLocalPort();
-    if (port == -1) { // not bound, return requested port
-      port = listener.getPort();
     }
-    return new InetSocketAddress(listener.getHost(), port);
   }
   
   /**
@@ -907,22 +992,19 @@ public class HttpServer implements FilterContainer {
    */
   public void stop() throws Exception {
     MultiException exception = null;
-    try {
-      listener.close();
-    } catch (Exception e) {
-      LOG.error("Error while stopping listener for webapp"
-          + webAppContext.getDisplayName(), e);
-      exception = addMultiException(exception, e);
-    }
+    for (ListenerInfo li : listeners) {
+      if (!li.isManaged) {
+        continue;
+      }
 
 
-    try {
-      if (sslFactory != null) {
-          sslFactory.destroy();
+      try {
+        li.listener.close();
+      } catch (Exception e) {
+        LOG.error(
+            "Error while stopping listener for webapp"
+                + webAppContext.getDisplayName(), e);
+        exception = addMultiException(exception, e);
       }
-    } catch (Exception e) {
-      LOG.error("Error while destroying the SSLFactory"
-          + webAppContext.getDisplayName(), e);
-      exception = addMultiException(exception, e);
     }
 
     try {
@@ -934,6 +1016,7 @@ public class HttpServer implements FilterContainer {
           + webAppContext.getDisplayName(), e);
       exception = addMultiException(exception, e);
     }
+
     try {
       webServer.stop();
     } catch (Exception e) {
@@ -974,10 +1057,17 @@ public class HttpServer implements FilterContainer {
    */
   @Override
   public String toString() {
-    return listener != null ?
-        ("HttpServer at http://" + listener.getHost() + ":" + listener.getLocalPort() + "/"
-            + (isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE))
-        : "Inactive HttpServer";
+    if (listeners.size() == 0) {
+      return "Inactive HttpServer";
+    } else {
+      StringBuilder sb = new StringBuilder("HttpServer (")
+        .append(isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE).append("), listening at:");
+      for (ListenerInfo li : listeners) {
+        Connector l = li.listener;
+        sb.append(l.getHost()).append(":").append(l.getPort()).append("/,");
+      }
+      return sb.toString();
+    }
   }
 
   /**

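The Builder changes above replace the single bindAddress/port pair with a list of endpoint URIs, and getConnectorAddress() supersedes getConnectorPort(). A minimal sketch of the new API, assuming a webapps/&lt;name&gt; resource is available on the classpath (which HttpServer requires); the name "demo" and port 0 are illustrative:

```java
import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;

public class HttpServerBuilderSketch {
  public static void main(String[] args) throws Exception {
    // Port 0 lets the managed listener pick any free port, as described in the
    // addEndpoint() javadoc above; the URI scheme selects an HTTP or HTTPS
    // connector inside build().
    HttpServer server = new HttpServer.Builder()
        .setName("demo")                                // assumes webapps/demo exists on the classpath
        .addEndpoint(URI.create("http://localhost:0"))  // replaces setBindAddress()/setPort()
        .setFindPort(true)
        .setConf(new Configuration())
        .build();
    server.start();
    try {
      // getConnectorAddress() replaces getConnectorPort(); index 0 is the
      // first listener that was added.
      InetSocketAddress addr = server.getConnectorAddress(0);
      System.out.println("listening at http://" + addr.getHostName() + ":" + addr.getPort());
    } finally {
      server.stop();
    }
  }
}
```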
+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java

@@ -142,6 +142,10 @@ public class NativeIO {
         NativeIO.POSIX.posixFadviseIfPossible(identifier, fd, offset,
             len, flags);
       }
+
+      public boolean verifyCanMlock() {
+        return NativeIO.isAvailable();
+      }
     }
 
     /**
@@ -163,6 +167,10 @@ public class NativeIO {
       public long getOperatingSystemPageSize() {
         return 4096;
       }
+
+      public boolean verifyCanMlock() {
+        return true;
+      }
     }
 
     static {

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java

@@ -634,7 +634,7 @@ public class RPC {
     } catch (IOException e) {
       LOG.error("Closing proxy or invocation handler caused exception", e);
     } catch (IllegalArgumentException e) {
-      LOG.error("RPC.stopProxy called on non proxy.", e);
+      LOG.error("RPC.stopProxy called on non proxy: class=" + proxy.getClass().getName(), e);
     }
     
     // If you see this error on a mock object in a unit test you're

+ 5 - 70
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java

@@ -22,19 +22,14 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URL;
-import java.net.URLConnection;
 import java.net.UnknownHostException;
-import java.security.AccessController;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Locale;
 import java.util.ServiceLoader;
-import java.util.Set;
 
 
-import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.kerberos.KerberosTicket;
 
@@ -44,22 +39,19 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenInfo;
 
-import com.google.common.annotations.VisibleForTesting;
 
 
 //this will need to be replaced someday when there is a suitable replacement
 import sun.net.dns.ResolverConfiguration;
 import sun.net.util.IPAddressUtil;
 
+import com.google.common.annotations.VisibleForTesting;
+
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public class SecurityUtil {
@@ -73,24 +65,14 @@ public class SecurityUtil {
   @VisibleForTesting
   static HostResolver hostResolver;
 
-  private static SSLFactory sslFactory;
-
   static {
     Configuration conf = new Configuration();
     boolean useIp = conf.getBoolean(
-      CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP,
-      CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT);
+        CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP,
+        CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT);
     setTokenServiceUseIp(useIp);
-    if (HttpConfig.isSecure()) {
-      sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
-      try {
-        sslFactory.init();
-      } catch (Exception ex) {
-        throw new RuntimeException(ex);
-      }
-    }
   }
-  
+
   /**
    * For use only by tests and initialization
    */
@@ -102,29 +84,6 @@ public class SecurityUtil {
         : new StandardHostResolver();
   }
   
-  /**
-   * Find the original TGT within the current subject's credentials. Cross-realm
-   * TGT's of the form "krbtgt/TWO.COM@ONE.COM" may be present.
-   * 
-   * @return The TGT from the current subject
-   * @throws IOException
-   *           if TGT can't be found
-   */
-  private static KerberosTicket getTgtFromSubject() throws IOException {
-    Subject current = Subject.getSubject(AccessController.getContext());
-    if (current == null) {
-      throw new IOException(
-          "Can't get TGT from current Subject, because it is null");
-    }
-    Set<KerberosTicket> tickets = current
-        .getPrivateCredentials(KerberosTicket.class);
-    for (KerberosTicket t : tickets) {
-      if (isOriginalTGT(t))
-        return t;
-    }
-    throw new IOException("Failed to find TGT from current Subject:"+current);
-  }
-  
   /**
    * TGS must have the server principal of the form "krbtgt/FOO@FOO".
    * @param principal
@@ -492,30 +451,6 @@ public class SecurityUtil {
     }
   }
 
-  /**
-   * Open a (if need be) secure connection to a URL in a secure environment
-   * that is using SPNEGO to authenticate its URLs. All Namenode and Secondary
-   * Namenode URLs that are protected via SPNEGO should be accessed via this
-   * method.
-   *
-   * @param url to authenticate via SPNEGO.
-   * @return A connection that has been authenticated via SPNEGO
-   * @throws IOException If unable to authenticate via SPNEGO
-   */
-  public static URLConnection openSecureHttpConnection(URL url) throws IOException {
-    if (!HttpConfig.isSecure() && !UserGroupInformation.isSecurityEnabled()) {
-      return url.openConnection();
-    }
-
-    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
-    try {
-      return new AuthenticatedURL(null, sslFactory).openConnection(url, token);
-    } catch (AuthenticationException e) {
-      throw new IOException("Exception trying to open authenticated connection to "
-              + url, e);
-    }
-  }
-
   /**
   /**
    * Resolves a host subject to the security requirements determined by
    * Resolves a host subject to the security requirements determined by
    * hadoop.security.token.service.use_ip.
    * hadoop.security.token.service.use_ip.
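
The hunk above now only reads hadoop.security.token.service.use_ip in the static initializer; the SPNEGO/SSLFactory client path is gone. As a minimal sketch of the remaining behavior (not part of the patch), the following assumes SecurityUtil.setTokenServiceUseIp is reachable by the caller, which the javadoc above restricts to tests and initialization:

// Sketch only: mirrors the static initializer in the hunk above. The standalone
// class name is hypothetical; the config keys and setTokenServiceUseIp come
// from the patch itself.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.SecurityUtil;

public class TokenServiceUseIpExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    boolean useIp = conf.getBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP,
        CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT);
    // Chooses between IP-based and hostname-based token service names.
    SecurityUtil.setTokenServiceUseIp(useIp);
  }
}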

+ 5 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java

@@ -348,8 +348,11 @@ public class LightWeightGSet<K, E extends K> implements GSet<K, E> {
 
     LOG.info("Computing capacity for map " + mapName);
     LOG.info("VM type       = " + vmBit + "-bit");
-    LOG.info(percentage + "% max memory = "
-        + StringUtils.TraditionalBinaryPrefix.long2String(maxMemory, "B", 1));
+    LOG.info(percentage + "% max memory "
+        + StringUtils.TraditionalBinaryPrefix.long2String(maxMemory, "B", 1)
+        + " = "
+        + StringUtils.TraditionalBinaryPrefix.long2String((long) percentMemory,
+            "B", 1));
     LOG.info("capacity      = 2^" + exponent + " = " + c + " entries");
     return c;
   }
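
The changed log line now prints both the max heap and the slice of it selected by percentage. A worked sketch of that arithmetic, and of rounding the resulting entry count down to a power of two (the "2^exponent" in the following log line), is below; the per-entry reference size used to turn bytes into entries is an assumption for illustration, since this hunk only shows the formatting:

// Worked sketch of the arithmetic behind the two log lines above. The 8-byte
// per-entry reference size is an assumed illustration value, not taken from
// LightWeightGSet itself.
import org.apache.hadoop.util.StringUtils;

public class CapacityExample {
  public static void main(String[] args) {
    final long maxMemory = Runtime.getRuntime().maxMemory();
    final double percentage = 2.0;                  // e.g. 2% of the heap
    final double percentMemory = maxMemory * percentage / 100.0;

    final int referenceSize = 8;                    // assumed bytes per entry
    int exponent = 0;
    while ((1L << (exponent + 1)) * referenceSize <= percentMemory) {
      exponent++;                                   // largest 2^exponent that fits
    }
    long capacity = 1L << exponent;

    System.out.println(percentage + "% max memory "
        + StringUtils.TraditionalBinaryPrefix.long2String(maxMemory, "B", 1)
        + " = "
        + StringUtils.TraditionalBinaryPrefix.long2String((long) percentMemory, "B", 1));
    System.out.println("capacity      = 2^" + exponent + " = " + capacity + " entries");
  }
}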

+ 1 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java

@@ -91,6 +91,7 @@ public abstract class FCStatisticsBaseTest {
     FSDataInputStream fstr = fc.open(filePath);
     byte[] buf = new byte[blockSize];
     int bytesRead = fstr.read(buf, 0, blockSize);
+    fstr.read(0, buf, 0, blockSize);
     Assert.assertEquals(blockSize, bytesRead);
     verifyReadBytes(stats);
     verifyWrittenBytes(stats);

+ 2 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java

@@ -47,7 +47,8 @@ public class TestLocalFsFCStatistics extends FCStatisticsBaseTest {
 
   @Override
   protected void verifyReadBytes(Statistics stats) {
-    Assert.assertEquals(blockSize, stats.getBytesRead());
+    // one blockSize for read, one for pread
+    Assert.assertEquals(2*blockSize, stats.getBytesRead());
   }
 
   @Override

+ 17 - 12
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java

@@ -19,13 +19,16 @@
 
 
 package org.apache.hadoop.http;
 package org.apache.hadoop.http;
 
 
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.HttpServer.Builder;
 
 
 import java.io.File;
 import java.io.File;
 import java.io.IOException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStream;
+import java.net.URI;
 import java.net.URL;
 import java.net.URL;
 import java.net.MalformedURLException;
 import java.net.MalformedURLException;
 
 
@@ -120,8 +123,9 @@ public class HttpServerFunctionalTest extends Assert {
   public static HttpServer createServer(String host, int port)
   public static HttpServer createServer(String host, int port)
       throws IOException {
       throws IOException {
     prepareTestWebapp();
     prepareTestWebapp();
-    return new HttpServer.Builder().setName(TEST).setBindAddress(host)
-        .setPort(port).setFindPort(true).build();
+    return new HttpServer.Builder().setName(TEST)
+        .addEndpoint(URI.create("http://" + host + ":" + port))
+        .setFindPort(true).build();
   }
   }
 
 
   /**
   /**
@@ -131,8 +135,7 @@ public class HttpServerFunctionalTest extends Assert {
    * @throws IOException if it could not be created
    * @throws IOException if it could not be created
    */
    */
   public static HttpServer createServer(String webapp) throws IOException {
   public static HttpServer createServer(String webapp) throws IOException {
-    return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0")
-        .setPort(0).setFindPort(true).build();
+    return localServerBuilder(webapp).setFindPort(true).build();
   }
   }
   /**
   /**
    * Create an HttpServer instance for the given webapp
    * Create an HttpServer instance for the given webapp
@@ -143,14 +146,17 @@ public class HttpServerFunctionalTest extends Assert {
    */
    */
   public static HttpServer createServer(String webapp, Configuration conf)
   public static HttpServer createServer(String webapp, Configuration conf)
       throws IOException {
       throws IOException {
-    return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0")
-        .setPort(0).setFindPort(true).setConf(conf).build();
+    return localServerBuilder(webapp).setFindPort(true).setConf(conf).build();
   }
   }
 
 
   public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
   public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
       throws IOException {
       throws IOException {
-    return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0")
-        .setPort(0).setFindPort(true).setConf(conf).setACL(adminsAcl).build();
+    return localServerBuilder(webapp).setFindPort(true).setConf(conf).setACL(adminsAcl).build();
+  }
+
+  private static Builder localServerBuilder(String webapp) {
+    return new HttpServer.Builder().setName(webapp).addEndpoint(
+        URI.create("http://localhost:0"));
   }
   }
   
   
   /**
   /**
@@ -163,8 +169,7 @@ public class HttpServerFunctionalTest extends Assert {
    */
    */
   public static HttpServer createServer(String webapp, Configuration conf,
   public static HttpServer createServer(String webapp, Configuration conf,
       String[] pathSpecs) throws IOException {
       String[] pathSpecs) throws IOException {
-    return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0")
-        .setPort(0).setFindPort(true).setConf(conf).setPathSpec(pathSpecs).build();
+    return localServerBuilder(webapp).setFindPort(true).setConf(conf).setPathSpec(pathSpecs).build();
   }
   }
 
 
   /**
   /**
@@ -201,8 +206,8 @@ public class HttpServerFunctionalTest extends Assert {
   public static URL getServerURL(HttpServer server)
   public static URL getServerURL(HttpServer server)
       throws MalformedURLException {
       throws MalformedURLException {
     assertNotNull("No server", server);
     assertNotNull("No server", server);
-    int port = server.getPort();
-    return new URL("http://localhost:" + port + "/");
+    return new URL("http://"
+        + NetUtils.getHostPortString(server.getConnectorAddress(0)));
   }
   }
 
 
   /**
   /**
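
These helpers replace setBindAddress/setPort with the endpoint-based builder. The pattern the rest of the tests in this patch share is: bind an ephemeral localhost endpoint, then derive the base URL from connector 0 after start(). A minimal hedged sketch of that pattern, using only the Builder, addEndpoint, getConnectorAddress and NetUtils calls shown in this patch (the class name is hypothetical):

// Minimal sketch of the endpoint-based builder usage introduced by this patch.
// Not a drop-in test; error handling beyond the finally block is omitted.
import java.net.URI;
import java.net.URL;

import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.net.NetUtils;

public class EphemeralHttpServerExample {
  public static void main(String[] args) throws Exception {
    HttpServer server = new HttpServer.Builder()
        .setName("example")
        .addEndpoint(URI.create("http://localhost:0"))  // port 0 = ephemeral
        .setFindPort(true)
        .build();
    server.start();
    try {
      // The bound host:port is only known after start(); connector 0 is the
      // endpoint added above.
      URL baseUrl = new URL("http://"
          + NetUtils.getHostPortString(server.getConnectorAddress(0)) + "/");
      System.out.println("HTTP server started: " + baseUrl);
    } finally {
      server.stop();
    }
  }
}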

+ 3 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java

@@ -36,6 +36,7 @@ import javax.servlet.http.HttpServletRequest;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 
 public class TestGlobalFilter extends HttpServerFunctionalTest {
@@ -125,7 +126,8 @@ public class TestGlobalFilter extends HttpServerFunctionalTest {
         dataURL, streamFile, rootURL, allURL, outURL, logURL};
 
     //access the urls
-    final String prefix = "http://localhost:" + http.getPort();
+    final String prefix = "http://"
+        + NetUtils.getHostPortString(http.getConnectorAddress(0));
     try {
       for(int i = 0; i < urls.length; i++) {
         access(prefix + urls[i]);

+ 26 - 20
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.http;
 import java.io.IOException;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.PrintWriter;
 import java.net.HttpURLConnection;
 import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
+import java.net.URI;
 import java.net.URL;
 import java.net.URL;
 import java.util.Arrays;
 import java.util.Arrays;
 import java.util.Enumeration;
 import java.util.Enumeration;
@@ -53,6 +53,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.http.HttpServer.QuotingInputFilter.RequestQuoter;
 import org.apache.hadoop.http.HttpServer.QuotingInputFilter.RequestQuoter;
 import org.apache.hadoop.http.resource.JerseyResource;
 import org.apache.hadoop.http.resource.JerseyResource;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -61,6 +62,8 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
+import org.mortbay.jetty.Connector;
 import org.mortbay.util.ajax.JSON;
 import org.mortbay.util.ajax.JSON;
 
 
 public class TestHttpServer extends HttpServerFunctionalTest {
 public class TestHttpServer extends HttpServerFunctionalTest {
@@ -362,11 +365,10 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));
     MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));
 
 
     HttpServer myServer = new HttpServer.Builder().setName("test")
     HttpServer myServer = new HttpServer.Builder().setName("test")
-        .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build();
+        .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build();
     myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf);
     myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf);
     myServer.start();
     myServer.start();
-    int port = myServer.getPort();
-    String serverURL = "http://localhost:" + port + "/";
+    String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
     for (String servlet : new String[] { "conf", "logs", "stacks",
     for (String servlet : new String[] { "conf", "logs", "stacks",
         "logLevel", "metrics" }) {
         "logLevel", "metrics" }) {
       for (String user : new String[] { "userA", "userB" }) {
       for (String user : new String[] { "userA", "userB" }) {
@@ -404,12 +406,13 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     MyGroupsProvider.mapping.put("userE", Arrays.asList("groupE"));
     MyGroupsProvider.mapping.put("userE", Arrays.asList("groupE"));
 
 
     HttpServer myServer = new HttpServer.Builder().setName("test")
     HttpServer myServer = new HttpServer.Builder().setName("test")
-        .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).setConf(conf)
+        .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf)
         .setACL(new AccessControlList("userA,userB groupC,groupD")).build();
         .setACL(new AccessControlList("userA,userB groupC,groupD")).build();
     myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf);
     myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf);
     myServer.start();
     myServer.start();
-    int port = myServer.getPort();
-    String serverURL = "http://localhost:" + port + "/";
+
+    String serverURL = "http://"
+        + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
     for (String servlet : new String[] { "conf", "logs", "stacks",
     for (String servlet : new String[] { "conf", "logs", "stacks",
         "logLevel", "metrics" }) {
         "logLevel", "metrics" }) {
       for (String user : new String[] { "userA", "userB", "userC", "userD" }) {
       for (String user : new String[] { "userA", "userB", "userC", "userD" }) {
@@ -520,20 +523,20 @@ public class TestHttpServer extends HttpServerFunctionalTest {
   }
   }
 
 
   @Test public void testBindAddress() throws Exception {
   @Test public void testBindAddress() throws Exception {
-    checkBindAddress("0.0.0.0", 0, false).stop();
+    checkBindAddress("localhost", 0, false).stop();
     // hang onto this one for a bit more testing
     // hang onto this one for a bit more testing
     HttpServer myServer = checkBindAddress("localhost", 0, false);
     HttpServer myServer = checkBindAddress("localhost", 0, false);
     HttpServer myServer2 = null;
     HttpServer myServer2 = null;
     try { 
     try { 
-      int port = myServer.getListenerAddress().getPort();
+      int port = myServer.getConnectorAddress(0).getPort();
       // it's already in use, true = expect a higher port
       // it's already in use, true = expect a higher port
       myServer2 = checkBindAddress("localhost", port, true);
       myServer2 = checkBindAddress("localhost", port, true);
       // try to reuse the port
       // try to reuse the port
-      port = myServer2.getListenerAddress().getPort();
+      port = myServer2.getConnectorAddress(0).getPort();
       myServer2.stop();
       myServer2.stop();
-      assertEquals(-1, myServer2.getPort()); // not bound
-      myServer2.openListener();
-      assertEquals(port, myServer2.getPort()); // expect same port
+      assertNull(myServer2.getConnectorAddress(0)); // not bound
+      myServer2.openListeners();
+      assertEquals(port, myServer2.getConnectorAddress(0).getPort()); // expect same port
     } finally {
     } finally {
       myServer.stop();
       myServer.stop();
       if (myServer2 != null) {
       if (myServer2 != null) {
@@ -547,21 +550,24 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     HttpServer server = createServer(host, port);
     HttpServer server = createServer(host, port);
     try {
     try {
       // not bound, ephemeral should return requested port (0 for ephemeral)
       // not bound, ephemeral should return requested port (0 for ephemeral)
-      InetSocketAddress addr = server.getListenerAddress();
-      assertEquals(port, addr.getPort());
+      List<?> listeners = (List<?>) Whitebox.getInternalState(server,
+          "listeners");
+      Connector listener = (Connector) Whitebox.getInternalState(
+          listeners.get(0), "listener");
+
+      assertEquals(port, listener.getPort());
       // verify hostname is what was given
       // verify hostname is what was given
-      server.openListener();
-      addr = server.getListenerAddress();
-      assertEquals(host, addr.getHostName());
+      server.openListeners();
+      assertEquals(host, server.getConnectorAddress(0).getHostName());
 
 
-      int boundPort = addr.getPort();
+      int boundPort = server.getConnectorAddress(0).getPort();
       if (port == 0) {
       if (port == 0) {
         assertTrue(boundPort != 0); // ephemeral should now return bound port
         assertTrue(boundPort != 0); // ephemeral should now return bound port
       } else if (findPort) {
       } else if (findPort) {
         assertTrue(boundPort > port);
         assertTrue(boundPort > port);
         // allow a little wiggle room to prevent random test failures if
         // allow a little wiggle room to prevent random test failures if
         // some consecutive ports are already in use
         // some consecutive ports are already in use
-        assertTrue(addr.getPort() - port < 8);
+        assertTrue(boundPort - port < 8);
       }
       }
     } catch (Exception e) {
     } catch (Exception e) {
       server.stop();
       server.stop();

+ 3 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java

@@ -36,6 +36,7 @@ import javax.servlet.http.HttpServletRequest;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 
 public class TestPathFilter extends HttpServerFunctionalTest {
@@ -126,7 +127,8 @@ public class TestPathFilter extends HttpServerFunctionalTest {
 
     // access the urls and verify our paths specs got added to the 
     // filters
-    final String prefix = "http://localhost:" + http.getPort();
+    final String prefix = "http://"
+        + NetUtils.getHostPortString(http.getConnectorAddress(0));
     try {
       for(int i = 0; i < filteredUrls.length; i++) {
         access(prefix + filteredUrls[i]);

+ 57 - 61
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java

@@ -17,105 +17,101 @@
  */
  */
 package org.apache.hadoop.http;
 package org.apache.hadoop.http;
 
 
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.InputStream;
+import java.net.URI;
+import java.net.URL;
+
+import javax.net.ssl.HttpsURLConnection;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.ssl.SSLFactory;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.Test;
 
 
-import javax.net.ssl.HttpsURLConnection;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.InputStream;
-import java.io.Writer;
-import java.net.URL;
-
 /**
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
  * This testcase issues SSL certificates configures the HttpServer to serve
  * HTTPS using the created certficates and calls an echo servlet using the
  * HTTPS using the created certficates and calls an echo servlet using the
  * corresponding HTTPS URL.
  * corresponding HTTPS URL.
  */
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
-  private static final String CONFIG_SITE_XML = "sslhttpserver-site.xml";
-
-  private static final String BASEDIR =
-      System.getProperty("test.build.dir", "target/test-dir") + "/" +
-      TestSSLHttpServer.class.getSimpleName();
+  private static final String BASEDIR = System.getProperty("test.build.dir",
+      "target/test-dir") + "/" + TestSSLHttpServer.class.getSimpleName();
 
 
-  static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
+  private static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
+  private static Configuration conf;
   private static HttpServer server;
   private static HttpServer server;
   private static URL baseUrl;
   private static URL baseUrl;
+  private static String keystoresDir;
+  private static String sslConfDir;
+  private static SSLFactory clientSslFactory;
 
 
+  @BeforeClass
+  public static void setup() throws Exception {
+    conf = new Configuration();
+    conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
 
 
-  @Before
-  public void setup() throws Exception {
-    HttpConfig.setPolicy(HttpConfig.Policy.HTTPS_ONLY);
     File base = new File(BASEDIR);
     File base = new File(BASEDIR);
     FileUtil.fullyDelete(base);
     FileUtil.fullyDelete(base);
     base.mkdirs();
     base.mkdirs();
-    String classpathDir =
-        KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
-    Configuration conf = new Configuration();
-    String keystoresDir = new File(BASEDIR).getAbsolutePath();
-    String sslConfsDir =
-        KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
-    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, false);
-    conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, true);
+    keystoresDir = new File(BASEDIR).getAbsolutePath();
+    sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
 
 
-    //we do this trick because the MR AppMaster is started in another VM and
-    //the HttpServer configuration is not loaded from the job.xml but from the
-    //site.xml files in the classpath
-    Writer writer = new FileWriter(new File(classpathDir, CONFIG_SITE_XML));
-    conf.writeXml(writer);
-    writer.close();
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+    Configuration sslConf = new Configuration(false);
+    sslConf.addResource("ssl-server.xml");
+    sslConf.addResource("ssl-client.xml");
 
 
-    conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
-    conf.addResource(CONFIG_SITE_XML);
-    server = createServer("test", conf);
+    clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
+    clientSslFactory.init();
+
+    server = new HttpServer.Builder()
+        .setName("test")
+        .addEndpoint(new URI("https://localhost"))
+        .setConf(conf)
+        .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
+        .keyStore(sslConf.get("ssl.server.keystore.location"),
+            sslConf.get("ssl.server.keystore.password"),
+            sslConf.get("ssl.server.keystore.type", "jks"))
+        .trustStore(sslConf.get("ssl.server.truststore.location"),
+            sslConf.get("ssl.server.truststore.password"),
+            sslConf.get("ssl.server.truststore.type", "jks")).build();
     server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
     server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
     server.start();
     server.start();
-    baseUrl = new URL("https://localhost:" + server.getPort() + "/");
-    LOG.info("HTTP server started: "+ baseUrl);
+    baseUrl = new URL("https://"
+        + NetUtils.getHostPortString(server.getConnectorAddress(0)));
+    LOG.info("HTTP server started: " + baseUrl);
   }
   }
 
 
-  @After
-  public void cleanup() throws Exception {
+  @AfterClass
+  public static void cleanup() throws Exception {
     server.stop();
     server.stop();
-    String classpathDir =
-        KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
-    new File(classpathDir, CONFIG_SITE_XML).delete();
-    HttpConfig.setPolicy(HttpConfig.Policy.HTTP_ONLY);
+    FileUtil.fullyDelete(new File(BASEDIR));
+    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
+    clientSslFactory.destroy();
   }
   }
-  
 
 
   @Test
   @Test
   public void testEcho() throws Exception {
   public void testEcho() throws Exception {
-    assertEquals("a:b\nc:d\n", 
-        readOut(new URL(baseUrl, "/echo?a=b&c=d")));
-    assertEquals("a:b\nc&lt;:d\ne:&gt;\n", 
-        readOut(new URL(baseUrl, "/echo?a=b&c<=d&e=>")));
+    assertEquals("a:b\nc:d\n", readOut(new URL(baseUrl, "/echo?a=b&c=d")));
+    assertEquals("a:b\nc&lt;:d\ne:&gt;\n", readOut(new URL(baseUrl,
+        "/echo?a=b&c<=d&e=>")));
   }
   }
 
 
   private static String readOut(URL url) throws Exception {
   private static String readOut(URL url) throws Exception {
-    StringBuilder out = new StringBuilder();
     HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
     HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
-    Configuration conf = new Configuration();
-    conf.addResource(CONFIG_SITE_XML);
-    SSLFactory sslf = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
-    sslf.init();
-    conn.setSSLSocketFactory(sslf.createSSLSocketFactory());
+    conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());
     InputStream in = conn.getInputStream();
     InputStream in = conn.getInputStream();
-    byte[] buffer = new byte[64 * 1024];
-    int len = in.read(buffer);
-    while (len > 0) {
-      out.append(new String(buffer, 0, len));
-      len = in.read(buffer);
-    }
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(in, out, 1024);
     return out.toString();
     return out.toString();
   }
   }
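
The rewritten test builds the HTTPS endpoint from ssl-server.xml once per class and keeps one client SSLFactory. A hedged sketch of just the client half, reusing only calls that appear in the diff above (Configuration, SSLFactory, IOUtils); it assumes the generated ssl-client.xml is on the classpath, as KeyStoreTestUtil.setupSSLConfig arranges in the setup, and the class and method names here are hypothetical:

// Client-side sketch: read ssl-client.xml, build a CLIENT-mode SSLFactory, and
// attach its socket factory to an HttpsURLConnection for one request.
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.net.URL;

import javax.net.ssl.HttpsURLConnection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.ssl.SSLFactory;

public class SslClientExample {
  public static String fetch(URL url) throws Exception {
    Configuration sslConf = new Configuration(false);
    sslConf.addResource("ssl-client.xml");

    SSLFactory clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
    clientSslFactory.init();
    try {
      HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
      conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());
      InputStream in = conn.getInputStream();
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      IOUtils.copyBytes(in, out, 1024);
      return out.toString();
    } finally {
      clientSslFactory.destroy();
    }
  }
}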
 
 

+ 4 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java

@@ -35,6 +35,7 @@ import javax.servlet.http.HttpServletRequest;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
@@ -125,7 +126,8 @@ public class TestServletFilter extends HttpServerFunctionalTest {
     }
 
     //access the urls as the sequence
-    final String prefix = "http://localhost:" + http.getPort();
+    final String prefix = "http://"
+        + NetUtils.getHostPortString(http.getConnectorAddress(0));
     try {
       for(int i = 0; i < sequence.length; i++) {
         access(prefix + urls[sequence[i]]);
@@ -185,7 +187,7 @@ public class TestServletFilter extends HttpServerFunctionalTest {
       throws Exception {
     Configuration conf = new Configuration();
     HttpServer http = createTestServer(conf);
-    http.defineFilter(http.webAppContext,
+    HttpServer.defineFilter(http.webAppContext,
         "ErrorFilter", ErrorFilter.class.getName(),
         null, null);
     try {

+ 8 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java

@@ -21,8 +21,10 @@ import java.io.*;
 import java.net.*;
 
 import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.net.NetUtils;
 
 import junit.framework.TestCase;
+
 import org.apache.commons.logging.*;
 import org.apache.commons.logging.impl.*;
 import org.apache.log4j.*;
@@ -43,15 +45,16 @@ public class TestLogLevel extends TestCase {
       assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));
 
       HttpServer server = new HttpServer.Builder().setName("..")
-          .setBindAddress("localhost").setPort(22222).setFindPort(true)
+          .addEndpoint(new URI("http://localhost:0")).setFindPort(true)
           .build();
       
       server.start();
-      int port = server.getPort();
+      String authority = NetUtils.getHostPortString(server
+          .getConnectorAddress(0));
 
       //servlet
-      URL url = new URL("http://localhost:" + port
-          + "/logLevel?log=" + logName + "&level=" + Level.ERROR);
+      URL url = new URL("http://" + authority + "/logLevel?log=" + logName
+          + "&level=" + Level.ERROR);
       out.println("*** Connecting to " + url);
       URLConnection connection = url.openConnection();
       connection.connect();
@@ -67,7 +70,7 @@ public class TestLogLevel extends TestCase {
       assertTrue(Level.ERROR.equals(log.getEffectiveLevel()));
 
       //command line
-      String[] args = {"-setlevel", "localhost:"+port, logName,""+Level.DEBUG};
+      String[] args = {"-setlevel", authority, logName, Level.DEBUG.toString()};
       LogLevel.main(args);
       log.debug("log.debug3");
       log.info("log.info3");

+ 0 - 95
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapInterface.java

@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.portmap;
-
-import org.apache.hadoop.oncrpc.XDR;
-
-/**
- * Methods that need to be implemented to provide Portmap RPC program.
- * See RFC 1833 for details.
- */
-public interface PortmapInterface {
-  public enum Procedure {
-    // the order of the values below are significant.
-    PMAPPROC_NULL,
-    PMAPPROC_SET,
-    PMAPPROC_UNSET,
-    PMAPPROC_GETPORT,
-    PMAPPROC_DUMP,
-    PMAPPROC_CALLIT,
-    PMAPPROC_GETTIME,
-    PMAPPROC_UADDR2TADDR,
-    PMAPPROC_TADDR2UADDR,
-    PMAPPROC_GETVERSADDR,
-    PMAPPROC_INDIRECT,
-    PMAPPROC_GETADDRLIST,
-    PMAPPROC_GETSTAT;
-    
-    public int getValue() {
-      return ordinal();
-    }
-    
-    public static Procedure fromValue(int value) {
-      if (value < 0 || value >= values().length) {
-        return null;
-      }
-      return values()[value];
-    }
-  }
-
-  /**
-   * This procedure does no work. By convention, procedure zero of any protocol
-   * takes no parameters and returns no results.
-   */
-  public XDR nullOp(int xidd, XDR in, XDR out);
-  
-  /**
-   * When a program first becomes available on a machine, it registers itself
-   * with the port mapper program on the same machine. The program passes its
-   * program number "prog", version number "vers", transport protocol number
-   * "prot", and the port "port" on which it awaits service request. The
-   * procedure returns a boolean reply whose value is "TRUE" if the procedure
-   * successfully established the mapping and "FALSE" otherwise. The procedure
-   * refuses to establish a mapping if one already exists for the tuple
-   * "(prog, vers, prot)".
-   */
-  public XDR set(int xid, XDR in, XDR out);
-  
-  /**
-   * When a program becomes unavailable, it should unregister itself with the
-   * port mapper program on the same machine. The parameters and results have
-   * meanings identical to those of "PMAPPROC_SET". The protocol and port number
-   * fields of the argument are ignored.
-   */
-  public XDR unset(int xid, XDR in, XDR out);
-  
-  /**
-   * Given a program number "prog", version number "vers", and transport
-   * protocol number "prot", this procedure returns the port number on which the
-   * program is awaiting call requests. A port value of zeros means the program
-   * has not been registered. The "port" field of the argument is ignored.
-   */
-  public XDR getport(int xid, XDR in, XDR out);
-  
-  /**
-   * This procedure enumerates all entries in the port mapper's database. The
-   * procedure takes no parameters and returns a list of program, version,
-   * protocol, and port values.
-   */
-  public XDR dump(int xid, XDR in, XDR out);
-}

+ 1 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java

@@ -22,7 +22,6 @@ import org.apache.hadoop.oncrpc.RpcUtil;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.CredentialsNone;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
-import org.apache.hadoop.portmap.PortmapInterface.Procedure;
 
 /**
  * Helper utility for building portmap request
@@ -37,7 +36,7 @@ public class PortmapRequest {
     RpcCall call = RpcCall.getInstance(
         RpcUtil.getNewXid(String.valueOf(RpcProgramPortmap.PROGRAM)),
         RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION,
-        Procedure.PMAPPROC_SET.getValue(), new CredentialsNone(),
+        RpcProgramPortmap.PMAPPROC_SET, new CredentialsNone(),
         new VerifierNone());
     call.write(request);
     return mapping.serialize(request);
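
With the Procedure enum removed, callers pass the raw PMAPPROC_* constants directly. A hedged sketch of building a PMAPPROC_SET registration the way the updated PortmapRequest here and TestPortmap further below do; since RpcProgramPortmap and its constants are package-private, the sketch assumes same-package placement, and the class and method names are hypothetical:

// Sketch of a portmap SET registration request using the raw procedure number.
// Sending the resulting XDR over UDP/TCP is out of scope here.
package org.apache.hadoop.portmap;

import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.CredentialsNone;
import org.apache.hadoop.oncrpc.security.VerifierNone;

public class PortmapSetExample {
  public static XDR buildSetRequest(int xid, int program, int version, int port) {
    XDR request = new XDR();
    // RPC call header addressed to the portmapper's SET procedure.
    RpcCall.getInstance(xid, RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION,
        RpcProgramPortmap.PMAPPROC_SET, new CredentialsNone(),
        new VerifierNone()).write(request);
    // Mapping to register: (program, version, transport, port).
    PortmapMapping mapping = new PortmapMapping(program, version,
        PortmapMapping.TRANSPORT_TCP, port);
    return mapping.serialize(request);
  }
}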

+ 66 - 42
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java

@@ -17,7 +17,7 @@
  */
  */
 package org.apache.hadoop.portmap;
 package org.apache.hadoop.portmap;
 
 
-import java.util.HashMap;
+import java.util.concurrent.ConcurrentHashMap;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
@@ -40,20 +40,26 @@ import org.jboss.netty.handler.timeout.IdleState;
 import org.jboss.netty.handler.timeout.IdleStateAwareChannelUpstreamHandler;
 import org.jboss.netty.handler.timeout.IdleStateAwareChannelUpstreamHandler;
 import org.jboss.netty.handler.timeout.IdleStateEvent;
 import org.jboss.netty.handler.timeout.IdleStateEvent;
 
 
-final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler implements PortmapInterface {
+final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler {
   static final int PROGRAM = 100000;
   static final int PROGRAM = 100000;
   static final int VERSION = 2;
   static final int VERSION = 2;
+
+  static final int PMAPPROC_NULL = 0;
+  static final int PMAPPROC_SET = 1;
+  static final int PMAPPROC_UNSET = 2;
+  static final int PMAPPROC_GETPORT = 3;
+  static final int PMAPPROC_DUMP = 4;
+  static final int PMAPPROC_GETVERSADDR = 9;
+
   private static final Log LOG = LogFactory.getLog(RpcProgramPortmap.class);
   private static final Log LOG = LogFactory.getLog(RpcProgramPortmap.class);
 
 
-  /** Map synchronized usis monitor lock of this instance */
-  private final HashMap<String, PortmapMapping> map;
+  private final ConcurrentHashMap<String, PortmapMapping> map = new ConcurrentHashMap<String, PortmapMapping>();
 
 
   /** ChannelGroup that remembers all active channels for gracefully shutdown. */
   /** ChannelGroup that remembers all active channels for gracefully shutdown. */
   private final ChannelGroup allChannels;
   private final ChannelGroup allChannels;
 
 
   RpcProgramPortmap(ChannelGroup allChannels) {
   RpcProgramPortmap(ChannelGroup allChannels) {
     this.allChannels = allChannels;
     this.allChannels = allChannels;
-    map = new HashMap<String, PortmapMapping>(256);
     PortmapMapping m = new PortmapMapping(PROGRAM, VERSION,
     PortmapMapping m = new PortmapMapping(PROGRAM, VERSION,
         PortmapMapping.TRANSPORT_TCP, RpcProgram.RPCB_PORT);
         PortmapMapping.TRANSPORT_TCP, RpcProgram.RPCB_PORT);
     PortmapMapping m1 = new PortmapMapping(PROGRAM, VERSION,
     PortmapMapping m1 = new PortmapMapping(PROGRAM, VERSION,
@@ -61,48 +67,66 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler imple
     map.put(PortmapMapping.key(m), m);
     map.put(PortmapMapping.key(m), m);
     map.put(PortmapMapping.key(m1), m1);
     map.put(PortmapMapping.key(m1), m1);
   }
   }
-  
-  @Override
-  public XDR nullOp(int xid, XDR in, XDR out) {
+
+  /**
+   * This procedure does no work. By convention, procedure zero of any protocol
+   * takes no parameters and returns no results.
+   */
+  private XDR nullOp(int xid, XDR in, XDR out) {
     return PortmapResponse.voidReply(out, xid);
     return PortmapResponse.voidReply(out, xid);
   }
   }
 
 
-  @Override
-  public XDR set(int xid, XDR in, XDR out) {
+  /**
+   * When a program first becomes available on a machine, it registers itself
+   * with the port mapper program on the same machine. The program passes its
+   * program number "prog", version number "vers", transport protocol number
+   * "prot", and the port "port" on which it awaits service request. The
+   * procedure returns a boolean reply whose value is "TRUE" if the procedure
+   * successfully established the mapping and "FALSE" otherwise. The procedure
+   * refuses to establish a mapping if one already exists for the tuple
+   * "(prog, vers, prot)".
+   */
+  private XDR set(int xid, XDR in, XDR out) {
     PortmapMapping mapping = PortmapRequest.mapping(in);
     PortmapMapping mapping = PortmapRequest.mapping(in);
     String key = PortmapMapping.key(mapping);
     String key = PortmapMapping.key(mapping);
     if (LOG.isDebugEnabled()) {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Portmap set key=" + key);
       LOG.debug("Portmap set key=" + key);
     }
     }
 
 
-    PortmapMapping value = null;
-    synchronized(this) {
-      map.put(key, mapping);
-      value = map.get(key);
-    }  
-    return PortmapResponse.intReply(out, xid, value.getPort());
+    map.put(key, mapping);
+    return PortmapResponse.intReply(out, xid, mapping.getPort());
   }
   }
 
 
-  @Override
-  public synchronized XDR unset(int xid, XDR in, XDR out) {
+  /**
+   * When a program becomes unavailable, it should unregister itself with the
+   * port mapper program on the same machine. The parameters and results have
+   * meanings identical to those of "PMAPPROC_SET". The protocol and port number
+   * fields of the argument are ignored.
+   */
+  private XDR unset(int xid, XDR in, XDR out) {
     PortmapMapping mapping = PortmapRequest.mapping(in);
     PortmapMapping mapping = PortmapRequest.mapping(in);
-    synchronized(this) {
-      map.remove(PortmapMapping.key(mapping));
-    }
+    String key = PortmapMapping.key(mapping);
+
+    if (LOG.isDebugEnabled())
+      LOG.debug("Portmap remove key=" + key);
+
+    map.remove(key);
     return PortmapResponse.booleanReply(out, xid, true);
     return PortmapResponse.booleanReply(out, xid, true);
   }
   }
 
 
-  @Override
-  public synchronized XDR getport(int xid, XDR in, XDR out) {
+  /**
+   * Given a program number "prog", version number "vers", and transport
+   * protocol number "prot", this procedure returns the port number on which the
+   * program is awaiting call requests. A port value of zeros means the program
+   * has not been registered. The "port" field of the argument is ignored.
+   */
+  private XDR getport(int xid, XDR in, XDR out) {
     PortmapMapping mapping = PortmapRequest.mapping(in);
     PortmapMapping mapping = PortmapRequest.mapping(in);
     String key = PortmapMapping.key(mapping);
     String key = PortmapMapping.key(mapping);
     if (LOG.isDebugEnabled()) {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Portmap GETPORT key=" + key + " " + mapping);
       LOG.debug("Portmap GETPORT key=" + key + " " + mapping);
     }
     }
-    PortmapMapping value = null;
-    synchronized(this) {
-      value = map.get(key);
-    }
+    PortmapMapping value = map.get(key);
     int res = 0;
     int res = 0;
     if (value != null) {
     if (value != null) {
       res = value.getPort();
       res = value.getPort();
@@ -115,13 +139,13 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler imple
     return PortmapResponse.intReply(out, xid, res);
     return PortmapResponse.intReply(out, xid, res);
   }
   }
 
 
-  @Override
-  public synchronized XDR dump(int xid, XDR in, XDR out) {
-    PortmapMapping[] pmapList = null;
-    synchronized(this) {
-      pmapList = new PortmapMapping[map.values().size()];
-      map.values().toArray(pmapList);
-    }
+  /**
+   * This procedure enumerates all entries in the port mapper's database. The
+   * procedure takes no parameters and returns a list of program, version,
+   * protocol, and port values.
+   */
+  private XDR dump(int xid, XDR in, XDR out) {
+    PortmapMapping[] pmapList = map.values().toArray(new PortmapMapping[0]);
     return PortmapResponse.pmapList(out, xid, pmapList);
     return PortmapResponse.pmapList(out, xid, pmapList);
   }
   }
 
 
@@ -131,23 +155,23 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler imple
 
 
     RpcInfo info = (RpcInfo) e.getMessage();
     RpcInfo info = (RpcInfo) e.getMessage();
     RpcCall rpcCall = (RpcCall) info.header();
     RpcCall rpcCall = (RpcCall) info.header();
-    final Procedure portmapProc = Procedure.fromValue(rpcCall.getProcedure());
+    final int portmapProc = rpcCall.getProcedure();
     int xid = rpcCall.getXid();
     int xid = rpcCall.getXid();
     XDR in = new XDR(info.data().toByteBuffer().asReadOnlyBuffer(),
     XDR in = new XDR(info.data().toByteBuffer().asReadOnlyBuffer(),
         XDR.State.READING);
         XDR.State.READING);
     XDR out = new XDR();
     XDR out = new XDR();
 
 
-    if (portmapProc == Procedure.PMAPPROC_NULL) {
+    if (portmapProc == PMAPPROC_NULL) {
       out = nullOp(xid, in, out);
       out = nullOp(xid, in, out);
-    } else if (portmapProc == Procedure.PMAPPROC_SET) {
+    } else if (portmapProc == PMAPPROC_SET) {
       out = set(xid, in, out);
       out = set(xid, in, out);
-    } else if (portmapProc == Procedure.PMAPPROC_UNSET) {
+    } else if (portmapProc == PMAPPROC_UNSET) {
       out = unset(xid, in, out);
       out = unset(xid, in, out);
-    } else if (portmapProc == Procedure.PMAPPROC_DUMP) {
+    } else if (portmapProc == PMAPPROC_DUMP) {
       out = dump(xid, in, out);
       out = dump(xid, in, out);
-    } else if (portmapProc == Procedure.PMAPPROC_GETPORT) {
+    } else if (portmapProc == PMAPPROC_GETPORT) {
       out = getport(xid, in, out);
       out = getport(xid, in, out);
-    } else if (portmapProc == Procedure.PMAPPROC_GETVERSADDR) {
+    } else if (portmapProc == PMAPPROC_GETVERSADDR) {
       out = getport(xid, in, out);
       out = getport(xid, in, out);
     } else {
     } else {
       LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc);
       LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc);
@@ -161,7 +185,7 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler imple
     RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
     RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
     RpcUtil.sendRpcResponse(ctx, rsp);
     RpcUtil.sendRpcResponse(ctx, rsp);
   }
   }
-  
+
   @Override
   @Override
   public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent e)
   public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent e)
       throws Exception {
       throws Exception {
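
The handler now dispatches on plain int procedure numbers and keeps registrations in a ConcurrentHashMap, which is why the per-method synchronized blocks could be dropped: every operation it performs touches a single key. A small illustrative sketch of that reasoning (generic, not the actual handler; String stands in for PortmapMapping):

// Single-key operations on a ConcurrentHashMap are individually thread-safe,
// which is all set/unset/getport/dump need. Compound check-then-act sequences
// would still require the map's atomic compound operations (e.g. putIfAbsent).
import java.util.concurrent.ConcurrentHashMap;

public class RegistryExample {
  private final ConcurrentHashMap<String, String> map =
      new ConcurrentHashMap<String, String>();

  void set(String key, String mapping) {
    map.put(key, mapping);            // atomic; no external lock needed
  }

  void unset(String key) {
    map.remove(key);                  // atomic
  }

  String getport(String key) {
    return map.get(key);              // atomic, may return null
  }

  String[] dump() {
    // Weakly consistent snapshot of the current registrations.
    return map.values().toArray(new String[0]);
  }
}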

+ 3 - 3
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java

@@ -23,7 +23,7 @@ import java.net.DatagramPacket;
 import java.net.DatagramSocket;
 import java.net.InetSocketAddress;
 import java.net.Socket;
-import java.util.HashMap;
+import java.util.Map;
 
 import junit.framework.Assert;
 
@@ -80,7 +80,7 @@ public class TestPortmap {
     XDR req = new XDR();
     RpcCall.getInstance(++xid, RpcProgramPortmap.PROGRAM,
         RpcProgramPortmap.VERSION,
-        PortmapInterface.Procedure.PMAPPROC_SET.getValue(),
+        RpcProgramPortmap.PMAPPROC_SET,
         new CredentialsNone(), new VerifierNone()).write(req);
 
     PortmapMapping sent = new PortmapMapping(90000, 1,
@@ -101,7 +101,7 @@ public class TestPortmap {
     Thread.sleep(100);
     boolean found = false;
     @SuppressWarnings("unchecked")
-    HashMap<String, PortmapMapping> map = (HashMap<String, PortmapMapping>) Whitebox
+    Map<String, PortmapMapping> map = (Map<String, PortmapMapping>) Whitebox
         .getInternalState(pm.getHandler(), "map");
 
     for (PortmapMapping m : map.values()) {

+ 32 - 19
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java

@@ -708,15 +708,28 @@ class OpenFileCtx {
     }
     }
     return response;
     return response;
   }
   }
-
+  
+  /**
+   * Check the commit status with the given offset
+   * @param commitOffset the offset to commit
+   * @param channel the channel to return response
+   * @param xid the xid of the commit request
+   * @param preOpAttr the preOp attribute
+   * @param fromRead whether the commit is triggered from read request
+   * @return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
+   * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
+   */
   public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset,
   public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset,
-      Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
-    // Keep stream active
-    updateLastAccessTime();
+      Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
+    if (!fromRead) {
+      Preconditions.checkState(channel != null && preOpAttr != null);
+      // Keep stream active
+      updateLastAccessTime();
+    }
     Preconditions.checkState(commitOffset >= 0);
     Preconditions.checkState(commitOffset >= 0);
 
 
     COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
     COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
-        preOpAttr);
+        preOpAttr, fromRead);
     if (LOG.isDebugEnabled()) {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Got commit status: " + ret.name());
       LOG.debug("Got commit status: " + ret.name());
     }
     }
@@ -743,14 +756,10 @@ class OpenFileCtx {
     }
     }
     return ret;
     return ret;
   }
   }
-
-  /**
-   * return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
-   * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
-   */
+  
   @VisibleForTesting
   @VisibleForTesting
   synchronized COMMIT_STATUS checkCommitInternal(long commitOffset,
   synchronized COMMIT_STATUS checkCommitInternal(long commitOffset,
-      Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
+      Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
     if (!activeState) {
     if (!activeState) {
       if (pendingWrites.isEmpty()) {
       if (pendingWrites.isEmpty()) {
         return COMMIT_STATUS.COMMIT_INACTIVE_CTX;
         return COMMIT_STATUS.COMMIT_INACTIVE_CTX;
@@ -767,9 +776,11 @@ class OpenFileCtx {
 
 
     if (commitOffset > 0) {
     if (commitOffset > 0) {
       if (commitOffset > flushed) {
       if (commitOffset > flushed) {
-        CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid,
-            preOpAttr);
-        pendingCommits.put(commitOffset, commitCtx);
+        if (!fromRead) {
+          CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid,
+              preOpAttr);
+          pendingCommits.put(commitOffset, commitCtx);
+        }
         return COMMIT_STATUS.COMMIT_WAIT;
         return COMMIT_STATUS.COMMIT_WAIT;
       } else {
       } else {
         return COMMIT_STATUS.COMMIT_DO_SYNC;
         return COMMIT_STATUS.COMMIT_DO_SYNC;
@@ -784,11 +795,13 @@ class OpenFileCtx {
       // do a sync here though the output stream might be closed.
       // do a sync here though the output stream might be closed.
       return COMMIT_STATUS.COMMIT_FINISHED;
       return COMMIT_STATUS.COMMIT_FINISHED;
     } else {
     } else {
-      // Insert commit
-      long maxOffset = key.getKey().getMax() - 1;
-      Preconditions.checkState(maxOffset > 0);
-      CommitCtx commitCtx = new CommitCtx(maxOffset, channel, xid, preOpAttr);
-      pendingCommits.put(maxOffset, commitCtx);
+      if (!fromRead) {
+        // Insert commit
+        long maxOffset = key.getKey().getMax() - 1;
+        Preconditions.checkState(maxOffset > 0);
+        CommitCtx commitCtx = new CommitCtx(maxOffset, channel, xid, preOpAttr);
+        pendingCommits.put(maxOffset, commitCtx);
+      }
       return COMMIT_STATUS.COMMIT_WAIT;
       return COMMIT_STATUS.COMMIT_WAIT;
     }
     }
   }
   }
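
checkCommit now carries a fromRead flag: the read path only wants to know whether data up to an offset has been synced and must not queue a pending CommitCtx or refresh the access time, while the COMMIT path still does both. A hedged sketch of the two call shapes, matching the WriteManager changes below; the wrapper class and method names are hypothetical, and it assumes same-package placement since OpenFileCtx is package-private:

// The two checkCommit call shapes introduced by this patch.
package org.apache.hadoop.hdfs.nfs.nfs3;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.jboss.netty.channel.Channel;

class CheckCommitCallShapes {
  // COMMIT request path: channel/xid/preOpAttr are real, so a pending CommitCtx
  // can be queued and the reply sent asynchronously once data is synced.
  static OpenFileCtx.COMMIT_STATUS fromCommit(OpenFileCtx ctx, DFSClient client,
      long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
    return ctx.checkCommit(client, commitOffset, channel, xid, preOpAttr, false);
  }

  // Read path (commitBeforeRead): placeholders plus fromRead=true, so nothing
  // is queued; the caller maps COMMIT_WAIT to NFS3ERR_JUKEBOX instead of blocking.
  static OpenFileCtx.COMMIT_STATUS beforeRead(OpenFileCtx ctx, DFSClient client,
      long readUpToOffset) {
    return ctx.checkCommit(client, readUpToOffset, null, 0, null, true);
  }
}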

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java

@@ -628,6 +628,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       }
     }
     
+    // In case there is buffered data for the same file, flush it. This can be
+    // optimized later by reading from the cache.
+    int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
+    if (ret != Nfs3Status.NFS3_OK) {
+      LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
+          + ". Read may not get most recent data.");
+    }
+
     try {
       int buffSize = Math.min(MAX_READ_TRANSFER_SIZE, count);
       byte[] readbuffer = new byte[buffSize];

+ 47 - 5
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 
 import java.io.IOException;
 import java.io.IOException;
-import java.util.concurrent.ConcurrentMap;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
@@ -41,11 +40,9 @@ import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
 import org.apache.hadoop.nfs.nfs3.response.WccData;
 import org.apache.hadoop.nfs.nfs3.response.WccData;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
-import org.apache.hadoop.util.Daemon;
 import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.Channel;
 
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Maps;
 
 
 /**
 /**
  * Manage the writes and responds asynchronously.
  * Manage the writes and responds asynchronously.
@@ -207,6 +204,51 @@ public class WriteManager {
     return;
     return;
   }
   }
 
 
+  // Do a possible commit before read request in case there is buffered data
+  // inside DFSClient which has been flushed but not synced.
+  int commitBeforeRead(DFSClient dfsClient, FileHandle fileHandle,
+      long commitOffset) {
+    int status;
+    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
+
+    if (openFileCtx == null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("No opened stream for fileId:" + fileHandle.getFileId()
+            + " commitOffset=" + commitOffset
+            + ". Return success in this case.");
+      }
+      status = Nfs3Status.NFS3_OK;
+
+    } else {
+      COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
+          null, 0, null, true);
+      switch (ret) {
+      case COMMIT_FINISHED:
+      case COMMIT_INACTIVE_CTX:
+        status = Nfs3Status.NFS3_OK;
+        break;
+      case COMMIT_INACTIVE_WITH_PENDING_WRITE:
+      case COMMIT_ERROR:
+        status = Nfs3Status.NFS3ERR_IO;
+        break;
+      case COMMIT_WAIT:
+        /**
+         * This should happen rarely in some possible cases, such as read
+         * request arrives before DFSClient is able to quickly flush data to DN,
+         * or Prerequisite writes is not available. Won't wait since we don't
+         * want to block read.
+         */     
+        status = Nfs3Status.NFS3ERR_JUKEBOX;
+        break;
+      default:
+        LOG.error("Should not get commit return code:" + ret.name());
+        throw new RuntimeException("Should not get commit return code:"
+            + ret.name());
+      }
+    }
+    return status;
+  }
+  
   void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
   void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
       long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
       long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
     int status;
     int status;
@@ -219,9 +261,8 @@ public class WriteManager {
       
       
     } else {
     } else {
       COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
       COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
-          channel, xid, preOpAttr);
+          channel, xid, preOpAttr, false);
       switch (ret) {
       switch (ret) {
-      case COMMIT_DO_SYNC:
       case COMMIT_FINISHED:
       case COMMIT_FINISHED:
       case COMMIT_INACTIVE_CTX:
       case COMMIT_INACTIVE_CTX:
         status = Nfs3Status.NFS3_OK;
         status = Nfs3Status.NFS3_OK;
@@ -234,6 +275,7 @@ public class WriteManager {
         // Do nothing. Commit is async now.
         // Do nothing. Commit is async now.
         return;
         return;
       default:
       default:
+        LOG.error("Should not get commit return code:" + ret.name());
         throw new RuntimeException("Should not get commit return code:"
         throw new RuntimeException("Should not get commit return code:"
             + ret.name());
             + ret.name());
       }
       }

+ 83 - 9
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -26,6 +27,7 @@ import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.concurrent.ConcurrentNavigableMap;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -39,6 +41,7 @@ import org.apache.hadoop.nfs.nfs3.IdUserGroup;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
+import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
 import org.apache.hadoop.nfs.nfs3.request.READ3Request;
 import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
@@ -47,6 +50,7 @@ import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
 import org.apache.hadoop.nfs.nfs3.response.READ3Response;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.SecurityHandler;
+import org.jboss.netty.channel.Channel;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -139,32 +143,33 @@ public class TestWrites {
 
     // Test inactive open file context
     ctx.setActiveStatusForTest(false);
-    ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
+    Channel ch = Mockito.mock(Channel.class);
+    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
     Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX);
 
     ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
         new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
-    ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
+    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
     Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE);
 
     // Test request with non zero commit offset
     ctx.setActiveStatusForTest(true);
     Mockito.when(fos.getPos()).thenReturn((long) 10);
-    COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr);
+    COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
     Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
     // Do_SYNC state will be updated to FINISHED after data sync
-    ret = ctx.checkCommit(dfsClient, 5, null, 1, attr);
+    ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false);
     Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
 
-    status = ctx.checkCommitInternal(10, null, 1, attr);
+    status = ctx.checkCommitInternal(10, ch, 1, attr, false);
     Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
-    ret = ctx.checkCommit(dfsClient, 10, null, 1, attr);
+    ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false);
     Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
 
     ConcurrentNavigableMap<Long, CommitCtx> commits = ctx
         .getPendingCommitsForTest();
     Assert.assertTrue(commits.size() == 0);
-    ret = ctx.checkCommit(dfsClient, 11, null, 1, attr);
+    ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, false);
     Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
     Assert.assertTrue(commits.size() == 1);
     long key = commits.firstKey();
@@ -173,7 +178,7 @@ public class TestWrites {
     // Test request with zero commit offset
     commits.remove(new Long(11));
     // There is one pending write [5,10]
-    ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
+    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
     Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
     Assert.assertTrue(commits.size() == 1);
     key = commits.firstKey();
@@ -181,10 +186,79 @@ public class TestWrites {
 
     // Empty pending writes
     ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
-    ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
+    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
     Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
   }
 
+  @Test
+  // Validate all the commit check return codes in OpenFileCtx.COMMIT_STATUS,
+  // i.e. COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX,
+  // COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC.
+  public void testCheckCommitFromRead() throws IOException {
+    DFSClient dfsClient = Mockito.mock(DFSClient.class);
+    Nfs3FileAttributes attr = new Nfs3FileAttributes();
+    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
+    Mockito.when(fos.getPos()).thenReturn((long) 0);
+
+    OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
+        new IdUserGroup());
+
+    FileHandle h = new FileHandle(1); // fake handle for "/dumpFilePath"
+    COMMIT_STATUS ret;
+    WriteManager wm = new WriteManager(new IdUserGroup(), new Configuration());
+    assertTrue(wm.addOpenFileStream(h, ctx));
+    
+    // Test inactive open file context
+    ctx.setActiveStatusForTest(false);
+    Channel ch = Mockito.mock(Channel.class);
+    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
+    assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret);
+    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
+    
+    ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
+        new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
+    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
+    assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret);
+    assertEquals(Nfs3Status.NFS3ERR_IO, wm.commitBeforeRead(dfsClient, h, 0));
+    
+    // Test request with non zero commit offset
+    ctx.setActiveStatusForTest(true);
+    Mockito.when(fos.getPos()).thenReturn((long) 10);
+    COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false);
+    assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
+    // Do_SYNC state will be updated to FINISHED after data sync
+    ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, true);
+    assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
+    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 5));
+ 
+    status = ctx.checkCommitInternal(10, ch, 1, attr, true);
+    assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
+    ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, true);
+    assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
+    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 10));
+
+    ConcurrentNavigableMap<Long, CommitCtx> commits = ctx
+        .getPendingCommitsForTest();
+    assertTrue(commits.size() == 0);
+    ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, true);
+    assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret);
+    assertEquals(0, commits.size()); // commit triggered by read doesn't wait
+    assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 11));
+
+    // Test request with zero commit offset
+    // There is one pending write [5,10]
+    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
+    assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret);
+    assertEquals(0, commits.size());
+    assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0));
+
+    // Empty pending writes
+    ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
+    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
+    assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
+    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
+  }
+  
   private void waitWrite(RpcProgramNfs3 nfsd, FileHandle handle, int maxWaitTime)
       throws InterruptedException {
     int waitedTime = 0;

+ 42 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -212,6 +212,22 @@ Trunk (Unreleased)
     and INodeFileUnderConstructionWithSnapshot with FileUnderContructionFeature.
     (jing9 via szetszwo)
 
+    HDFS-5538. URLConnectionFactory should pick up the SSL related configuration
+    by default. (Haohui Mai via jing9)
+
+    HDFS-5286. Flatten INodeDirectory hierarchy: Replace INodeDirectoryWithQuota
+    with DirectoryWithQuotaFeature.  (szetszwo)
+
+    HDFS-5556. Add some more NameNode cache statistics, cache pool stats
+    (cmccabe)
+
+    HDFS-5545. Allow specifying endpoints for listeners in HttpServer. (Haohui
+    Mai via jing9)
+
+    HDFS-5537. Remove FileWithSnapshot interface.  (jing9 via szetszwo)
+
+    HDFS-5430. Support TTL on CacheDirectives. (wang)
+
   OPTIMIZATIONS
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
 
@@ -399,6 +415,12 @@ Trunk (Unreleased)
     HDFS-5543. Fix narrow race condition in TestPathBasedCacheRequests
     (cmccabe)
 
+    HDFS-5565. CacheAdmin help should match against non-dashed commands
+    (wang via cmccabe)
+
+    HDFS-5562. TestCacheDirectives and TestFsDatasetCache should stub out
+    native mlock. (Colin McCabe and Akira Ajisaka via wang)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -536,6 +558,12 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9)
 
+    HDFS-5561. FSNameSystem#getNameJournalStatus() in JMX should return plain
+    text instead of HTML. (Haohui Mai via jing9)
+
+    HDFS-5581. NameNodeFsck should use only one instance of
+    BlockPlacementPolicy. (vinay via cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-5239.  Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -612,6 +640,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5552. Fix wrong information of "Cluster summay" in dfshealth.html.
     (Haohui Mai via jing9)
 
+    HDFS-5533. Symlink delete/create should be treated as DELETE/CREATE in snapshot diff
+    report. (Binglin Chang via jing9)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -634,6 +665,8 @@ Release 2.2.1 - UNRELEASED
 
     HDFS-5544. Adding Test case For Checking dfs.checksum type as NULL value. (Sathish via umamahesh)
 
+    HDFS-5568. Support includeSnapshots option with Fsck command. (Vinayakumar B via umamahesh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -727,6 +760,13 @@ Release 2.2.1 - UNRELEASED
 
     HDFS-5407. Fix typos in DFSClientCache (Haohui Mai via brandonli)
 
+    HDFS-5548. Use ConcurrentHashMap in portmap (Haohui Mai via brandonli)
+
+    HDFS-5577. NFS user guide update (brandonli)
+
+    HDFS-5563. NFS gateway should commit the buffered data when read request comes
+    after write to the same file (brandonli)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
@@ -4018,6 +4058,8 @@ Release 0.23.10 - UNRELEASED
     HDFS-4329. DFSShell issues with directories with spaces in name (Cristina
     L. Abad via jeagles)
 
+    HDFS-5526. Datanode cannot roll back to previous layout version (kihwal)
+
 Release 0.23.9 - 2013-07-08
 
   INCOMPATIBLE CHANGES

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml

@@ -352,6 +352,11 @@
       <Method name="getReplication" />
       <Bug pattern="ICAST_QUESTIONABLE_UNSIGNED_RIGHT_SHIFT" />
     </Match>
+    <Match>
+      <Class name="org.apache.hadoop.hdfs.protocol.CacheDirective" />
+      <Method name="insertInternal" />
+      <Bug pattern="BC_UNCONFIRMED_CAST" />
+    </Match>
     <!-- These two are used for shutting down and kicking the CRMon, do not need strong sync -->
     <Match>
       <Class name="org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor" />

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -109,6 +109,7 @@ import org.apache.hadoop.hdfs.client.ClientMmapManager;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -2358,7 +2359,7 @@ public class DFSClient implements java.io.Closeable {
     }
   }
 
-  public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
     checkOpen();
     try {
       return namenode.listCachePools("");

+ 80 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -38,12 +38,15 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.SecureRandom;
+import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.Date;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
@@ -75,6 +78,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -1427,4 +1431,79 @@ public class DFSUtil {
     return (value == null || value.isEmpty()) ?
         defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
   }
-}
+
+  public static HttpServer.Builder loadSslConfToHttpServerBuilder(
+      HttpServer.Builder builder, Configuration sslConf) {
+    return builder
+        .needsClientAuth(
+            sslConf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
+                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
+        .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
+        .keyStore(sslConf.get("ssl.server.keystore.location"),
+            sslConf.get("ssl.server.keystore.password"),
+            sslConf.get("ssl.server.keystore.type", "jks"))
+        .trustStore(sslConf.get("ssl.server.truststore.location"),
+            sslConf.get("ssl.server.truststore.password"),
+            sslConf.get("ssl.server.truststore.type", "jks"));
+  }
+
+  /**
+   * Converts a Date into an ISO-8601 formatted datetime string.
+   */
+  public static String dateToIso8601String(Date date) {
+    SimpleDateFormat df =
+        new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH);
+    return df.format(date);
+  }
+
+  /**
+   * Converts a time duration in milliseconds into DDD:HH:MM:SS format.
+   */
+  public static String durationToString(long durationMs) {
+    Preconditions.checkArgument(durationMs >= 0, "Invalid negative duration");
+    // Chop off the milliseconds
+    long durationSec = durationMs / 1000;
+    final int secondsPerMinute = 60;
+    final int secondsPerHour = 60*60;
+    final int secondsPerDay = 60*60*24;
+    final long days = durationSec / secondsPerDay;
+    durationSec -= days * secondsPerDay;
+    final long hours = durationSec / secondsPerHour;
+    durationSec -= hours * secondsPerHour;
+    final long minutes = durationSec / secondsPerMinute;
+    durationSec -= minutes * secondsPerMinute;
+    final long seconds = durationSec;
+    return String.format("%03d:%02d:%02d:%02d", days, hours, minutes, seconds);
+  }
+
+  /**
+   * Converts a relative time string into a duration in milliseconds.
+   */
+  public static long parseRelativeTime(String relTime) throws IOException {
+    if (relTime.length() < 2) {
+      throw new IOException("Unable to parse relative time value of " + relTime
+          + ": too short");
+    }
+    String ttlString = relTime.substring(0, relTime.length()-1);
+    int ttl;
+    try {
+      ttl = Integer.parseInt(ttlString);
+    } catch (NumberFormatException e) {
+      throw new IOException("Unable to parse relative time value of " + relTime
+          + ": " + ttlString + " is not a number");
+    }
+    if (relTime.endsWith("s")) {
+      // pass
+    } else if (relTime.endsWith("m")) {
+      ttl *= 60;
+    } else if (relTime.endsWith("h")) {
+      ttl *= 60*60;
+    } else if (relTime.endsWith("d")) {
+      ttl *= 60*60*24;
+    } else {
+      throw new IOException("Unable to parse relative time value of " + relTime
+          + ": unknown time unit " + relTime.charAt(relTime.length() - 1));
+    }
+    return ttl * 1000L; // multiply as long to avoid int overflow for large TTLs
+  }
+}
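A short usage sketch for the helpers added above (illustrative only; assumes the surrounding method declares throws IOException):

    long ttlMs = DFSUtil.parseRelativeTime("30m");          // 30 minutes -> 1800000 ms
    String asDuration = DFSUtil.durationToString(ttlMs);    // "000:00:30:00"
    String expiresAt = DFSUtil.dateToIso8601String(
        new Date(System.currentTimeMillis() + ttlMs));      // absolute expiry on the local clock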

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -1713,12 +1714,12 @@ public class DistributedFileSystem extends FileSystem {
   /**
    * List all cache pools.
    *
-   * @return A remote iterator from which you can get CachePoolInfo objects.
+   * @return A remote iterator from which you can get CachePoolEntry objects.
    *          Requests will be made as needed.
    * @throws IOException
    *          If there was an error listing cache pools.
    */
-  public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
     return dfs.listCachePools();
   }
 }

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
@@ -213,12 +214,12 @@ public class HdfsAdmin {
   /**
    * List all cache pools.
    *
-   * @return A remote iterator from which you can get CachePoolInfo objects.
+   * @return A remote iterator from which you can get CachePoolEntry objects.
    *          Requests will be made as needed.
    * @throws IOException
    *          If there was an error listing cache pools.
    */
-  public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
     return dfs.listCachePools();
   }
 }

+ 105 - 23
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java

@@ -17,65 +17,94 @@
  */
  */
 package org.apache.hadoop.hdfs.protocol;
 package org.apache.hadoop.hdfs.protocol;
 
 
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Date;
+
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.server.namenode.CachePool;
+import org.apache.hadoop.util.IntrusiveCollection;
+import org.apache.hadoop.util.IntrusiveCollection.Element;
 
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Preconditions;
 
 
 /**
 /**
- * Represents an entry in the PathBasedCache on the NameNode.
+ * Namenode class that tracks state related to a cached path.
  *
  *
  * This is an implementation class, not part of the public API.
  * This is an implementation class, not part of the public API.
  */
  */
 @InterfaceAudience.Private
 @InterfaceAudience.Private
-public final class CacheDirective {
-  private final long entryId;
+public final class CacheDirective implements IntrusiveCollection.Element {
+  private final long id;
   private final String path;
   private final String path;
   private final short replication;
   private final short replication;
-  private final CachePool pool;
+  private CachePool pool;
+  private final long expiryTime;
+
   private long bytesNeeded;
   private long bytesNeeded;
   private long bytesCached;
   private long bytesCached;
   private long filesAffected;
   private long filesAffected;
-
-  public CacheDirective(long entryId, String path,
-      short replication, CachePool pool) {
-    Preconditions.checkArgument(entryId > 0);
-    this.entryId = entryId;
+  private Element prev;
+  private Element next;
+
+  public CacheDirective(long id, String path,
+      short replication, long expiryTime) {
+    Preconditions.checkArgument(id > 0);
+    this.id = id;
+    this.path = checkNotNull(path);
     Preconditions.checkArgument(replication > 0);
     Preconditions.checkArgument(replication > 0);
-    this.path = path;
-    Preconditions.checkNotNull(pool);
     this.replication = replication;
     this.replication = replication;
-    Preconditions.checkNotNull(path);
-    this.pool = pool;
+    this.expiryTime = expiryTime;
     this.bytesNeeded = 0;
     this.bytesNeeded = 0;
     this.bytesCached = 0;
     this.bytesCached = 0;
     this.filesAffected = 0;
     this.filesAffected = 0;
   }
   }
 
 
-  public long getEntryId() {
-    return entryId;
+  public long getId() {
+    return id;
   }
   }
 
 
   public String getPath() {
   public String getPath() {
     return path;
     return path;
   }
   }
 
 
+  public short getReplication() {
+    return replication;
+  }
+
   public CachePool getPool() {
   public CachePool getPool() {
     return pool;
     return pool;
   }
   }
 
 
-  public short getReplication() {
-    return replication;
+  /**
+   * @return When this directive expires, in milliseconds since Unix epoch
+   */
+  public long getExpiryTime() {
+    return expiryTime;
   }
   }
 
 
-  public CacheDirectiveInfo toDirective() {
+  /**
+   * @return When this directive expires, as an ISO-8601 formatted string.
+   */
+  public String getExpiryTimeString() {
+    return DFSUtil.dateToIso8601String(new Date(expiryTime));
+  }
+
+  /**
+   * Returns a {@link CacheDirectiveInfo} based on this CacheDirective.
+   * <p>
+   * This always sets an absolute expiry time, never a relative TTL.
+   */
+  public CacheDirectiveInfo toInfo() {
     return new CacheDirectiveInfo.Builder().
     return new CacheDirectiveInfo.Builder().
-        setId(entryId).
+        setId(id).
         setPath(new Path(path)).
         setPath(new Path(path)).
         setReplication(replication).
         setReplication(replication).
         setPool(pool.getPoolName()).
         setPool(pool.getPoolName()).
+        setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiryTime)).
         build();
         build();
   }
   }
 
 
@@ -84,20 +113,22 @@ public final class CacheDirective {
         setBytesNeeded(bytesNeeded).
         setBytesNeeded(bytesNeeded).
         setBytesCached(bytesCached).
         setBytesCached(bytesCached).
         setFilesAffected(filesAffected).
         setFilesAffected(filesAffected).
+        setHasExpired(new Date().getTime() > expiryTime).
         build();
         build();
   }
   }
 
 
   public CacheDirectiveEntry toEntry() {
   public CacheDirectiveEntry toEntry() {
-    return new CacheDirectiveEntry(toDirective(), toStats());
+    return new CacheDirectiveEntry(toInfo(), toStats());
   }
   }
   
   
   @Override
   @Override
   public String toString() {
   public String toString() {
     StringBuilder builder = new StringBuilder();
     StringBuilder builder = new StringBuilder();
-    builder.append("{ entryId:").append(entryId).
+    builder.append("{ id:").append(id).
       append(", path:").append(path).
       append(", path:").append(path).
       append(", replication:").append(replication).
       append(", replication:").append(replication).
       append(", pool:").append(pool).
       append(", pool:").append(pool).
+      append(", expiryTime: ").append(getExpiryTimeString()).
       append(", bytesNeeded:").append(bytesNeeded).
       append(", bytesNeeded:").append(bytesNeeded).
       append(", bytesCached:").append(bytesCached).
       append(", bytesCached:").append(bytesCached).
       append(", filesAffected:").append(filesAffected).
       append(", filesAffected:").append(filesAffected).
@@ -113,12 +144,12 @@ public final class CacheDirective {
       return false;
       return false;
     }
     }
     CacheDirective other = (CacheDirective)o;
     CacheDirective other = (CacheDirective)o;
-    return entryId == other.entryId;
+    return id == other.id;
   }
   }
 
 
   @Override
   @Override
   public int hashCode() {
   public int hashCode() {
-    return new HashCodeBuilder().append(entryId).toHashCode();
+    return new HashCodeBuilder().append(id).toHashCode();
   }
   }
 
 
   public long getBytesNeeded() {
   public long getBytesNeeded() {
@@ -156,4 +187,55 @@ public final class CacheDirective {
   public void incrementFilesAffected() {
   public void incrementFilesAffected() {
     this.filesAffected++;
     this.filesAffected++;
   }
   }
+
+  @SuppressWarnings("unchecked")
+  @Override // IntrusiveCollection.Element
+  public void insertInternal(IntrusiveCollection<? extends Element> list,
+      Element prev, Element next) {
+    assert this.pool == null;
+    this.pool = ((CachePool.DirectiveList)list).getCachePool();
+    this.prev = prev;
+    this.next = next;
+  }
+
+  @Override // IntrusiveCollection.Element
+  public void setPrev(IntrusiveCollection<? extends Element> list, Element prev) {
+    assert list == pool.getDirectiveList();
+    this.prev = prev;
+  }
+
+  @Override // IntrusiveCollection.Element
+  public void setNext(IntrusiveCollection<? extends Element> list, Element next) {
+    assert list == pool.getDirectiveList();
+    this.next = next;
+  }
+
+  @Override // IntrusiveCollection.Element
+  public void removeInternal(IntrusiveCollection<? extends Element> list) {
+    assert list == pool.getDirectiveList();
+    this.pool = null;
+    this.prev = null;
+    this.next = null;
+  }
+
+  @Override // IntrusiveCollection.Element
+  public Element getPrev(IntrusiveCollection<? extends Element> list) {
+    if (list != pool.getDirectiveList()) {
+      return null;
+    }
+    return this.prev;
+  }
+
+  @Override // IntrusiveCollection.Element
+  public Element getNext(IntrusiveCollection<? extends Element> list) {
+    if (list != pool.getDirectiveList()) {
+      return null;
+    }
+    return this.next;
+  }
+
+  @Override // IntrusiveCollection.Element
+  public boolean isInList(IntrusiveCollection<? extends Element> list) {
+    return pool == null ? false : list == pool.getDirectiveList();
+  }
 };
 };

+ 141 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java

@@ -17,11 +17,14 @@
  */
  */
 package org.apache.hadoop.hdfs.protocol;
 package org.apache.hadoop.hdfs.protocol;
 
 
+import java.util.Date;
+
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSUtil;
 
 
 /**
 /**
  * Describes a path-based cache directive.
  * Describes a path-based cache directive.
@@ -37,6 +40,7 @@ public class CacheDirectiveInfo {
     private Path path;
     private Path path;
     private Short replication;
     private Short replication;
     private String pool;
     private String pool;
+    private Expiration expiration;
 
 
     /**
     /**
      * Builds a new CacheDirectiveInfo populated with the set properties.
      * Builds a new CacheDirectiveInfo populated with the set properties.
@@ -44,7 +48,7 @@ public class CacheDirectiveInfo {
      * @return New CacheDirectiveInfo.
      * @return New CacheDirectiveInfo.
      */
      */
     public CacheDirectiveInfo build() {
     public CacheDirectiveInfo build() {
-      return new CacheDirectiveInfo(id, path, replication, pool);
+      return new CacheDirectiveInfo(id, path, replication, pool, expiration);
     }
     }
 
 
     /**
     /**
@@ -62,6 +66,7 @@ public class CacheDirectiveInfo {
       this.path = directive.getPath();
       this.path = directive.getPath();
       this.replication = directive.getReplication();
       this.replication = directive.getReplication();
       this.pool = directive.getPool();
       this.pool = directive.getPool();
+      this.expiration = directive.getExpiration();
     }
     }
 
 
     /**
     /**
@@ -107,18 +112,134 @@ public class CacheDirectiveInfo {
       this.pool = pool;
       this.pool = pool;
       return this;
       return this;
     }
     }
+
+    /**
+     * Sets when the CacheDirective should expire. A
+     * {@link CacheDirectiveInfo.Expiration} can specify either an absolute or
+     * relative expiration time.
+     * 
+     * @param expiration when this CacheDirective should expire
+     * @return This builder, for call chaining
+     */
+    public Builder setExpiration(Expiration expiration) {
+      this.expiration = expiration;
+      return this;
+    }
+  }
+
+  /**
+   * Denotes a relative or absolute expiration time for a CacheDirective. Use
+   * factory methods {@link CacheDirectiveInfo.Expiration#newAbsolute(Date)} and
+   * {@link CacheDirectiveInfo.Expiration#newRelative(long)} to create an
+   * Expiration.
+   * <p>
+   * In either case, the server-side clock is used to determine when a
+   * CacheDirective expires.
+   */
+  public static class Expiration {
+
+    /** Denotes a CacheDirectiveInfo that never expires **/
+    public static final int EXPIRY_NEVER = -1;
+
+    /**
+     * Create a new relative Expiration.
+     * 
+     * @param ms how long until the CacheDirective expires, in milliseconds
+     * @return A relative Expiration
+     */
+    public static Expiration newRelative(long ms) {
+      return new Expiration(ms, true);
+    }
+
+    /**
+     * Create a new absolute Expiration.
+     * 
+     * @param date when the CacheDirective expires
+     * @return An absolute Expiration
+     */
+    public static Expiration newAbsolute(Date date) {
+      return new Expiration(date.getTime(), false);
+    }
+
+    /**
+     * Create a new absolute Expiration.
+     * 
+     * @param ms when the CacheDirective expires, in milliseconds since the Unix
+     *          epoch.
+     * @return An absolute Expiration
+     */
+    public static Expiration newAbsolute(long ms) {
+      return new Expiration(ms, false);
+    }
+
+    private final long ms;
+    private final boolean isRelative;
+
+    private Expiration(long ms, boolean isRelative) {
+      this.ms = ms;
+      this.isRelative = isRelative;
+    }
+
+    /**
+     * @return true if Expiration was specified as a relative duration, false if
+     *         specified as an absolute time.
+     */
+    public boolean isRelative() {
+      return isRelative;
+    }
+
+    /**
+     * @return The raw underlying millisecond value, either a relative duration
+     *         or an absolute time as milliseconds since the Unix epoch.
+     */
+    public long getMillis() {
+      return ms;
+    }
+
+    /**
+     * @return Expiration time as a {@link Date} object. This converts a
+     *         relative Expiration into an absolute Date based on the local
+     *         clock.
+     */
+    public Date getAbsoluteDate() {
+      return new Date(getAbsoluteMillis());
+    }
+
+    /**
+     * @return Expiration time in milliseconds from the Unix epoch. This
+     *         converts a relative Expiration into an absolute time based on the
+     *         local clock.
+     */
+    public long getAbsoluteMillis() {
+      if (!isRelative) {
+        return ms;
+      } else {
+        return new Date().getTime() + ms;
+      }
+    }
+
+    @Override
+    public String toString() {
+      if (isRelative) {
+        return DFSUtil.durationToString(ms);
+      }
+      return DFSUtil.dateToIso8601String(new Date(ms));
+    }
   }
   }
 
 
   private final Long id;
   private final Long id;
   private final Path path;
   private final Path path;
   private final Short replication;
   private final Short replication;
   private final String pool;
   private final String pool;
+  private final Expiration expiration;
 
 
-  CacheDirectiveInfo(Long id, Path path, Short replication, String pool) {
+  CacheDirectiveInfo(Long id, Path path, Short replication, String pool,
+      Expiration expiration) {
     this.id = id;
     this.id = id;
     this.path = path;
     this.path = path;
     this.replication = replication;
     this.replication = replication;
     this.pool = pool;
     this.pool = pool;
+    this.expiration = expiration;
   }
   }
 
 
   /**
   /**
@@ -148,7 +269,14 @@ public class CacheDirectiveInfo {
   public String getPool() {
   public String getPool() {
     return pool;
     return pool;
   }
   }
-  
+
+  /**
+   * @return When this directive expires.
+   */
+  public Expiration getExpiration() {
+    return expiration;
+  }
+
   @Override
   @Override
   public boolean equals(Object o) {
   public boolean equals(Object o) {
     if (o == null) {
     if (o == null) {
@@ -162,6 +290,7 @@ public class CacheDirectiveInfo {
         append(getPath(), other.getPath()).
         append(getPath(), other.getPath()).
         append(getReplication(), other.getReplication()).
         append(getReplication(), other.getReplication()).
         append(getPool(), other.getPool()).
         append(getPool(), other.getPool()).
+        append(getExpiration(), other.getExpiration()).
         isEquals();
         isEquals();
   }
   }
 
 
@@ -171,6 +300,7 @@ public class CacheDirectiveInfo {
         append(path).
         append(path).
         append(replication).
         append(replication).
         append(pool).
         append(pool).
+        append(expiration).
         hashCode();
         hashCode();
   }
   }
 
 
@@ -181,19 +311,23 @@ public class CacheDirectiveInfo {
     String prefix = "";
     String prefix = "";
     if (id != null) {
     if (id != null) {
       builder.append(prefix).append("id: ").append(id);
       builder.append(prefix).append("id: ").append(id);
-      prefix = ",";
+      prefix = ", ";
     }
     }
     if (path != null) {
     if (path != null) {
       builder.append(prefix).append("path: ").append(path);
       builder.append(prefix).append("path: ").append(path);
-      prefix = ",";
+      prefix = ", ";
     }
     }
     if (replication != null) {
     if (replication != null) {
       builder.append(prefix).append("replication: ").append(replication);
       builder.append(prefix).append("replication: ").append(replication);
-      prefix = ",";
+      prefix = ", ";
     }
     }
     if (pool != null) {
     if (pool != null) {
       builder.append(prefix).append("pool: ").append(pool);
       builder.append(prefix).append("pool: ").append(pool);
-      prefix = ",";
+      prefix = ", ";
+    }
+    if (expiration != null) {
+      builder.append(prefix).append("expiration: ").append(expiration);
+      prefix = ", ";
     }
     }
     builder.append("}");
     builder.append("}");
     return builder.toString();
     return builder.toString();

+ 31 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java

@@ -30,6 +30,7 @@ public class CacheDirectiveStats {
     private long bytesNeeded;
     private long bytesNeeded;
     private long bytesCached;
     private long bytesCached;
     private long filesAffected;
     private long filesAffected;
+    private boolean hasExpired;
 
 
     /**
     /**
      * Builds a new CacheDirectiveStats populated with the set properties.
      * Builds a new CacheDirectiveStats populated with the set properties.
@@ -37,7 +38,8 @@ public class CacheDirectiveStats {
      * @return New CacheDirectiveStats.
      * @return New CacheDirectiveStats.
      */
      */
     public CacheDirectiveStats build() {
     public CacheDirectiveStats build() {
-      return new CacheDirectiveStats(bytesNeeded, bytesCached, filesAffected);
+      return new CacheDirectiveStats(bytesNeeded, bytesCached, filesAffected,
+          hasExpired);
     }
     }
 
 
     /**
     /**
@@ -52,7 +54,7 @@ public class CacheDirectiveStats {
      * @param bytesNeeded The bytes needed.
      * @param bytesNeeded The bytes needed.
      * @return This builder, for call chaining.
      * @return This builder, for call chaining.
      */
      */
-    public Builder setBytesNeeded(Long bytesNeeded) {
+    public Builder setBytesNeeded(long bytesNeeded) {
       this.bytesNeeded = bytesNeeded;
       this.bytesNeeded = bytesNeeded;
       return this;
       return this;
     }
     }
@@ -63,7 +65,7 @@ public class CacheDirectiveStats {
      * @param bytesCached The bytes cached.
      * @param bytesCached The bytes cached.
      * @return This builder, for call chaining.
      * @return This builder, for call chaining.
      */
      */
-    public Builder setBytesCached(Long bytesCached) {
+    public Builder setBytesCached(long bytesCached) {
       this.bytesCached = bytesCached;
       this.bytesCached = bytesCached;
       return this;
       return this;
     }
     }
@@ -74,44 +76,64 @@ public class CacheDirectiveStats {
      * @param filesAffected The files affected.
      * @param filesAffected The files affected.
      * @return This builder, for call chaining.
      * @return This builder, for call chaining.
      */
      */
-    public Builder setFilesAffected(Long filesAffected) {
+    public Builder setFilesAffected(long filesAffected) {
       this.filesAffected = filesAffected;
       this.filesAffected = filesAffected;
       return this;
       return this;
     }
     }
+
+    /**
+     * Sets whether this directive has expired.
+     * 
+     * @param hasExpired if this directive has expired
+     * @return This builder, for call chaining.
+     */
+    public Builder setHasExpired(boolean hasExpired) {
+      this.hasExpired = hasExpired;
+      return this;
+    }
   }
   }
 
 
   private final long bytesNeeded;
   private final long bytesNeeded;
   private final long bytesCached;
   private final long bytesCached;
   private final long filesAffected;
   private final long filesAffected;
+  private final boolean hasExpired;
 
 
   private CacheDirectiveStats(long bytesNeeded, long bytesCached,
   private CacheDirectiveStats(long bytesNeeded, long bytesCached,
-      long filesAffected) {
+      long filesAffected, boolean hasExpired) {
     this.bytesNeeded = bytesNeeded;
     this.bytesNeeded = bytesNeeded;
     this.bytesCached = bytesCached;
     this.bytesCached = bytesCached;
     this.filesAffected = filesAffected;
     this.filesAffected = filesAffected;
+    this.hasExpired = hasExpired;
   }
   }
 
 
   /**
   /**
    * @return The bytes needed.
    * @return The bytes needed.
    */
    */
-  public Long getBytesNeeded() {
+  public long getBytesNeeded() {
     return bytesNeeded;
     return bytesNeeded;
   }
   }
 
 
   /**
   /**
    * @return The bytes cached.
    * @return The bytes cached.
    */
    */
-  public Long getBytesCached() {
+  public long getBytesCached() {
     return bytesCached;
     return bytesCached;
   }
   }
 
 
   /**
   /**
    * @return The files affected.
    * @return The files affected.
    */
    */
-  public Long getFilesAffected() {
+  public long getFilesAffected() {
     return filesAffected;
     return filesAffected;
   }
   }
 
 
+  /**
+   * @return Whether this directive has expired.
+   */
+  public boolean hasExpired() {
+    return hasExpired;
+  }
+
   @Override
   @Override
   public String toString() {
   public String toString() {
     StringBuilder builder = new StringBuilder();
     StringBuilder builder = new StringBuilder();
@@ -119,6 +141,7 @@ public class CacheDirectiveStats {
     builder.append("bytesNeeded: ").append(bytesNeeded);
     builder.append("bytesNeeded: ").append(bytesNeeded);
     builder.append(", ").append("bytesCached: ").append(bytesCached);
     builder.append(", ").append("bytesCached: ").append(bytesCached);
     builder.append(", ").append("filesAffected: ").append(filesAffected);
     builder.append(", ").append("filesAffected: ").append(filesAffected);
+    builder.append(", ").append("hasExpired: ").append(hasExpired);
     builder.append("}");
     builder.append("}");
     return builder.toString();
     return builder.toString();
   }
   }

+ 45 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java

@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Describes a Cache Pool entry.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class CachePoolEntry {
+  private final CachePoolInfo info;
+  private final CachePoolStats stats;
+
+  public CachePoolEntry(CachePoolInfo info, CachePoolStats stats) {
+    this.info = info;
+    this.stats = stats;
+  }
+
+  public CachePoolInfo getInfo() {
+    return info;
+  }
+
+  public CachePoolStats getStats() {
+    return stats;
+  }
+}
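A small sketch of consuming the new CachePoolEntry-based listing (assumes an initialized DistributedFileSystem named dfs and a surrounding method that throws IOException):

    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    while (it.hasNext()) {
      CachePoolEntry entry = it.next();
      // Each entry pairs the pool's configuration with its live statistics.
      System.out.println(entry.getInfo().getPoolName() + " -> " + entry.getStats());
    }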

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java

@@ -30,6 +30,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
@@ -150,7 +151,10 @@ public class CachePoolInfo {
 
   public static void validate(CachePoolInfo info) throws IOException {
     if (info == null) {
-      throw new IOException("CachePoolInfo is null");
+      throw new InvalidRequestException("CachePoolInfo is null");
+    }
+    if ((info.getWeight() != null) && (info.getWeight() < 0)) {
+      throw new InvalidRequestException("CachePool weight is negative.");
     }
     validateName(info.poolName);
   }

+ 87 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java

@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * CachePoolStats describes cache pool statistics.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class CachePoolStats {
+  public static class Builder {
+    private long bytesNeeded;
+    private long bytesCached;
+    private long filesAffected;
+
+    public Builder() {
+    }
+
+    public Builder setBytesNeeded(long bytesNeeded) {
+      this.bytesNeeded = bytesNeeded;
+      return this;
+    }
+
+    public Builder setBytesCached(long bytesCached) {
+      this.bytesCached = bytesCached;
+      return this;
+    }
+
+    public Builder setFilesAffected(long filesAffected) {
+      this.filesAffected = filesAffected;
+      return this;
+    }
+
+    public CachePoolStats build() {
+      return new CachePoolStats(bytesNeeded, bytesCached, filesAffected);
+    }
+  };
+
+  private final long bytesNeeded;
+  private final long bytesCached;
+  private final long filesAffected;
+
+  private CachePoolStats(long bytesNeeded, long bytesCached, long filesAffected) {
+    this.bytesNeeded = bytesNeeded;
+    this.bytesCached = bytesCached;
+    this.filesAffected = filesAffected;
+  }
+
+  public long getBytesNeeded() {
+    return bytesNeeded;
+  }
+
+  public long getBytesCached() {
+    return bytesCached;
+  }
+
+  public long getFilesAffected() {
+    return filesAffected;
+  }
+
+  public String toString() {
+    return new StringBuilder().append("{").
+      append("bytesNeeded:").append(bytesNeeded).
+      append(", bytesCached:").append(bytesCached).
+      append(", filesAffected:").append(filesAffected).
+      append("}").toString();
+  }
+}
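And a sketch of assembling CachePoolStats with its builder (the numbers are placeholders):

    CachePoolStats stats = new CachePoolStats.Builder()
        .setBytesNeeded(1024L * 1024 * 1024)   // 1 GB requested for caching
        .setBytesCached(512L * 1024 * 1024)    // 512 MB currently cached
        .setFilesAffected(42)
        .build();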

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -1179,6 +1179,6 @@ public interface ClientProtocol {
    * @return A RemoteIterator which returns CachePool objects.
    */
   @Idempotent
-  public RemoteIterator<CachePoolInfo> listCachePools(String prevPool)
+  public RemoteIterator<CachePoolEntry> listCachePools(String prevPool)
       throws IOException;
 }

+ 7 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -51,6 +52,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowS
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
@@ -103,7 +106,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
@@ -1141,18 +1143,15 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public ListCachePoolsResponseProto listCachePools(RpcController controller,
       ListCachePoolsRequestProto request) throws ServiceException {
     try {
-      RemoteIterator<CachePoolInfo> iter =
+      RemoteIterator<CachePoolEntry> iter =
         server.listCachePools(request.getPrevPoolName());
       ListCachePoolsResponseProto.Builder responseBuilder =
         ListCachePoolsResponseProto.newBuilder();
       String prevPoolName = null;
       while (iter.hasNext()) {
-        CachePoolInfo pool = iter.next();
-        ListCachePoolsResponseElementProto.Builder elemBuilder = 
-            ListCachePoolsResponseElementProto.newBuilder();
-        elemBuilder.setInfo(PBHelper.convert(pool));
-        responseBuilder.addElements(elemBuilder.build());
-        prevPoolName = pool.getPoolName();
+        CachePoolEntry entry = iter.next();
+        responseBuilder.addEntries(PBHelper.convert(entry));
+        prevPoolName = entry.getInfo().getPoolName();
       }
       // fill in hasNext
       if (prevPoolName == null) {

+ 16 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -61,6 +62,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCac
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
@@ -96,7 +98,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
@@ -1141,23 +1142,23 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
     }
   }
   }
 
 
-  private static class BatchedCachePoolInfo
-    implements BatchedEntries<CachePoolInfo> {
+  private static class BatchedCachePoolEntries
+    implements BatchedEntries<CachePoolEntry> {
       private final ListCachePoolsResponseProto proto;
       private final ListCachePoolsResponseProto proto;
     
     
-    public BatchedCachePoolInfo(ListCachePoolsResponseProto proto) {
+    public BatchedCachePoolEntries(ListCachePoolsResponseProto proto) {
       this.proto = proto;
       this.proto = proto;
     }
     }
       
       
     @Override
     @Override
-    public CachePoolInfo get(int i) {
-      ListCachePoolsResponseElementProto elem = proto.getElements(i);
-      return PBHelper.convert(elem.getInfo());
+    public CachePoolEntry get(int i) {
+      CachePoolEntryProto elem = proto.getEntries(i);
+      return PBHelper.convert(elem);
     }
     }
 
 
     @Override
     @Override
     public int size() {
     public int size() {
-      return proto.getElementsCount();
+      return proto.getEntriesCount();
     }
     }
     
     
     @Override
     @Override
@@ -1165,19 +1166,19 @@ public class ClientNamenodeProtocolTranslatorPB implements
       return proto.getHasMore();
       return proto.getHasMore();
     }
     }
   }
   }
-  
+
   private class CachePoolIterator 
   private class CachePoolIterator 
-      extends BatchedRemoteIterator<String, CachePoolInfo> {
+      extends BatchedRemoteIterator<String, CachePoolEntry> {
 
 
     public CachePoolIterator(String prevKey) {
     public CachePoolIterator(String prevKey) {
       super(prevKey);
       super(prevKey);
     }
     }
 
 
     @Override
     @Override
-    public BatchedEntries<CachePoolInfo> makeRequest(String prevKey)
+    public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
         throws IOException {
         throws IOException {
       try {
       try {
-        return new BatchedCachePoolInfo(
+        return new BatchedCachePoolEntries(
             rpcProxy.listCachePools(null, 
             rpcProxy.listCachePools(null, 
               ListCachePoolsRequestProto.newBuilder().
               ListCachePoolsRequestProto.newBuilder().
                 setPrevPoolName(prevKey).build()));
                 setPrevPoolName(prevKey).build()));
@@ -1187,13 +1188,13 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
     }
 
 
     @Override
     @Override
-    public String elementToPrevKey(CachePoolInfo element) {
-      return element.getPoolName();
+    public String elementToPrevKey(CachePoolEntry entry) {
+      return entry.getInfo().getPoolName();
     }
     }
   }
   }
 
 
   @Override
   @Override
-  public RemoteIterator<CachePoolInfo> listCachePools(String prevKey)
+  public RemoteIterator<CachePoolEntry> listCachePools(String prevKey)
       throws IOException {
       throws IOException {
     return new CachePoolIterator(prevKey);
     return new CachePoolIterator(prevKey);
   }
   }

+ 59 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -39,7 +39,9 @@ import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -60,8 +62,11 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
@@ -1698,6 +1703,9 @@ public class PBHelper {
     if (info.getPool() != null) {
     if (info.getPool() != null) {
       builder.setPool(info.getPool());
       builder.setPool(info.getPool());
     }
     }
+    if (info.getExpiration() != null) {
+      builder.setExpiration(convert(info.getExpiration()));
+    }
     return builder.build();
     return builder.build();
   }
   }
 
 
@@ -1718,15 +1726,35 @@ public class PBHelper {
     if (proto.hasPool()) {
     if (proto.hasPool()) {
       builder.setPool(proto.getPool());
       builder.setPool(proto.getPool());
     }
     }
+    if (proto.hasExpiration()) {
+      builder.setExpiration(convert(proto.getExpiration()));
+    }
     return builder.build();
     return builder.build();
   }
   }
-  
+
+  public static CacheDirectiveInfoExpirationProto convert(
+      CacheDirectiveInfo.Expiration expiration) {
+    return CacheDirectiveInfoExpirationProto.newBuilder()
+        .setIsRelative(expiration.isRelative())
+        .setMillis(expiration.getMillis())
+        .build();
+  }
+
+  public static CacheDirectiveInfo.Expiration convert(
+      CacheDirectiveInfoExpirationProto proto) {
+    if (proto.getIsRelative()) {
+      return CacheDirectiveInfo.Expiration.newRelative(proto.getMillis());
+    }
+    return CacheDirectiveInfo.Expiration.newAbsolute(proto.getMillis());
+  }
+
   public static CacheDirectiveStatsProto convert(CacheDirectiveStats stats) {
   public static CacheDirectiveStatsProto convert(CacheDirectiveStats stats) {
     CacheDirectiveStatsProto.Builder builder = 
     CacheDirectiveStatsProto.Builder builder = 
         CacheDirectiveStatsProto.newBuilder();
         CacheDirectiveStatsProto.newBuilder();
     builder.setBytesNeeded(stats.getBytesNeeded());
     builder.setBytesNeeded(stats.getBytesNeeded());
     builder.setBytesCached(stats.getBytesCached());
     builder.setBytesCached(stats.getBytesCached());
     builder.setFilesAffected(stats.getFilesAffected());
     builder.setFilesAffected(stats.getFilesAffected());
+    builder.setHasExpired(stats.hasExpired());
     return builder.build();
     return builder.build();
   }
   }
   
   
@@ -1735,6 +1763,7 @@ public class PBHelper {
     builder.setBytesNeeded(proto.getBytesNeeded());
     builder.setBytesNeeded(proto.getBytesNeeded());
     builder.setBytesCached(proto.getBytesCached());
     builder.setBytesCached(proto.getBytesCached());
     builder.setFilesAffected(proto.getFilesAffected());
     builder.setFilesAffected(proto.getFilesAffected());
+    builder.setHasExpired(proto.getHasExpired());
     return builder.build();
     return builder.build();
   }
   }
 
 
@@ -1789,6 +1818,35 @@ public class PBHelper {
     return info;
     return info;
   }
   }
 
 
+  public static CachePoolStatsProto convert(CachePoolStats stats) {
+    CachePoolStatsProto.Builder builder = CachePoolStatsProto.newBuilder();
+    builder.setBytesNeeded(stats.getBytesNeeded());
+    builder.setBytesCached(stats.getBytesCached());
+    builder.setFilesAffected(stats.getFilesAffected());
+    return builder.build();
+  }
+
+  public static CachePoolStats convert (CachePoolStatsProto proto) {
+    CachePoolStats.Builder builder = new CachePoolStats.Builder();
+    builder.setBytesNeeded(proto.getBytesNeeded());
+    builder.setBytesCached(proto.getBytesCached());
+    builder.setFilesAffected(proto.getFilesAffected());
+    return builder.build();
+  }
+
+  public static CachePoolEntryProto convert(CachePoolEntry entry) {
+    CachePoolEntryProto.Builder builder = CachePoolEntryProto.newBuilder();
+    builder.setInfo(PBHelper.convert(entry.getInfo()));
+    builder.setStats(PBHelper.convert(entry.getStats()));
+    return builder.build();
+  }
+
+  public static CachePoolEntry convert (CachePoolEntryProto proto) {
+    CachePoolInfo info = PBHelper.convert(proto.getInfo());
+    CachePoolStats stats = PBHelper.convert(proto.getStats());
+    return new CachePoolEntry(info, stats);
+  }
+  
   public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
   public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
     return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
     return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
   }
   }
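
A hedged sketch of the new Expiration plumbing, using only the factory methods and converters visible in this hunk. Note that convert() preserves the relative/absolute flag; a relative expiration is only resolved to wall-clock time later, in the CacheManager hunk below.

  // Illustrative only; the 30-minute value is made up.
  CacheDirectiveInfo.Expiration relative =
      CacheDirectiveInfo.Expiration.newRelative(30 * 60 * 1000L);
  CacheDirectiveInfoExpirationProto proto = PBHelper.convert(relative);
  // proto.getIsRelative() == true, proto.getMillis() == 1800000
  CacheDirectiveInfo.Expiration roundTripped = PBHelper.convert(proto);
  // roundTripped is still relative; CacheManager.validateExpiryTime() turns it
  // into an absolute expiry via getAbsoluteMillis() against the local clock.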

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java

@@ -150,5 +150,5 @@ interface AsyncLogger {
   * Append an HTML-formatted report for this logger's status to the provided
   * StringBuilder. This is displayed on the NN web UI.
   */
-  public void appendHtmlReport(StringBuilder sb);
+  public void appendReport(StringBuilder sb);
 }

+ 9 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java

@@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRe
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
-import org.apache.jasper.compiler.JspUtil;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
@@ -177,17 +176,16 @@ class AsyncLoggerSet {
   * state of the underlying loggers.
   * @param sb the StringBuilder to append to
   */
-  void appendHtmlReport(StringBuilder sb) {
-    sb.append("<table class=\"storage\">");
-    sb.append("<thead><tr><td>JN</td><td>Status</td></tr></thead>\n");
-    for (AsyncLogger l : loggers) {
-      sb.append("<tr>");
-      sb.append("<td>" + JspUtil.escapeXml(l.toString()) + "</td>");
-      sb.append("<td>");
-      l.appendHtmlReport(sb);
-      sb.append("</td></tr>\n");
+  void appendReport(StringBuilder sb) {
+    for (int i = 0, len = loggers.size(); i < len; ++i) {
+      AsyncLogger l = loggers.get(i);
+      if (i != 0) {
+        sb.append(", ");
+      }
+      sb.append(l).append(" (");
+      l.appendReport(sb);
+      sb.append(")");
     }
-    sb.append("</table>");
   }
 
   /**
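
Purely illustrative: together with the IPCLoggerChannel and QuorumOutputStream hunks below, the quorum status is now rendered as plain text rather than an HTML table. For a healthy three-node quorum the combined report would look roughly like the following; the hosts and txids are invented, and the exact per-logger label comes from AsyncLogger.toString() plus appendReport(), which are not fully shown in this diff.

  Writing segment beginning at txid 5007. 
  192.0.2.1:8485 (Written txid 5007), 192.0.2.2:8485 (Written txid 5007), 192.0.2.3:8485 (Written txid 5007)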

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java

@@ -569,7 +569,7 @@ public class IPCLoggerChannel implements AsyncLogger {
   }
 
   @Override
-  public synchronized void appendHtmlReport(StringBuilder sb) {
+  public synchronized void appendReport(StringBuilder sb) {
     sb.append("Written txid ").append(highestAckedTxId);
     long behind = getLagTxns();
     if (behind > 0) {

+ 6 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java

@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.namenode.JournalSet;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
 
@@ -87,6 +88,7 @@ public class QuorumJournalManager implements JournalManager {
   private final AsyncLoggerSet loggers;
 
   private int outputBufferCapacity = 512 * 1024;
+  private final URLConnectionFactory connectionFactory;
   
   public QuorumJournalManager(Configuration conf,
       URI uri, NamespaceInfo nsInfo) throws IOException {
@@ -102,6 +104,8 @@ public class QuorumJournalManager implements JournalManager {
     this.uri = uri;
     this.nsInfo = nsInfo;
     this.loggers = new AsyncLoggerSet(createLoggers(loggerFactory));
+    this.connectionFactory = URLConnectionFactory
+        .newDefaultURLConnectionFactory(conf);
 
     // Configure timeouts.
     this.startSegmentTimeoutMs = conf.getInt(
@@ -475,8 +479,8 @@ public class QuorumJournalManager implements JournalManager {
         URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId());
 
         EditLogInputStream elis = EditLogFileInputStream.fromUrl(
-            url, remoteLog.getStartTxId(), remoteLog.getEndTxId(),
-            remoteLog.isInProgress());
+            connectionFactory, url, remoteLog.getStartTxId(),
+            remoteLog.getEndTxId(), remoteLog.isInProgress());
         allStreams.add(elis);
       }
     }
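
A minimal hedged sketch of the new fetch path: one URLConnectionFactory is built per journal manager and passed to every EditLogFileInputStream.fromUrl() call, presumably so HTTP settings come from the NameNode configuration rather than per-request defaults. conf, url, and the txid/in-progress values are assumed to be in scope; the calls themselves mirror this hunk.

  URLConnectionFactory factory =
      URLConnectionFactory.newDefaultURLConnectionFactory(conf);
  EditLogInputStream elis = EditLogFileInputStream.fromUrl(
      factory, url, startTxId, endTxId, inProgress);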

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java

@@ -114,10 +114,10 @@ class QuorumOutputStream extends EditLogOutputStream {
   }
 
   @Override
-  public String generateHtmlReport() {
+  public String generateReport() {
     StringBuilder sb = new StringBuilder();
-    sb.append("Writing segment beginning at txid " + segmentTxId + "<br/>\n");
-    loggers.appendHtmlReport(sb);
+    sb.append("Writing segment beginning at txid " + segmentTxId + ". \n");
+    loggers.appendReport(sb);
     return sb.toString();
   }
   

+ 12 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java

@@ -23,6 +23,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNE
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 
 import javax.servlet.ServletContext;
 
@@ -69,8 +71,15 @@ public class JournalNodeHttpServer {
         bindAddr.getHostName()));
 
     int tmpInfoPort = bindAddr.getPort();
+    URI httpEndpoint;
+    try {
+      httpEndpoint = new URI("http://" + NetUtils.getHostPortString(bindAddr));
+    } catch (URISyntaxException e) {
+      throw new IOException(e);
+    }
+
     httpServer = new HttpServer.Builder().setName("journal")
-        .setBindAddress(bindAddr.getHostName()).setPort(tmpInfoPort)
+        .addEndpoint(httpEndpoint)
         .setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
            new AccessControlList(conf.get(DFS_ADMIN, " ")))
        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
@@ -85,7 +94,7 @@ public class JournalNodeHttpServer {
     httpServer.start();
 
     // The web-server port can be ephemeral... ensure we have the correct info
-    infoPort = httpServer.getPort();
+    infoPort = httpServer.getConnectorAddress(0).getPort();
 
     LOG.info("Journal Web-server up at: " + bindAddr + ":" + infoPort);
   }
@@ -104,7 +113,7 @@ public class JournalNodeHttpServer {
    * Return the actual address bound to by the running server.
    */
   public InetSocketAddress getAddress() {
-    InetSocketAddress addr = httpServer.getListenerAddress();
+    InetSocketAddress addr = httpServer.getConnectorAddress(0);
     assert addr.getPort() != 0;
     return addr;
   }
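
A hedged sketch of the endpoint-oriented HttpServer.Builder pattern this patch moves to, using only calls visible in the diff; the bind address and port are placeholders, and conf plus DFS_ADMIN are assumed to be in scope.

  HttpServer server = new HttpServer.Builder()
      .setName("journal")
      .addEndpoint(URI.create("http://0.0.0.0:8480"))   // a listener is now a URI
      .setFindPort(false)                               // do not probe for a free port
      .setConf(conf)
      .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
      .build();
  server.start();
  // The bound port may be ephemeral; read it back from the first connector.
  int boundPort = server.getConnectorAddress(0).getPort();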

+ 21 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java

@@ -22,6 +22,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
 import java.io.Closeable;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collection;
+import java.util.Date;
 import java.util.Iterator;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.List;
@@ -208,17 +209,27 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
   /**
   /**
    * Scan all CacheDirectives.  Use the information to figure out
    * Scan all CacheDirectives.  Use the information to figure out
    * what cache replication factor each block should have.
    * what cache replication factor each block should have.
-   *
-   * @param mark       Whether the current scan is setting or clearing the mark
    */
    */
   private void rescanCacheDirectives() {
   private void rescanCacheDirectives() {
     FSDirectory fsDir = namesystem.getFSDirectory();
     FSDirectory fsDir = namesystem.getFSDirectory();
-    for (CacheDirective pce : cacheManager.getEntriesById().values()) {
+    final long now = new Date().getTime();
+    for (CacheDirective directive : cacheManager.getEntriesById().values()) {
+      // Reset the directive
+      directive.clearBytesNeeded();
+      directive.clearBytesCached();
+      directive.clearFilesAffected();
+      // Skip processing this entry if it has expired
+      LOG.info("Directive expiry is at " + directive.getExpiryTime());
+      if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Skipping directive id " + directive.getId()
+              + " because it has expired (" + directive.getExpiryTime() + " <= "
+              + now + ")");
+        }
+        continue;
+      }
       scannedDirectives++;
       scannedDirectives++;
-      pce.clearBytesNeeded();
-      pce.clearBytesCached();
-      pce.clearFilesAffected();
-      String path = pce.getPath();
+      String path = directive.getPath();
       INode node;
       INode node;
       try {
       try {
         node = fsDir.getINode(path);
         node = fsDir.getINode(path);
@@ -235,11 +246,11 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
         ReadOnlyList<INode> children = dir.getChildrenList(null);
         ReadOnlyList<INode> children = dir.getChildrenList(null);
         for (INode child : children) {
         for (INode child : children) {
           if (child.isFile()) {
           if (child.isFile()) {
-            rescanFile(pce, child.asFile());
+            rescanFile(directive, child.asFile());
           }
           }
         }
         }
       } else if (node.isFile()) {
       } else if (node.isFile()) {
-        rescanFile(pce, node.asFile());
+        rescanFile(directive, node.asFile());
       } else {
       } else {
         if (LOG.isDebugEnabled()) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Ignoring non-directory, non-file inode " + node +
           LOG.debug("Ignoring non-directory, non-file inode " + node +
@@ -301,7 +312,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     pce.addBytesNeeded(neededTotal);
     pce.addBytesNeeded(neededTotal);
     pce.addBytesCached(cachedTotal);
     pce.addBytesCached(cachedTotal);
     if (LOG.isTraceEnabled()) {
     if (LOG.isTraceEnabled()) {
-      LOG.debug("Directive " + pce.getEntryId() + " is caching " +
+      LOG.debug("Directive " + pce.getId() + " is caching " +
           file.getFullPathName() + ": " + cachedTotal + "/" + neededTotal);
           file.getFullPathName() + ": " + cachedTotal + "/" + neededTotal);
     }
     }
   }
   }
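
To make the skip condition above concrete, a small hedged example of how an absolute expiry time relates to the scan clock; the arithmetic mirrors this hunk and the Expiration helpers shown elsewhere in the patch, with a made-up duration.

  long now = new Date().getTime();                 // same clock the monitor uses
  long expiryTime = CacheDirectiveInfo.Expiration
      .newRelative(60 * 60 * 1000L)                // "one hour from now"
      .getAbsoluteMillis();                        // resolved against the local clock
  boolean skip = expiryTime > 0 && expiryTime <= now;   // false for the next hour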

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java

@@ -42,6 +42,12 @@ public interface DatanodeStatistics {
 
   /** @return the percentage of the block pool used space over the total capacity. */
   public float getPercentBlockPoolUsed();
+  
+  /** @return the total cache capacity of all DataNodes */
+  public long getCacheCapacity();
+
+  /** @return the total cache used by all DataNodes */
+  public long getCacheUsed();
 
   /** @return the xceiver count */
   public int getXceiverCount();

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java

@@ -149,6 +149,17 @@ class HeartbeatManager implements DatanodeStatistics {
   public synchronized int getXceiverCount() {
   public synchronized int getXceiverCount() {
     return stats.xceiverCount;
     return stats.xceiverCount;
   }
   }
+  
+  @Override
+  public synchronized long getCacheCapacity() {
+    return stats.cacheCapacity;
+  }
+
+  @Override
+  public synchronized long getCacheUsed() {
+    return stats.cacheUsed;
+  }
+  
 
 
   @Override
   @Override
   public synchronized long[] getStats() {
   public synchronized long[] getStats() {
@@ -309,6 +320,8 @@ class HeartbeatManager implements DatanodeStatistics {
     private long capacityRemaining = 0L;
     private long capacityRemaining = 0L;
     private long blockPoolUsed = 0L;
     private long blockPoolUsed = 0L;
     private int xceiverCount = 0;
     private int xceiverCount = 0;
+    private long cacheCapacity = 0L;
+    private long cacheUsed = 0L;
 
 
     private int expiredHeartbeats = 0;
     private int expiredHeartbeats = 0;
 
 
@@ -322,6 +335,8 @@ class HeartbeatManager implements DatanodeStatistics {
       } else {
       } else {
         capacityTotal += node.getDfsUsed();
         capacityTotal += node.getDfsUsed();
       }
       }
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
     }
     }
 
 
     private void subtract(final DatanodeDescriptor node) {
     private void subtract(final DatanodeDescriptor node) {
@@ -334,6 +349,8 @@ class HeartbeatManager implements DatanodeStatistics {
       } else {
       } else {
         capacityTotal -= node.getDfsUsed();
         capacityTotal -= node.getDfsUsed();
       }
       }
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
     }
     }
     
     
     /** Increment expired heartbeat counter. */
     /** Increment expired heartbeat counter. */

+ 20 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -52,6 +52,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 
 import javax.management.ObjectName;
 import javax.management.ObjectName;
 
 
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -234,6 +235,7 @@ public class DataNode extends Configured
   private volatile boolean heartbeatsDisabledForTests = false;
   private volatile boolean heartbeatsDisabledForTests = false;
   private DataStorage storage = null;
   private DataStorage storage = null;
   private HttpServer infoServer = null;
   private HttpServer infoServer = null;
+  private int infoPort;
   private int infoSecurePort;
   private int infoSecurePort;
   DataNodeMetrics metrics;
   DataNodeMetrics metrics;
   private InetSocketAddress streamingAddr;
   private InetSocketAddress streamingAddr;
@@ -354,27 +356,33 @@ public class DataNode extends Configured
     String infoHost = infoSocAddr.getHostName();
     String infoHost = infoSocAddr.getHostName();
     int tmpInfoPort = infoSocAddr.getPort();
     int tmpInfoPort = infoSocAddr.getPort();
     HttpServer.Builder builder = new HttpServer.Builder().setName("datanode")
     HttpServer.Builder builder = new HttpServer.Builder().setName("datanode")
-        .setBindAddress(infoHost).setPort(tmpInfoPort)
+        .addEndpoint(URI.create("http://" + NetUtils.getHostPortString(infoSocAddr)))
         .setFindPort(tmpInfoPort == 0).setConf(conf)
         .setFindPort(tmpInfoPort == 0).setConf(conf)
         .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
         .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
-    this.infoServer = (secureResources == null) ? builder.build() :
-        builder.setConnector(secureResources.getListener()).build();
 
 
     LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
     LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
     if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
     if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
-      boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
-                                               DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
       InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
       InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
           DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
           DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
-      Configuration sslConf = new HdfsConfiguration(false);
-      sslConf.addResource(conf.get(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-          "ssl-server.xml"));
-      this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
+      builder.addEndpoint(URI.create("https://"
+          + NetUtils.getHostPortString(secInfoSocAddr)));
+      Configuration sslConf = new Configuration(false);
+      sslConf.setBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf
+          .getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
+              DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
+      sslConf.addResource(conf.get(
+          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
+      DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
+
       if(LOG.isDebugEnabled()) {
       if(LOG.isDebugEnabled()) {
         LOG.debug("Datanode listening for SSL on " + secInfoSocAddr);
         LOG.debug("Datanode listening for SSL on " + secInfoSocAddr);
       }
       }
       infoSecurePort = secInfoSocAddr.getPort();
       infoSecurePort = secInfoSocAddr.getPort();
     }
     }
+
+    this.infoServer = (secureResources == null) ? builder.build() :
+      builder.setConnector(secureResources.getListener()).build();
     this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
     this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
     this.infoServer.addInternalServlet(null, "/getFileChecksum/*",
     this.infoServer.addInternalServlet(null, "/getFileChecksum/*",
         FileChecksumServlets.GetServlet.class);
         FileChecksumServlets.GetServlet.class);
@@ -390,6 +398,7 @@ public class DataNode extends Configured
           WebHdfsFileSystem.PATH_PREFIX + "/*");
           WebHdfsFileSystem.PATH_PREFIX + "/*");
     }
     }
     this.infoServer.start();
     this.infoServer.start();
+    this.infoPort = infoServer.getConnectorAddress(0).getPort();
   }
   }
   
   
   private void startPlugins(Configuration conf) {
   private void startPlugins(Configuration conf) {
@@ -712,7 +721,7 @@ public class DataNode extends Configured
     this.dnConf = new DNConf(conf);
     this.dnConf = new DNConf(conf);
 
 
     if (dnConf.maxLockedMemory > 0) {
     if (dnConf.maxLockedMemory > 0) {
-      if (!NativeIO.isAvailable()) {
+      if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
         throw new RuntimeException(String.format(
         throw new RuntimeException(String.format(
             "Cannot start datanode because the configured max locked memory" +
             "Cannot start datanode because the configured max locked memory" +
             " size (%s) is greater than zero and native code is not available.",
             " size (%s) is greater than zero and native code is not available.",
@@ -2320,7 +2329,7 @@ public class DataNode extends Configured
    * @return the datanode's http port
    * @return the datanode's http port
    */
    */
   public int getInfoPort() {
   public int getInfoPort() {
-    return infoServer.getPort();
+    return infoPort;
   }
   }
 
 
   /**
   /**

+ 61 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -310,7 +310,16 @@ public class DataStorage extends Storage {
   @Override
   @Override
   protected void setFieldsFromProperties(Properties props, StorageDirectory sd)
   protected void setFieldsFromProperties(Properties props, StorageDirectory sd)
       throws IOException {
       throws IOException {
-    setLayoutVersion(props, sd);
+    setFieldsFromProperties(props, sd, false, 0);
+  }
+
+  private void setFieldsFromProperties(Properties props, StorageDirectory sd,
+      boolean overrideLayoutVersion, int toLayoutVersion) throws IOException {
+    if (overrideLayoutVersion) {
+      this.layoutVersion = toLayoutVersion;
+    } else {
+      setLayoutVersion(props, sd);
+    }
     setcTime(props, sd);
     setcTime(props, sd);
     setStorageType(props, sd);
     setStorageType(props, sd);
     setClusterId(props, layoutVersion, sd);
     setClusterId(props, layoutVersion, sd);
@@ -374,13 +383,20 @@ public class DataStorage extends Storage {
     return true;
     return true;
   }
   }
   
   
+  /** Read VERSION file for rollback */
+  void readProperties(StorageDirectory sd, int rollbackLayoutVersion)
+      throws IOException {
+    Properties props = readPropertiesFile(sd.getVersionFile());
+    setFieldsFromProperties(props, sd, true, rollbackLayoutVersion);
+  }
+
   /**
   /**
    * Analize which and whether a transition of the fs state is required
    * Analize which and whether a transition of the fs state is required
    * and perform it if necessary.
    * and perform it if necessary.
    * 
    * 
-   * Rollback if previousLV >= LAYOUT_VERSION && prevCTime <= namenode.cTime
-   * Upgrade if this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime
-   * Regular startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime
+   * Rollback if the rollback startup option was specified.
+   * Upgrade if this.LV > LAYOUT_VERSION
+   * Regular startup if this.LV = LAYOUT_VERSION
    * 
    * 
    * @param datanode Datanode to which this storage belongs to
    * @param datanode Datanode to which this storage belongs to
    * @param sd  storage directory
    * @param sd  storage directory
@@ -420,25 +436,28 @@ public class DataStorage extends Storage {
           + nsInfo.getClusterID() + "; datanode clusterID = " + getClusterID());
           + nsInfo.getClusterID() + "; datanode clusterID = " + getClusterID());
     }
     }
     
     
-    // regular start up
-    if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION 
-        && this.cTime == nsInfo.getCTime())
+    // After addition of the federation feature, ctime check is only 
+    // meaningful at BlockPoolSliceStorage level. 
+
+    // regular start up. 
+    if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION)
       return; // regular startup
       return; // regular startup
     
     
     // do upgrade
     // do upgrade
-    if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION
-        || this.cTime < nsInfo.getCTime()) {
+    if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION) {
       doUpgrade(sd, nsInfo);  // upgrade
       doUpgrade(sd, nsInfo);  // upgrade
       return;
       return;
     }
     }
     
     
-    // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
-    // must shutdown
-    throw new IOException("Datanode state: LV = " + this.getLayoutVersion() 
-                          + " CTime = " + this.getCTime() 
-                          + " is newer than the namespace state: LV = "
-                          + nsInfo.getLayoutVersion() 
-                          + " CTime = " + nsInfo.getCTime());
+    // layoutVersion < LAYOUT_VERSION. I.e. stored layout version is newer
+    // than the version supported by datanode. This should have been caught
+    // in readProperties(), even if rollback was not carried out or somehow
+    // failed.
+    throw new IOException("BUG: The stored LV = " + this.getLayoutVersion()
+                          + " is newer than the supported LV = "
+                          + HdfsConstants.LAYOUT_VERSION
+                          + " or name node LV = "
+                          + nsInfo.getLayoutVersion());
   }
   }
 
 
   /**
   /**
@@ -464,8 +483,13 @@ public class DataStorage extends Storage {
    * @throws IOException on error
    * @throws IOException on error
    */
    */
   void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
   void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
+    // If the existing on-disk layout version supports federation, simply
+    // update its layout version.
     if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
     if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
-      clusterID = nsInfo.getClusterID();
+      // The VERSION file is already read in. Override the layoutVersion 
+      // field and overwrite the file.
+      LOG.info("Updating layout version from " + layoutVersion + " to "
+          + nsInfo.getLayoutVersion() + " for storage " + sd.getRoot());
       layoutVersion = nsInfo.getLayoutVersion();
       layoutVersion = nsInfo.getLayoutVersion();
       writeProperties(sd);
       writeProperties(sd);
       return;
       return;
@@ -550,15 +574,32 @@ public class DataStorage extends Storage {
    * <li> Remove removed.tmp </li>
    * <li> Remove removed.tmp </li>
    * </ol>
    * </ol>
    * 
    * 
-   * Do nothing, if previous directory does not exist.
+   * If previous directory does not exist and the current version supports
+   * federation, perform a simple rollback of layout version. This does not
+   * involve saving/restoration of actual data.
    */
    */
   void doRollback( StorageDirectory sd,
   void doRollback( StorageDirectory sd,
                    NamespaceInfo nsInfo
                    NamespaceInfo nsInfo
                    ) throws IOException {
                    ) throws IOException {
     File prevDir = sd.getPreviousDir();
     File prevDir = sd.getPreviousDir();
-    // regular startup if previous dir does not exist
-    if (!prevDir.exists())
+    // This is a regular startup or a post-federation rollback
+    if (!prevDir.exists()) {
+      // The current datanode version supports federation and the layout
+      // version from namenode matches what the datanode supports. An invalid
+      // rollback may happen if namenode didn't rollback and datanode is
+      // running a wrong version.  But this will be detected in block pool
+      // level and the invalid VERSION content will be overwritten when
+      // the error is corrected and rollback is retried.
+      if (LayoutVersion.supports(Feature.FEDERATION,
+          HdfsConstants.LAYOUT_VERSION) && 
+          HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion()) {
+        readProperties(sd, nsInfo.getLayoutVersion());
+        writeProperties(sd);
+        LOG.info("Layout version rolled back to " +
+            nsInfo.getLayoutVersion() + " for storage " + sd.getRoot());
+      }
       return;
       return;
+    }
     DataStorage prevInfo = new DataStorage();
     DataStorage prevInfo = new DataStorage();
     prevInfo.readPreviousVersionProperties(sd);
     prevInfo.readPreviousVersionProperties(sd);
 
 

+ 9 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java

@@ -145,6 +145,8 @@ public class FsDatasetCache {
    */
    */
   private final HashMap<Key, Value> mappableBlockMap = new HashMap<Key, Value>();
   private final HashMap<Key, Value> mappableBlockMap = new HashMap<Key, Value>();
 
 
+  private final AtomicLong numBlocksCached = new AtomicLong(0);
+
   private final FsDatasetImpl dataset;
   private final FsDatasetImpl dataset;
 
 
   private final ThreadPoolExecutor uncachingExecutor;
   private final ThreadPoolExecutor uncachingExecutor;
@@ -417,6 +419,7 @@ public class FsDatasetCache {
           LOG.debug("Successfully cached block " + key.id + " in " + key.bpid +
           LOG.debug("Successfully cached block " + key.id + " in " + key.bpid +
               ".  We are now caching " + newUsedBytes + " bytes in total.");
               ".  We are now caching " + newUsedBytes + " bytes in total.");
         }
         }
+        numBlocksCached.addAndGet(1);
         success = true;
         success = true;
       } finally {
       } finally {
         if (!success) {
         if (!success) {
@@ -465,6 +468,7 @@ public class FsDatasetCache {
       }
       }
       long newUsedBytes =
       long newUsedBytes =
           usedBytesCount.release(value.mappableBlock.getLength());
           usedBytesCount.release(value.mappableBlock.getLength());
+      numBlocksCached.addAndGet(-1);
       if (LOG.isDebugEnabled()) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Uncaching of block " + key.id + " in " + key.bpid +
         LOG.debug("Uncaching of block " + key.id + " in " + key.bpid +
             " completed.  usedBytes = " + newUsedBytes);
             " completed.  usedBytes = " + newUsedBytes);
@@ -477,14 +481,14 @@ public class FsDatasetCache {
   /**
   /**
    * Get the approximate amount of cache space used.
    * Get the approximate amount of cache space used.
    */
    */
-  public long getDnCacheUsed() {
+  public long getCacheUsed() {
     return usedBytesCount.get();
     return usedBytesCount.get();
   }
   }
 
 
   /**
   /**
    * Get the maximum amount of bytes we can cache.  This is a constant.
    * Get the maximum amount of bytes we can cache.  This is a constant.
    */
    */
-  public long getDnCacheCapacity() {
+  public long getCacheCapacity() {
     return maxBytes;
     return maxBytes;
   }
   }
 
 
@@ -496,4 +500,7 @@ public class FsDatasetCache {
     return numBlocksFailedToUncache.get();
     return numBlocksFailedToUncache.get();
   }
   }
 
 
+  public long getNumBlocksCached() {
+    return numBlocksCached.get();
+  }
 }
 }

+ 7 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -341,12 +341,12 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
   @Override // FSDatasetMBean
   public long getCacheUsed() {
-    return cacheManager.getDnCacheUsed();
+    return cacheManager.getCacheUsed();
   }
 
   @Override // FSDatasetMBean
   public long getCacheCapacity() {
-    return cacheManager.getDnCacheCapacity();
+    return cacheManager.getCacheCapacity();
   }
 
   @Override // FSDatasetMBean
@@ -359,6 +359,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     return cacheManager.getNumBlocksFailedToUncache();
   }
 
+  @Override // FSDatasetMBean
+  public long getNumBlocksCached() {
+    return cacheManager.getNumBlocksCached();
+  }
+
   /**
    * Find the block's on-disk length
    */

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java

@@ -88,6 +88,11 @@ public interface FSDatasetMBean {
    */
   public long getCacheCapacity();
 
+  /**
+   * Returns the number of blocks cached.
+   */
+  public long getNumBlocksCached();
+
   /**
    * Returns the number of blocks that the datanode was unable to cache
    */
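
A hedged illustration of reading the cache counters together, e.g. from a test or monitoring hook that holds an FSDatasetMBean reference (the dataset variable is assumed):

  long capacity = dataset.getCacheCapacity();   // maximum cacheable bytes; constant
  long used = dataset.getCacheUsed();           // approximate bytes currently cached
  long blocks = dataset.getNumBlocksCached();   // counter added by this patch
  System.out.println(String.format("cache: %d/%d bytes (%.1f%%), %d blocks",
      used, capacity, capacity == 0 ? 0.0 : 100.0 * used / capacity, blocks));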

+ 180 - 146
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java

@@ -17,12 +17,12 @@
  */
  */
 package org.apache.hadoop.hdfs.server.namenode;
 package org.apache.hadoop.hdfs.server.namenode;
 
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT;
 
 
@@ -43,17 +43,18 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
+import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.CacheDirective;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
-import org.apache.hadoop.hdfs.protocol.CacheDirective;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
@@ -99,24 +100,24 @@ public final class CacheManager {
   private final BlockManager blockManager;
   private final BlockManager blockManager;
 
 
   /**
   /**
-   * Cache entries, sorted by ID.
+   * Cache directives, sorted by ID.
    *
    *
    * listCacheDirectives relies on the ordering of elements in this map
    * listCacheDirectives relies on the ordering of elements in this map
    * to track what has already been listed by the client.
    * to track what has already been listed by the client.
    */
    */
-  private final TreeMap<Long, CacheDirective> entriesById =
+  private final TreeMap<Long, CacheDirective> directivesById =
       new TreeMap<Long, CacheDirective>();
       new TreeMap<Long, CacheDirective>();
 
 
   /**
   /**
-   * The entry ID to use for a new entry.  Entry IDs always increase, and are
+   * The directive ID to use for a new directive.  IDs always increase, and are
    * never reused.
    * never reused.
    */
    */
-  private long nextEntryId;
+  private long nextDirectiveId;
 
 
   /**
   /**
-   * Cache entries, sorted by path
+   * Cache directives, sorted by path
    */
    */
-  private final TreeMap<String, List<CacheDirective>> entriesByPath =
+  private final TreeMap<String, List<CacheDirective>> directivesByPath =
       new TreeMap<String, List<CacheDirective>>();
       new TreeMap<String, List<CacheDirective>>();
 
 
   /**
   /**
@@ -177,7 +178,7 @@ public final class CacheManager {
       BlockManager blockManager) {
       BlockManager blockManager) {
     this.namesystem = namesystem;
     this.namesystem = namesystem;
     this.blockManager = blockManager;
     this.blockManager = blockManager;
-    this.nextEntryId = 1;
+    this.nextDirectiveId = 1;
     this.maxListCachePoolsResponses = conf.getInt(
     this.maxListCachePoolsResponses = conf.getInt(
         DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
         DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
         DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
         DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
@@ -239,7 +240,7 @@ public final class CacheManager {
 
 
   public TreeMap<Long, CacheDirective> getEntriesById() {
   public TreeMap<Long, CacheDirective> getEntriesById() {
     assert namesystem.hasReadLock();
     assert namesystem.hasReadLock();
-    return entriesById;
+    return directivesById;
   }
   }
   
   
   @VisibleForTesting
   @VisibleForTesting
@@ -248,12 +249,12 @@ public final class CacheManager {
     return cachedBlocks;
     return cachedBlocks;
   }
   }
 
 
-  private long getNextEntryId() throws IOException {
+  private long getNextDirectiveId() throws IOException {
     assert namesystem.hasWriteLock();
     assert namesystem.hasWriteLock();
-    if (nextEntryId >= Long.MAX_VALUE - 1) {
+    if (nextDirectiveId >= Long.MAX_VALUE - 1) {
       throw new IOException("No more available IDs.");
       throw new IOException("No more available IDs.");
     }
     }
-    return nextEntryId++;
+    return nextDirectiveId++;
   }
   }
 
 
   // Helper getter / validation methods
   // Helper getter / validation methods
@@ -301,7 +302,35 @@ public final class CacheManager {
   }
   }
 
 
   /**
   /**
-   * Get a CacheDirective by ID, validating the ID and that the entry
+   * Calculates the absolute expiry time of the directive from the
+   * {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
+   * into an absolute time based on the local clock.
+   * 
+   * @param directive from which to get the expiry time
+   * @param defaultValue to use if Expiration is not set
+   * @return Absolute expiry time in milliseconds since Unix epoch
+   * @throws InvalidRequestException if the Expiration is invalid
+   */
+  private static long validateExpiryTime(CacheDirectiveInfo directive,
+      long defaultValue) throws InvalidRequestException {
+    long expiryTime;
+    CacheDirectiveInfo.Expiration expiration = directive.getExpiration();
+    if (expiration != null) {
+      if (expiration.getMillis() < 0) {
+        throw new InvalidRequestException("Cannot set a negative expiration: "
+            + expiration.getMillis());
+      }
+      // Converts a relative duration into an absolute time based on the local
+      // clock
+      expiryTime = expiration.getAbsoluteMillis();
+    } else {
+      expiryTime = defaultValue;
+    }
+    return expiryTime;
+  }
+
+  /**
+   * Get a CacheDirective by ID, validating the ID and that the directive
    * exists.
    * exists.
    */
    */
   private CacheDirective getById(long id) throws InvalidRequestException {
   private CacheDirective getById(long id) throws InvalidRequestException {
@@ -309,13 +338,13 @@ public final class CacheManager {
     if (id <= 0) {
     if (id <= 0) {
       throw new InvalidRequestException("Invalid negative ID.");
       throw new InvalidRequestException("Invalid negative ID.");
     }
     }
-    // Find the entry.
-    CacheDirective entry = entriesById.get(id);
-    if (entry == null) {
+    // Find the directive.
+    CacheDirective directive = directivesById.get(id);
+    if (directive == null) {
       throw new InvalidRequestException("No directive with ID " + id
       throw new InvalidRequestException("No directive with ID " + id
           + " found.");
           + " found.");
     }
     }
-    return entry;
+    return directive;
   }
   }
 
 
   /**
   /**
@@ -332,122 +361,134 @@ public final class CacheManager {
 
 
   // RPC handlers
   // RPC handlers
 
 
-  private void addInternal(CacheDirective entry) {
-    entriesById.put(entry.getEntryId(), entry);
-    String path = entry.getPath();
-    List<CacheDirective> entryList = entriesByPath.get(path);
-    if (entryList == null) {
-      entryList = new ArrayList<CacheDirective>(1);
-      entriesByPath.put(path, entryList);
+  private void addInternal(CacheDirective directive, CachePool pool) {
+    boolean addedDirective = pool.getDirectiveList().add(directive);
+    assert addedDirective;
+    directivesById.put(directive.getId(), directive);
+    String path = directive.getPath();
+    List<CacheDirective> directives = directivesByPath.get(path);
+    if (directives == null) {
+      directives = new ArrayList<CacheDirective>(1);
+      directivesByPath.put(path, directives);
+    }
+    directives.add(directive);
+  }
+
+  /**
+   * To be called only from the edit log loading code
+   */
+  CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive)
+      throws InvalidRequestException {
+    long id = directive.getId();
+    CacheDirective entry =
+        new CacheDirective(
+            directive.getId(),
+            directive.getPath().toUri().getPath(),
+            directive.getReplication(),
+            directive.getExpiration().getAbsoluteMillis());
+    CachePool pool = cachePools.get(directive.getPool());
+    addInternal(entry, pool);
+    if (nextDirectiveId <= id) {
+      nextDirectiveId = id + 1;
     }
     }
-    entryList.add(entry);
+    return entry.toInfo();
   }
   }
 
 
   public CacheDirectiveInfo addDirective(
   public CacheDirectiveInfo addDirective(
-      CacheDirectiveInfo directive, FSPermissionChecker pc)
+      CacheDirectiveInfo info, FSPermissionChecker pc)
       throws IOException {
       throws IOException {
     assert namesystem.hasWriteLock();
     assert namesystem.hasWriteLock();
-    CacheDirective entry;
+    CacheDirective directive;
     try {
     try {
-      CachePool pool = getCachePool(validatePoolName(directive));
+      CachePool pool = getCachePool(validatePoolName(info));
       checkWritePermission(pc, pool);
       checkWritePermission(pc, pool);
-      String path = validatePath(directive);
-      short replication = validateReplication(directive, (short)1);
-      long id;
-      if (directive.getId() != null) {
-        // We are loading an entry from the edit log.
-        // Use the ID from the edit log.
-        id = directive.getId();
-        if (id <= 0) {
-          throw new InvalidRequestException("can't add an ID " +
-              "of " + id + ": it is not positive.");
-        }
-        if (id >= Long.MAX_VALUE) {
-          throw new InvalidRequestException("can't add an ID " +
-              "of " + id + ": it is too big.");
-        }
-        if (nextEntryId <= id) {
-          nextEntryId = id + 1;
-        }
-      } else {
-        // Add a new entry with the next available ID.
-        id = getNextEntryId();
-      }
-      entry = new CacheDirective(id, path, replication, pool);
-      addInternal(entry);
+      String path = validatePath(info);
+      short replication = validateReplication(info, (short)1);
+      long expiryTime = validateExpiryTime(info,
+          CacheDirectiveInfo.Expiration.EXPIRY_NEVER);
+      // All validation passed
+      // Add a new entry with the next available ID.
+      long id = getNextDirectiveId();
+      directive = new CacheDirective(id, path, replication, expiryTime);
+      addInternal(directive, pool);
     } catch (IOException e) {
     } catch (IOException e) {
-      LOG.warn("addDirective of " + directive + " failed: ", e);
+      LOG.warn("addDirective of " + info + " failed: ", e);
       throw e;
       throw e;
     }
     }
-    LOG.info("addDirective of " + directive + " successful.");
+    LOG.info("addDirective of " + info + " successful.");
     if (monitor != null) {
     if (monitor != null) {
       monitor.kick();
       monitor.kick();
     }
     }
-    return entry.toDirective();
+    return directive.toInfo();
   }
   }
 
 
-  public void modifyDirective(CacheDirectiveInfo directive,
+  public void modifyDirective(CacheDirectiveInfo info,
       FSPermissionChecker pc) throws IOException {
       FSPermissionChecker pc) throws IOException {
     assert namesystem.hasWriteLock();
     assert namesystem.hasWriteLock();
     String idString =
     String idString =
-        (directive.getId() == null) ?
-            "(null)" : directive.getId().toString();
+        (info.getId() == null) ?
+            "(null)" : info.getId().toString();
     try {
     try {
       // Check for invalid IDs.
       // Check for invalid IDs.
-      Long id = directive.getId();
+      Long id = info.getId();
       if (id == null) {
       if (id == null) {
         throw new InvalidRequestException("Must supply an ID.");
         throw new InvalidRequestException("Must supply an ID.");
       }
       }
       CacheDirective prevEntry = getById(id);
       CacheDirective prevEntry = getById(id);
       checkWritePermission(pc, prevEntry.getPool());
       checkWritePermission(pc, prevEntry.getPool());
       String path = prevEntry.getPath();
       String path = prevEntry.getPath();
-      if (directive.getPath() != null) {
-        path = validatePath(directive);
+      if (info.getPath() != null) {
+        path = validatePath(info);
       }
       }
+
       short replication = prevEntry.getReplication();
       short replication = prevEntry.getReplication();
-      if (directive.getReplication() != null) {
-        replication = validateReplication(directive, replication);
-      }
+      replication = validateReplication(info, replication);
+
+      long expiryTime = prevEntry.getExpiryTime();
+      expiryTime = validateExpiryTime(info, expiryTime);
+
       CachePool pool = prevEntry.getPool();
       CachePool pool = prevEntry.getPool();
-      if (directive.getPool() != null) {
-        pool = getCachePool(validatePoolName(directive));
+      if (info.getPool() != null) {
+        pool = getCachePool(validatePoolName(info));
         checkWritePermission(pc, pool);
         checkWritePermission(pc, pool);
       }
       }
       removeInternal(prevEntry);
       removeInternal(prevEntry);
       CacheDirective newEntry =
       CacheDirective newEntry =
-          new CacheDirective(id, path, replication, pool);
-      addInternal(newEntry);
+          new CacheDirective(id, path, replication, expiryTime);
+      addInternal(newEntry, pool);
     } catch (IOException e) {
     } catch (IOException e) {
       LOG.warn("modifyDirective of " + idString + " failed: ", e);
       LOG.warn("modifyDirective of " + idString + " failed: ", e);
       throw e;
       throw e;
     }
     }
     LOG.info("modifyDirective of " + idString + " successfully applied " +
     LOG.info("modifyDirective of " + idString + " successfully applied " +
-        directive + ".");
+        info + ".");
   }
   }
 
 
-  public void removeInternal(CacheDirective existing)
+  public void removeInternal(CacheDirective directive)
       throws InvalidRequestException {
       throws InvalidRequestException {
     assert namesystem.hasWriteLock();
     assert namesystem.hasWriteLock();
-    // Remove the corresponding entry in entriesByPath.
-    String path = existing.getPath();
-    List<CacheDirective> entries = entriesByPath.get(path);
-    if (entries == null || !entries.remove(existing)) {
+    // Remove the corresponding entry in directivesByPath.
+    String path = directive.getPath();
+    List<CacheDirective> directives = directivesByPath.get(path);
+    if (directives == null || !directives.remove(directive)) {
       throw new InvalidRequestException("Failed to locate entry " +
       throw new InvalidRequestException("Failed to locate entry " +
-          existing.getEntryId() + " by path " + existing.getPath());
+          directive.getId() + " by path " + directive.getPath());
     }
     }
-    if (entries.size() == 0) {
-      entriesByPath.remove(path);
+    if (directives.size() == 0) {
+      directivesByPath.remove(path);
     }
     }
-    entriesById.remove(existing.getEntryId());
+    directivesById.remove(directive.getId());
+    directive.getPool().getDirectiveList().remove(directive);
+    assert directive.getPool() == null;
   }
   }
 
 
   public void removeDirective(long id, FSPermissionChecker pc)
   public void removeDirective(long id, FSPermissionChecker pc)
       throws IOException {
       throws IOException {
     assert namesystem.hasWriteLock();
     assert namesystem.hasWriteLock();
     try {
     try {
-      CacheDirective existing = getById(id);
-      checkWritePermission(pc, existing.getPool());
-      removeInternal(existing);
+      CacheDirective directive = getById(id);
+      checkWritePermission(pc, directive.getPool());
+      removeInternal(directive);
     } catch (IOException e) {
     } catch (IOException e) {
       LOG.warn("removeDirective of " + id + " failed: ", e);
       LOG.warn("removeDirective of " + id + " failed: ", e);
       throw e;
       throw e;
@@ -478,13 +519,13 @@ public final class CacheManager {
         new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
         new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
     int numReplies = 0;
     int numReplies = 0;
     SortedMap<Long, CacheDirective> tailMap =
     SortedMap<Long, CacheDirective> tailMap =
-      entriesById.tailMap(prevId + 1);
+      directivesById.tailMap(prevId + 1);
     for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
     for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
       if (numReplies >= maxListCacheDirectivesNumResponses) {
       if (numReplies >= maxListCacheDirectivesNumResponses) {
         return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
         return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
       }
       }
-      CacheDirective curEntry = cur.getValue();
-      CacheDirectiveInfo info = cur.getValue().toDirective();
+      CacheDirective curDirective = cur.getValue();
+      CacheDirectiveInfo info = cur.getValue().toInfo();
       if (filter.getPool() != null && 
       if (filter.getPool() != null && 
           !info.getPool().equals(filter.getPool())) {
           !info.getPool().equals(filter.getPool())) {
         continue;
         continue;
@@ -496,7 +537,7 @@ public final class CacheManager {
       boolean hasPermission = true;
       boolean hasPermission = true;
       if (pc != null) {
       if (pc != null) {
         try {
         try {
-          pc.checkPermission(curEntry.getPool(), FsAction.READ);
+          pc.checkPermission(curDirective.getPool(), FsAction.READ);
         } catch (AccessControlException e) {
         } catch (AccessControlException e) {
           hasPermission = false;
           hasPermission = false;
         }
         }
@@ -530,7 +571,7 @@ public final class CacheManager {
     pool = CachePool.createFromInfoAndDefaults(info);
     pool = CachePool.createFromInfoAndDefaults(info);
     cachePools.put(pool.getPoolName(), pool);
     cachePools.put(pool.getPoolName(), pool);
     LOG.info("Created new cache pool " + pool);
     LOG.info("Created new cache pool " + pool);
-    return pool.getInfo(null);
+    return pool.getInfo(true);
   }
   }
 
 
   /**
   /**
@@ -599,39 +640,34 @@ public final class CacheManager {
       throw new InvalidRequestException(
       throw new InvalidRequestException(
           "Cannot remove non-existent cache pool " + poolName);
           "Cannot remove non-existent cache pool " + poolName);
     }
     }
-    
-    // Remove entries using this pool
-    // TODO: could optimize this somewhat to avoid the need to iterate
-    // over all entries in entriesById
-    Iterator<Entry<Long, CacheDirective>> iter = 
-        entriesById.entrySet().iterator();
+    // Remove all directives in this pool.
+    Iterator<CacheDirective> iter = pool.getDirectiveList().iterator();
     while (iter.hasNext()) {
     while (iter.hasNext()) {
-      Entry<Long, CacheDirective> entry = iter.next();
-      if (entry.getValue().getPool() == pool) {
-        entriesByPath.remove(entry.getValue().getPath());
-        iter.remove();
-      }
+      CacheDirective directive = iter.next();
+      directivesByPath.remove(directive.getPath());
+      directivesById.remove(directive.getId());
+      iter.remove();
     }
     }
     if (monitor != null) {
     if (monitor != null) {
       monitor.kick();
       monitor.kick();
     }
     }
   }
   }
 
 
-  public BatchedListEntries<CachePoolInfo>
+  public BatchedListEntries<CachePoolEntry>
       listCachePools(FSPermissionChecker pc, String prevKey) {
       listCachePools(FSPermissionChecker pc, String prevKey) {
     assert namesystem.hasReadLock();
     assert namesystem.hasReadLock();
     final int NUM_PRE_ALLOCATED_ENTRIES = 16;
     final int NUM_PRE_ALLOCATED_ENTRIES = 16;
-    ArrayList<CachePoolInfo> results = 
-        new ArrayList<CachePoolInfo>(NUM_PRE_ALLOCATED_ENTRIES);
+    ArrayList<CachePoolEntry> results = 
+        new ArrayList<CachePoolEntry>(NUM_PRE_ALLOCATED_ENTRIES);
     SortedMap<String, CachePool> tailMap = cachePools.tailMap(prevKey, false);
     SortedMap<String, CachePool> tailMap = cachePools.tailMap(prevKey, false);
     int numListed = 0;
     int numListed = 0;
     for (Entry<String, CachePool> cur : tailMap.entrySet()) {
     for (Entry<String, CachePool> cur : tailMap.entrySet()) {
       if (numListed++ >= maxListCachePoolsResponses) {
       if (numListed++ >= maxListCachePoolsResponses) {
-        return new BatchedListEntries<CachePoolInfo>(results, true);
+        return new BatchedListEntries<CachePoolEntry>(results, true);
       }
       }
-      results.add(cur.getValue().getInfo(pc));
+      results.add(cur.getValue().getEntry(pc));
     }
     }
-    return new BatchedListEntries<CachePoolInfo>(results, false);
+    return new BatchedListEntries<CachePoolEntry>(results, false);
   }
   }
 
 
   public void setCachedLocations(LocatedBlock block) {
   public void setCachedLocations(LocatedBlock block) {
@@ -693,13 +729,6 @@ public final class CacheManager {
     for (Iterator<Long> iter = blockIds.iterator(); iter.hasNext(); ) {
     for (Iterator<Long> iter = blockIds.iterator(); iter.hasNext(); ) {
       Block block = new Block(iter.next());
       Block block = new Block(iter.next());
       BlockInfo blockInfo = blockManager.getStoredBlock(block);
       BlockInfo blockInfo = blockManager.getStoredBlock(block);
-      if (blockInfo.getGenerationStamp() < block.getGenerationStamp()) {
-        // The NameNode will eventually remove or update the out-of-date block.
-        // Until then, we pretend that it isn't cached.
-        LOG.warn("Genstamp in cache report disagrees with our genstamp for " +
-          block + ": expected genstamp " + blockInfo.getGenerationStamp());
-        continue;
-      }
       if (!blockInfo.isComplete()) {
       if (!blockInfo.isComplete()) {
         LOG.warn("Ignoring block id " + block.getBlockId() + ", because " +
         LOG.warn("Ignoring block id " + block.getBlockId() + ", because " +
             "it is in not complete yet.  It is in state " + 
             "it is in not complete yet.  It is in state " + 
@@ -743,9 +772,9 @@ public final class CacheManager {
    */
    */
   public void saveState(DataOutput out, String sdPath)
   public void saveState(DataOutput out, String sdPath)
       throws IOException {
       throws IOException {
-    out.writeLong(nextEntryId);
+    out.writeLong(nextDirectiveId);
     savePools(out, sdPath);
     savePools(out, sdPath);
-    saveEntries(out, sdPath);
+    saveDirectives(out, sdPath);
   }
   }
 
 
   /**
   /**
@@ -755,10 +784,10 @@ public final class CacheManager {
    * @throws IOException
    * @throws IOException
    */
    */
   public void loadState(DataInput in) throws IOException {
   public void loadState(DataInput in) throws IOException {
-    nextEntryId = in.readLong();
-    // pools need to be loaded first since entries point to their parent pool
+    nextDirectiveId = in.readLong();
+    // pools need to be loaded first since directives point to their parent pool
     loadPools(in);
     loadPools(in);
-    loadEntries(in);
+    loadDirectives(in);
   }
   }
 
 
   /**
   /**
@@ -773,7 +802,7 @@ public final class CacheManager {
     Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
     Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
     out.writeInt(cachePools.size());
     out.writeInt(cachePools.size());
     for (CachePool pool: cachePools.values()) {
     for (CachePool pool: cachePools.values()) {
-      pool.getInfo(null).writeTo(out);
+      pool.getInfo(true).writeTo(out);
       counter.increment();
       counter.increment();
     }
     }
     prog.endStep(Phase.SAVING_CHECKPOINT, step);
     prog.endStep(Phase.SAVING_CHECKPOINT, step);
@@ -782,19 +811,20 @@ public final class CacheManager {
   /*
   /*
    * Save cache entries to fsimage
    * Save cache entries to fsimage
    */
    */
-  private void saveEntries(DataOutput out, String sdPath)
+  private void saveDirectives(DataOutput out, String sdPath)
       throws IOException {
       throws IOException {
     StartupProgress prog = NameNode.getStartupProgress();
     StartupProgress prog = NameNode.getStartupProgress();
     Step step = new Step(StepType.CACHE_ENTRIES, sdPath);
     Step step = new Step(StepType.CACHE_ENTRIES, sdPath);
     prog.beginStep(Phase.SAVING_CHECKPOINT, step);
     prog.beginStep(Phase.SAVING_CHECKPOINT, step);
-    prog.setTotal(Phase.SAVING_CHECKPOINT, step, entriesById.size());
+    prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size());
     Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
     Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
-    out.writeInt(entriesById.size());
-    for (CacheDirective entry: entriesById.values()) {
-      out.writeLong(entry.getEntryId());
-      Text.writeString(out, entry.getPath());
-      out.writeShort(entry.getReplication());
-      Text.writeString(out, entry.getPool().getPoolName());
+    out.writeInt(directivesById.size());
+    for (CacheDirective directive : directivesById.values()) {
+      out.writeLong(directive.getId());
+      Text.writeString(out, directive.getPath());
+      out.writeShort(directive.getReplication());
+      Text.writeString(out, directive.getPool().getPoolName());
+      out.writeLong(directive.getExpiryTime());
       counter.increment();
       counter.increment();
     }
     }
     prog.endStep(Phase.SAVING_CHECKPOINT, step);
     prog.endStep(Phase.SAVING_CHECKPOINT, step);
@@ -819,38 +849,42 @@ public final class CacheManager {
   }
   }
 
 
   /**
   /**
-   * Load cache entries from the fsimage
+   * Load cache directives from the fsimage
    */
    */
-  private void loadEntries(DataInput in) throws IOException {
+  private void loadDirectives(DataInput in) throws IOException {
     StartupProgress prog = NameNode.getStartupProgress();
     StartupProgress prog = NameNode.getStartupProgress();
     Step step = new Step(StepType.CACHE_ENTRIES);
     Step step = new Step(StepType.CACHE_ENTRIES);
     prog.beginStep(Phase.LOADING_FSIMAGE, step);
     prog.beginStep(Phase.LOADING_FSIMAGE, step);
-    int numberOfEntries = in.readInt();
-    prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfEntries);
+    int numDirectives = in.readInt();
+    prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
     Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
     Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
-    for (int i = 0; i < numberOfEntries; i++) {
-      long entryId = in.readLong();
+    for (int i = 0; i < numDirectives; i++) {
+      long directiveId = in.readLong();
       String path = Text.readString(in);
       String path = Text.readString(in);
       short replication = in.readShort();
       short replication = in.readShort();
       String poolName = Text.readString(in);
       String poolName = Text.readString(in);
+      long expiryTime = in.readLong();
       // Get pool reference by looking it up in the map
       // Get pool reference by looking it up in the map
       CachePool pool = cachePools.get(poolName);
       CachePool pool = cachePools.get(poolName);
       if (pool == null) {
       if (pool == null) {
-        throw new IOException("Entry refers to pool " + poolName +
+        throw new IOException("Directive refers to pool " + poolName +
             ", which does not exist.");
             ", which does not exist.");
       }
       }
-      CacheDirective entry =
-          new CacheDirective(entryId, path, replication, pool);
-      if (entriesById.put(entry.getEntryId(), entry) != null) {
-        throw new IOException("An entry with ID " + entry.getEntryId() +
+      CacheDirective directive =
+          new CacheDirective(directiveId, path, replication, expiryTime);
+      boolean addedDirective = pool.getDirectiveList().add(directive);
+      assert addedDirective;
+      if (directivesById.put(directive.getId(), directive) != null) {
+        throw new IOException("A directive with ID " + directive.getId() +
             " already exists");
             " already exists");
       }
       }
-      List<CacheDirective> entries = entriesByPath.get(entry.getPath());
-      if (entries == null) {
-        entries = new LinkedList<CacheDirective>();
-        entriesByPath.put(entry.getPath(), entries);
+      List<CacheDirective> directives =
+          directivesByPath.get(directive.getPath());
+      if (directives == null) {
+        directives = new LinkedList<CacheDirective>();
+        directivesByPath.put(directive.getPath(), directives);
       }
       }
-      entries.add(entry);
+      directives.add(directive);
       counter.increment();
       counter.increment();
     }
     }
     prog.endStep(Phase.LOADING_FSIMAGE, step);
     prog.endStep(Phase.LOADING_FSIMAGE, step);
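
Two small invariants drive the CacheManager changes above: directives live in a TreeMap keyed by ID so that listCacheDirectives can resume exactly where a previous call stopped, and a relative Expiration is turned into an absolute local-clock time once, when the directive is added or modified. A minimal, self-contained sketch of both ideas using only JDK types (the class and method names here are illustrative stand-ins, not the real HDFS classes):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    class DirectivePagingSketch {
      // Sorted by ID, mirroring directivesById above.
      private final TreeMap<Long, String> directivesById = new TreeMap<Long, String>();
      private long nextDirectiveId = 1;

      long add(String path) {
        long id = nextDirectiveId++;          // IDs only grow and are never reused
        directivesById.put(id, path);
        return id;
      }

      // Resume a listing after the last ID the client has already seen.
      List<Map.Entry<Long, String>> listAfter(long prevId, int limit) {
        List<Map.Entry<Long, String>> page = new ArrayList<Map.Entry<Long, String>>();
        for (Map.Entry<Long, String> e : directivesById.tailMap(prevId + 1).entrySet()) {
          if (page.size() >= limit) {
            break;                            // caller asks again with the last ID it got
          }
          page.add(e);
        }
        return page;
      }

      // A relative expiration becomes an absolute time on the local clock;
      // a missing expiration falls back to the caller-supplied default.
      static long absoluteExpiry(Long relativeMillis, long defaultValue) {
        if (relativeMillis == null) {
          return defaultValue;
        }
        if (relativeMillis < 0) {
          throw new IllegalArgumentException("Cannot set a negative expiration");
        }
        return System.currentTimeMillis() + relativeMillis;
      }
    }

Because IDs only grow and are never reused, a client that last saw ID 42 can call listAfter(42, limit) and will neither skip nor repeat a directive.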

+ 42 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java

@@ -26,9 +26,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.CacheDirective;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.IntrusiveCollection;
 
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Preconditions;
 
 
@@ -69,6 +73,22 @@ public final class CachePool {
   
   
   private int weight;
   private int weight;
 
 
+  public final static class DirectiveList
+      extends IntrusiveCollection<CacheDirective> {
+    private CachePool cachePool;
+
+    private DirectiveList(CachePool cachePool) {
+      this.cachePool = cachePool;
+    }
+
+    public CachePool getCachePool() {
+      return cachePool;
+    }
+  }
+
+  @Nonnull
+  private final DirectiveList directiveList = new DirectiveList(this);
+
   /**
   /**
    * Create a new cache pool based on a CachePoolInfo object and the defaults.
    * Create a new cache pool based on a CachePoolInfo object and the defaults.
    * We will fill in information that was not supplied according to the
    * We will fill in information that was not supplied according to the
@@ -171,7 +191,7 @@ public final class CachePool {
    * @return
    * @return
    *          Cache pool information.
    *          Cache pool information.
    */
    */
-  private CachePoolInfo getInfo(boolean fullInfo) {
+  CachePoolInfo getInfo(boolean fullInfo) {
     CachePoolInfo info = new CachePoolInfo(poolName);
     CachePoolInfo info = new CachePoolInfo(poolName);
     if (!fullInfo) {
     if (!fullInfo) {
       return info;
       return info;
@@ -182,6 +202,19 @@ public final class CachePool {
         setWeight(weight);
         setWeight(weight);
   }
   }
 
 
+  /**
+   * Get statistics about this CachePool.
+   *
+   * @return   Cache pool statistics.
+   */
+  private CachePoolStats getStats() {
+    return new CachePoolStats.Builder().
+        setBytesNeeded(0).
+        setBytesCached(0).
+        setFilesAffected(0).
+        build();
+  }
+
   /**
   /**
    * Returns a CachePoolInfo describing this CachePool based on the permissions
    * Returns a CachePoolInfo describing this CachePool based on the permissions
    * of the calling user. Unprivileged users will see only minimal descriptive
    * of the calling user. Unprivileged users will see only minimal descriptive
@@ -189,9 +222,9 @@ public final class CachePool {
    * 
    * 
    * @param pc Permission checker to be used to validate the user's permissions,
    * @param pc Permission checker to be used to validate the user's permissions,
    *          or null
    *          or null
-   * @return CachePoolInfo describing this CachePool
+   * @return CachePoolEntry describing this CachePool
    */
    */
-  public CachePoolInfo getInfo(FSPermissionChecker pc) {
+  public CachePoolEntry getEntry(FSPermissionChecker pc) {
     boolean hasPermission = true;
     boolean hasPermission = true;
     if (pc != null) {
     if (pc != null) {
       try {
       try {
@@ -200,7 +233,8 @@ public final class CachePool {
         hasPermission = false;
         hasPermission = false;
       }
       }
     }
     }
-    return getInfo(hasPermission);
+    return new CachePoolEntry(getInfo(hasPermission), 
+        hasPermission ? getStats() : new CachePoolStats.Builder().build());
   }
   }
 
 
   public String toString() {
   public String toString() {
@@ -212,4 +246,8 @@ public final class CachePool {
         append(", weight:").append(weight).
         append(", weight:").append(weight).
         append(" }").toString();
         append(" }").toString();
   }
   }
+
+  public DirectiveList getDirectiveList() {
+    return directiveList;
+  }
 }
 }
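
The DirectiveList added here gives each pool an owner-aware collection of its directives, which is what lets removeCachePool in the CacheManager hunk walk only that pool's directives, and what the assert directive.getPool() == null in removeInternal relies on. A rough stand-in for the pattern with plain JDK collections (the real code uses Hadoop's IntrusiveCollection; the classes below are hypothetical):

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    class PoolSketch {
      final String name;
      // Each pool owns the list of its directives, like CachePool.DirectiveList.
      private final List<DirectiveSketch> directives = new ArrayList<DirectiveSketch>();

      PoolSketch(String name) {
        this.name = name;
      }

      void add(DirectiveSketch d) {
        d.pool = this;                  // back-pointer, like DirectiveList.getCachePool()
        directives.add(d);
      }

      // Removing the pool detaches exactly the directives it owns, nothing else.
      void removeAllDirectives() {
        for (Iterator<DirectiveSketch> it = directives.iterator(); it.hasNext();) {
          DirectiveSketch d = it.next();
          d.pool = null;                // afterwards d.pool == null, as asserted above
          it.remove();
        }
      }
    }

    class DirectiveSketch {
      final long id;
      PoolSketch pool;                  // null while the directive is not in any pool

      DirectiveSketch(long id) {
        this.id = id;
      }
    }

The intrusive variant in the patch roughly does the same, except that it avoids a separate wrapper node per element and clears the back-pointer as part of remove().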

+ 48 - 92
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java → hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java

@@ -17,121 +17,76 @@
  */
  */
 package org.apache.hadoop.hdfs.server.namenode;
 package org.apache.hadoop.hdfs.server.namenode;
 
 
-import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
 /**
- * Directory INode class that has a quota restriction
+ * Quota feature for {@link INodeDirectory}. 
  */
  */
-public class INodeDirectoryWithQuota extends INodeDirectory {
+public final class DirectoryWithQuotaFeature extends INodeDirectory.Feature {
+  public static final long DEFAULT_NAMESPACE_QUOTA = Long.MAX_VALUE;
+  public static final long DEFAULT_DISKSPACE_QUOTA = HdfsConstants.QUOTA_RESET;
+
   /** Name space quota */
   /** Name space quota */
-  private long nsQuota = Long.MAX_VALUE;
+  private long nsQuota = DEFAULT_NAMESPACE_QUOTA;
   /** Name space count */
   /** Name space count */
   private long namespace = 1L;
   private long namespace = 1L;
   /** Disk space quota */
   /** Disk space quota */
-  private long dsQuota = HdfsConstants.QUOTA_RESET;
+  private long dsQuota = DEFAULT_DISKSPACE_QUOTA;
   /** Disk space count */
   /** Disk space count */
   private long diskspace = 0L;
   private long diskspace = 0L;
   
   
-  /** Convert an existing directory inode to one with the given quota
-   * 
-   * @param nsQuota Namespace quota to be assigned to this inode
-   * @param dsQuota Diskspace quota to be assigned to this indoe
-   * @param other The other inode from which all other properties are copied
-   */
-  INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
-      long nsQuota, long dsQuota) {
-    super(other, adopt);
-    final Quota.Counts counts = other.computeQuotaUsage();
-    this.namespace = counts.get(Quota.NAMESPACE);
-    this.diskspace = counts.get(Quota.DISKSPACE);
+  DirectoryWithQuotaFeature(long nsQuota, long dsQuota) {
     this.nsQuota = nsQuota;
     this.nsQuota = nsQuota;
     this.dsQuota = dsQuota;
     this.dsQuota = dsQuota;
   }
   }
-  
-  public INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
-      Quota.Counts quota) {
-    this(other, adopt, quota.get(Quota.NAMESPACE), quota.get(Quota.DISKSPACE));
-  }
 
 
-  /** constructor with no quota verification */
-  INodeDirectoryWithQuota(long id, byte[] name, PermissionStatus permissions,
-      long modificationTime, long nsQuota, long dsQuota) {
-    super(id, name, permissions, modificationTime);
-    this.nsQuota = nsQuota;
-    this.dsQuota = dsQuota;
-  }
-  
-  /** constructor with no quota verification */
-  INodeDirectoryWithQuota(long id, byte[] name, PermissionStatus permissions) {
-    super(id, name, permissions, 0L);
-  }
-  
-  @Override
-  public Quota.Counts getQuotaCounts() {
+  /** @return the quota set or -1 if it is not set. */
+  Quota.Counts getQuota() {
     return Quota.Counts.newInstance(nsQuota, dsQuota);
     return Quota.Counts.newInstance(nsQuota, dsQuota);
   }
   }
   
   
   /** Set this directory's quota
   /** Set this directory's quota
    * 
    * 
    * @param nsQuota Namespace quota to be set
    * @param nsQuota Namespace quota to be set
-   * @param dsQuota diskspace quota to be set
+   * @param dsQuota Diskspace quota to be set
    */
    */
-  public void setQuota(long nsQuota, long dsQuota) {
+  void setQuota(long nsQuota, long dsQuota) {
     this.nsQuota = nsQuota;
     this.nsQuota = nsQuota;
     this.dsQuota = dsQuota;
     this.dsQuota = dsQuota;
   }
   }
   
   
-  @Override
-  public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache,
-      int lastSnapshotId) {
-    if (useCache && isQuotaSet()) {
-      // use cache value
-      counts.add(Quota.NAMESPACE, namespace);
-      counts.add(Quota.DISKSPACE, diskspace);
-    } else {
-      super.computeQuotaUsage(counts, false, lastSnapshotId);
-    }
+  Quota.Counts addNamespaceDiskspace(Quota.Counts counts) {
+    counts.add(Quota.NAMESPACE, namespace);
+    counts.add(Quota.DISKSPACE, diskspace);
     return counts;
     return counts;
   }
   }
 
 
-  @Override
-  public ContentSummaryComputationContext computeContentSummary(
+  ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
       final ContentSummaryComputationContext summary) {
       final ContentSummaryComputationContext summary) {
     final long original = summary.getCounts().get(Content.DISKSPACE);
     final long original = summary.getCounts().get(Content.DISKSPACE);
     long oldYieldCount = summary.getYieldCount();
     long oldYieldCount = summary.getYieldCount();
-    super.computeContentSummary(summary);
+    dir.computeDirectoryContentSummary(summary);
     // Check only when the content has not changed in the middle.
     // Check only when the content has not changed in the middle.
     if (oldYieldCount == summary.getYieldCount()) {
     if (oldYieldCount == summary.getYieldCount()) {
-      checkDiskspace(summary.getCounts().get(Content.DISKSPACE) - original);
+      checkDiskspace(dir, summary.getCounts().get(Content.DISKSPACE) - original);
     }
     }
     return summary;
     return summary;
   }
   }
   
   
-  private void checkDiskspace(final long computed) {
-    if (-1 != getQuotaCounts().get(Quota.DISKSPACE) && diskspace != computed) {
+  private void checkDiskspace(final INodeDirectory dir, final long computed) {
+    if (-1 != getQuota().get(Quota.DISKSPACE) && diskspace != computed) {
       NameNode.LOG.error("BUG: Inconsistent diskspace for directory "
       NameNode.LOG.error("BUG: Inconsistent diskspace for directory "
-          + getFullPathName() + ". Cached = " + diskspace
+          + dir.getFullPathName() + ". Cached = " + diskspace
           + " != Computed = " + computed);
           + " != Computed = " + computed);
     }
     }
   }
   }
 
 
-  /** Get the number of names in the subtree rooted at this directory
-   * @return the size of the subtree rooted at this directory
-   */
-  long numItemsInTree() {
-    return namespace;
-  }
-  
-  @Override
-  public final void addSpaceConsumed(final long nsDelta, final long dsDelta,
-      boolean verify) throws QuotaExceededException {
-    if (isQuotaSet()) { 
+  void addSpaceConsumed(final INodeDirectory dir, final long nsDelta,
+      final long dsDelta, boolean verify) throws QuotaExceededException {
+    if (dir.isQuotaSet()) { 
       // The following steps are important: 
       // The following steps are important: 
       // check quotas in this inode and all ancestors before changing counts
       // check quotas in this inode and all ancestors before changing counts
       // so that no change is made if there is any quota violation.
       // so that no change is made if there is any quota violation.
@@ -141,11 +96,11 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
         verifyQuota(nsDelta, dsDelta);
         verifyQuota(nsDelta, dsDelta);
       }
       }
       // (2) verify quota and then add count in ancestors 
       // (2) verify quota and then add count in ancestors 
-      super.addSpaceConsumed(nsDelta, dsDelta, verify);
+      dir.addSpaceConsumed2Parent(nsDelta, dsDelta, verify);
       // (3) add count in this inode
       // (3) add count in this inode
       addSpaceConsumed2Cache(nsDelta, dsDelta);
       addSpaceConsumed2Cache(nsDelta, dsDelta);
     } else {
     } else {
-      super.addSpaceConsumed(nsDelta, dsDelta, verify);
+      dir.addSpaceConsumed2Parent(nsDelta, dsDelta, verify);
     }
     }
   }
   }
   
   
@@ -154,7 +109,7 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
    * @param nsDelta the change of the tree size
    * @param nsDelta the change of the tree size
    * @param dsDelta change to disk space occupied
    * @param dsDelta change to disk space occupied
    */
    */
-  protected void addSpaceConsumed2Cache(long nsDelta, long dsDelta) {
+  public void addSpaceConsumed2Cache(long nsDelta, long dsDelta) {
     namespace += nsDelta;
     namespace += nsDelta;
     diskspace += dsDelta;
     diskspace += dsDelta;
   }
   }
@@ -172,41 +127,42 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
     this.diskspace = diskspace;
     this.diskspace = diskspace;
   }
   }
   
   
+  /** @return the namespace and diskspace consumed. */
+  public Quota.Counts getSpaceConsumed() {
+    return Quota.Counts.newInstance(namespace, diskspace);
+  }
+
   /** Verify if the namespace quota is violated after applying delta. */
   /** Verify if the namespace quota is violated after applying delta. */
-  void verifyNamespaceQuota(long delta) throws NSQuotaExceededException {
+  private void verifyNamespaceQuota(long delta) throws NSQuotaExceededException {
     if (Quota.isViolated(nsQuota, namespace, delta)) {
     if (Quota.isViolated(nsQuota, namespace, delta)) {
       throw new NSQuotaExceededException(nsQuota, namespace + delta);
       throw new NSQuotaExceededException(nsQuota, namespace + delta);
     }
     }
   }
   }
+  /** Verify if the diskspace quota is violated after applying delta. */
+  private void verifyDiskspaceQuota(long delta) throws DSQuotaExceededException {
+    if (Quota.isViolated(dsQuota, diskspace, delta)) {
+      throw new DSQuotaExceededException(dsQuota, diskspace + delta);
+    }
+  }
 
 
-  /** Verify if the namespace count disk space satisfies the quota restriction 
-   * @throws QuotaExceededException if the given quota is less than the count
+  /**
+   * @throws QuotaExceededException if namespace or diskspace quotas is
+   *         violated after applying the deltas.
    */
    */
   void verifyQuota(long nsDelta, long dsDelta) throws QuotaExceededException {
   void verifyQuota(long nsDelta, long dsDelta) throws QuotaExceededException {
     verifyNamespaceQuota(nsDelta);
     verifyNamespaceQuota(nsDelta);
-
-    if (Quota.isViolated(dsQuota, diskspace, dsDelta)) {
-      throw new DSQuotaExceededException(dsQuota, diskspace + dsDelta);
-    }
+    verifyDiskspaceQuota(dsDelta);
   }
   }
 
 
-  String namespaceString() {
+  private String namespaceString() {
     return "namespace: " + (nsQuota < 0? "-": namespace + "/" + nsQuota);
     return "namespace: " + (nsQuota < 0? "-": namespace + "/" + nsQuota);
   }
   }
-  String diskspaceString() {
+  private String diskspaceString() {
     return "diskspace: " + (dsQuota < 0? "-": diskspace + "/" + dsQuota);
     return "diskspace: " + (dsQuota < 0? "-": diskspace + "/" + dsQuota);
   }
   }
-  String quotaString() {
-    return ", Quota[" + namespaceString() + ", " + diskspaceString() + "]";
-  }
   
   
-  @VisibleForTesting
-  public long getNamespace() {
-    return this.namespace;
-  }
-  
-  @VisibleForTesting
-  public long getDiskspace() {
-    return this.diskspace;
+  @Override
+  public String toString() {
+    return "Quota[" + namespaceString() + ", " + diskspaceString() + "]";
   }
   }
 }
 }

+ 29 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java

@@ -36,8 +36,11 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpGetFailedException;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpGetFailedException;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Preconditions;
@@ -100,15 +103,22 @@ public class EditLogFileInputStream extends EditLogInputStream {
   /**
   /**
    * Open an EditLogInputStream for the given URL.
    * Open an EditLogInputStream for the given URL.
    *
    *
-   * @param url the url hosting the log
-   * @param startTxId the expected starting txid
-   * @param endTxId the expected ending txid
-   * @param inProgress whether the log is in-progress
+   * @param connectionFactory
+   *          the URLConnectionFactory used to create the connection.
+   * @param url
+   *          the url hosting the log
+   * @param startTxId
+   *          the expected starting txid
+   * @param endTxId
+   *          the expected ending txid
+   * @param inProgress
+   *          whether the log is in-progress
    * @return a stream from which edits may be read
    * @return a stream from which edits may be read
    */
    */
-  public static EditLogInputStream fromUrl(URL url, long startTxId,
-      long endTxId, boolean inProgress) {
-    return new EditLogFileInputStream(new URLLog(url),
+  public static EditLogInputStream fromUrl(
+      URLConnectionFactory connectionFactory, URL url, long startTxId,
+      long endTxId, boolean inProgress) {
+    return new EditLogFileInputStream(new URLLog(connectionFactory, url),
         startTxId, endTxId, inProgress);
         startTxId, endTxId, inProgress);
   }
   }
   
   
@@ -365,8 +375,12 @@ public class EditLogFileInputStream extends EditLogInputStream {
     private long advertisedSize = -1;
     private long advertisedSize = -1;
 
 
     private final static String CONTENT_LENGTH = "Content-Length";
     private final static String CONTENT_LENGTH = "Content-Length";
+    private final URLConnectionFactory connectionFactory;
+    private final boolean isSpnegoEnabled;
 
 
-    public URLLog(URL url) {
+    public URLLog(URLConnectionFactory connectionFactory, URL url) {
+      this.connectionFactory = connectionFactory;
+      this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
       this.url = url;
       this.url = url;
     }
     }
 
 
@@ -376,8 +390,13 @@ public class EditLogFileInputStream extends EditLogInputStream {
           new PrivilegedExceptionAction<InputStream>() {
           new PrivilegedExceptionAction<InputStream>() {
             @Override
             @Override
             public InputStream run() throws IOException {
             public InputStream run() throws IOException {
-              HttpURLConnection connection = (HttpURLConnection)
-                  SecurityUtil.openSecureHttpConnection(url);
+              HttpURLConnection connection;
+              try {
+                connection = (HttpURLConnection)
+                    connectionFactory.openConnection(url, isSpnegoEnabled);
+              } catch (AuthenticationException e) {
+                throw new IOException(e);
+              }
               
               
               if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
               if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
                 throw new HttpGetFailedException(
                 throw new HttpGetFailedException(
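
The stream now goes through the URLConnectionFactory handed in by its caller, asking for SPNEGO only when security is on, and folds the checked AuthenticationException into the IOException the rest of the code already expects. A condensed sketch of that call pattern, following the hunk above; how the factory itself is built is outside this diff, so it is taken as a parameter here just as URLLog does:

    import java.io.IOException;
    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    import org.apache.hadoop.hdfs.web.URLConnectionFactory;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authentication.client.AuthenticationException;

    class EditLogFetchSketch {
      static InputStream open(URLConnectionFactory factory, URL url) throws IOException {
        boolean spnego = UserGroupInformation.isSecurityEnabled();
        HttpURLConnection connection;
        try {
          // The factory applies SPNEGO negotiation when asked to.
          connection = (HttpURLConnection) factory.openConnection(url, spnego);
        } catch (AuthenticationException e) {
          throw new IOException(e);   // callers of this path only handle IOException
        }
        if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
          throw new IOException("Fetching " + url + " failed, HTTP response code "
              + connection.getResponseCode());
        }
        return connection.getInputStream();
      }
    }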

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java

@@ -24,7 +24,6 @@ import static org.apache.hadoop.util.Time.now;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.jasper.compiler.JspUtil;
 
 
 /**
 /**
  * A generic abstract class to support journaling of edits logs into 
  * A generic abstract class to support journaling of edits logs into 
@@ -141,10 +140,10 @@ public abstract class EditLogOutputStream implements Closeable {
   }
   }
 
 
   /**
   /**
-   * @return a short HTML snippet suitable for describing the current
+   * @return a short text snippet suitable for describing the current
    * status of the stream
    * status of the stream
    */
    */
-  public String generateHtmlReport() {
-    return JspUtil.escapeXml(this.toString());
+  public String generateReport() {
+    return toString();
   }
   }
 }
 }
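
FSDirectory now reaches quota logic through getDirectoryWithQuotaFeature() instead of downcasting, but the ordering it preserves is the important part: verify the delta against every ancestor on the path first, and only then update any cached count, so that a quota violation leaves the tree untouched. A hypothetical, distilled version of that two-phase walk:

    import java.util.List;

    class QuotaWalkSketch {
      static class Dir {
        final String name;
        final Long nsQuota;   // null stands in for "no quota feature attached"
        long used;

        Dir(String name, Long nsQuota, long used) {
          this.name = name;
          this.nsQuota = nsQuota;
          this.used = used;
        }
      }

      // Apply a namespace delta along the path from the root down to the parent.
      static void addNamespace(List<Dir> ancestors, long delta) {
        // (1) verify everywhere first, so a failure changes nothing
        for (Dir d : ancestors) {
          if (d.nsQuota != null && d.used + delta > d.nsQuota) {
            throw new IllegalStateException("namespace quota exceeded at " + d.name);
          }
        }
        // (2) only then update the cached counts on the quota-carrying directories
        for (Dir d : ancestors) {
          if (d.nsQuota != null) {
            d.used += delta;
          }
        }
      }
    }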

+ 24 - 40
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -87,11 +87,15 @@ import com.google.common.base.Preconditions;
  * 
  * 
  *************************************************/
  *************************************************/
 public class FSDirectory implements Closeable {
 public class FSDirectory implements Closeable {
-  private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) {
-    final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
+  private static INodeDirectorySnapshottable createRoot(FSNamesystem namesystem) {
+    final INodeDirectory r = new INodeDirectory(
         INodeId.ROOT_INODE_ID,
         INodeId.ROOT_INODE_ID,
         INodeDirectory.ROOT_NAME,
         INodeDirectory.ROOT_NAME,
-        namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)));
+        namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)),
+        0L);
+    r.addDirectoryWithQuotaFeature(
+        DirectoryWithQuotaFeature.DEFAULT_NAMESPACE_QUOTA,
+        DirectoryWithQuotaFeature.DEFAULT_DISKSPACE_QUOTA);
     final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(r);
     final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(r);
     s.setSnapshotQuota(0);
     s.setSnapshotQuota(0);
     return s;
     return s;
@@ -107,7 +111,7 @@ public class FSDirectory implements Closeable {
   public final static String DOT_INODES_STRING = ".inodes";
   public final static String DOT_INODES_STRING = ".inodes";
   public final static byte[] DOT_INODES = 
   public final static byte[] DOT_INODES = 
       DFSUtil.string2Bytes(DOT_INODES_STRING);
       DFSUtil.string2Bytes(DOT_INODES_STRING);
-  INodeDirectoryWithQuota rootDir;
+  INodeDirectory rootDir;
   FSImage fsImage;  
   FSImage fsImage;  
   private final FSNamesystem namesystem;
   private final FSNamesystem namesystem;
   private volatile boolean ready = false;
   private volatile boolean ready = false;
@@ -202,7 +206,7 @@ public class FSDirectory implements Closeable {
   }
   }
 
 
   /** @return the root directory inode. */
   /** @return the root directory inode. */
-  public INodeDirectoryWithQuota getRoot() {
+  public INodeDirectory getRoot() {
     return rootDir;
     return rootDir;
   }
   }
 
 
@@ -452,8 +456,8 @@ public class FSDirectory implements Closeable {
   
   
   boolean unprotectedRemoveBlock(String path,
   boolean unprotectedRemoveBlock(String path,
       INodeFile fileNode, Block block) throws IOException {
       INodeFile fileNode, Block block) throws IOException {
-    Preconditions.checkArgument(fileNode.isUnderConstruction());
     // modify file-> block and blocksMap
     // modify file-> block and blocksMap
+    // fileNode should be under construction
     boolean removed = fileNode.removeLastBlock(block);
     boolean removed = fileNode.removeLastBlock(block);
     if (!removed) {
     if (!removed) {
       return false;
       return false;
@@ -1800,9 +1804,8 @@ public class FSDirectory implements Closeable {
     final INode[] inodes = inodesInPath.getINodes();
     final INode[] inodes = inodesInPath.getINodes();
     for(int i=0; i < numOfINodes; i++) {
     for(int i=0; i < numOfINodes; i++) {
       if (inodes[i].isQuotaSet()) { // a directory with quota
       if (inodes[i].isQuotaSet()) { // a directory with quota
-        INodeDirectoryWithQuota node = (INodeDirectoryWithQuota) inodes[i]
-            .asDirectory(); 
-        node.addSpaceConsumed2Cache(nsDelta, dsDelta);
+        inodes[i].asDirectory().getDirectoryWithQuotaFeature()
+            .addSpaceConsumed2Cache(nsDelta, dsDelta);
       }
       }
     }
     }
   }
   }
@@ -2035,10 +2038,11 @@ public class FSDirectory implements Closeable {
         // Stop checking for quota when common ancestor is reached
         // Stop checking for quota when common ancestor is reached
         return;
         return;
       }
       }
-      if (inodes[i].isQuotaSet()) { // a directory with quota
+      final DirectoryWithQuotaFeature q
+          = inodes[i].asDirectory().getDirectoryWithQuotaFeature();
+      if (q != null) { // a directory with quota
         try {
         try {
-          ((INodeDirectoryWithQuota) inodes[i].asDirectory()).verifyQuota(
-              nsDelta, dsDelta);
+          q.verifyQuota(nsDelta, dsDelta);
         } catch (QuotaExceededException e) {
         } catch (QuotaExceededException e) {
           e.setPathName(getFullPathName(inodes, i));
           e.setPathName(getFullPathName(inodes, i));
           throw e;
           throw e;
@@ -2385,35 +2389,14 @@ public class FSDirectory implements Closeable {
       if (dsQuota == HdfsConstants.QUOTA_DONT_SET) {
       if (dsQuota == HdfsConstants.QUOTA_DONT_SET) {
         dsQuota = oldDsQuota;
         dsQuota = oldDsQuota;
       }        
       }        
+      if (oldNsQuota == nsQuota && oldDsQuota == dsQuota) {
+        return null;
+      }
 
 
       final Snapshot latest = iip.getLatestSnapshot();
       final Snapshot latest = iip.getLatestSnapshot();
-      if (dirNode instanceof INodeDirectoryWithQuota) {
-        INodeDirectoryWithQuota quotaNode = (INodeDirectoryWithQuota) dirNode;
-        Quota.Counts counts = null;
-        if (!quotaNode.isQuotaSet()) {
-          // dirNode must be an INodeDirectoryWithSnapshot whose quota has not
-          // been set yet
-          counts = quotaNode.computeQuotaUsage();
-        }
-        // a directory with quota; so set the quota to the new value
-        quotaNode.setQuota(nsQuota, dsQuota);
-        if (quotaNode.isQuotaSet() && counts != null) {
-          quotaNode.setSpaceConsumed(counts.get(Quota.NAMESPACE),
-              counts.get(Quota.DISKSPACE));
-        } else if (!quotaNode.isQuotaSet() && latest == null) {
-          // do not replace the node if the node is a snapshottable directory
-          // without snapshots
-          if (!(quotaNode instanceof INodeDirectoryWithSnapshot)) {
-            // will not come here for root because root is snapshottable and
-            // root's nsQuota is always set
-            return quotaNode.replaceSelf4INodeDirectory(inodeMap);
-          }
-        }
-      } else {
-        // a non-quota directory; so replace it with a directory with quota
-        return dirNode.replaceSelf4Quota(latest, nsQuota, dsQuota, inodeMap);
-      }
-      return (oldNsQuota != nsQuota || oldDsQuota != dsQuota) ? dirNode : null;
+      dirNode = dirNode.recordModification(latest, inodeMap);
+      dirNode.setQuota(nsQuota, dsQuota);
+      return dirNode;
     }
     }
   }
   }
   
   
@@ -2442,7 +2425,8 @@ public class FSDirectory implements Closeable {
   long totalInodes() {
   long totalInodes() {
     readLock();
     readLock();
     try {
     try {
-      return rootDir.numItemsInTree();
+      return rootDir.getDirectoryWithQuotaFeature().getSpaceConsumed()
+          .get(Quota.NAMESPACE);
     } finally {
     } finally {
       readUnlock();
       readUnlock();
     }
     }

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -953,7 +953,11 @@ public class FSEditLog implements LogsPurgeable {
         .setSnapshotRoot(path);
         .setSnapshotRoot(path);
     logEdit(op);
     logEdit(op);
   }
   }
-  
+
+  /**
+   * Log a CacheDirectiveInfo returned from
+   * {@link CacheManager#addDirective(CacheDirectiveInfo, FSPermissionChecker)}
+   */
   void logAddCacheDirectiveInfo(CacheDirectiveInfo directive,
   void logAddCacheDirectiveInfo(CacheDirectiveInfo directive,
       boolean toLogRpcIds) {
       boolean toLogRpcIds) {
     AddCacheDirectiveInfoOp op =
     AddCacheDirectiveInfoOp op =
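
The javadoc added here points at the ordering that matters on the write path: the NameNode logs the CacheDirectiveInfo that CacheManager.addDirective returns, that is, after the ID and the absolute expiry have been filled in, so that a replayed edit reproduces exactly the same directive. A rough sketch of that flow; the interfaces and names below are hypothetical, and locking and retry-cache handling are left out:

    class AddDirectiveFlowSketch {
      static class Request { String path; String pool; Long relativeExpiryMs; }
      static class Directive { long id; String path; String pool; long absoluteExpiryMs; }

      interface CacheManagerApi {
        // Validates the request, assigns the next ID, converts the expiry to an
        // absolute time, and returns the fully resolved directive.
        Directive addDirective(Request request);
      }

      interface EditLogApi {
        void logAddCacheDirective(Directive resolved, boolean toLogRpcIds);
      }

      static Directive addAndLog(CacheManagerApi cacheManager, EditLogApi editLog,
          Request request, boolean loggingRpcIds) {
        Directive resolved = cacheManager.addDirective(request);   // under the write lock
        // Log the resolved directive, not the raw request, so replay sees the same
        // ID and expiry that live clients were told about.
        editLog.logAddCacheDirective(resolved, loggingRpcIds);
        return resolved;
      }
    }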

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -636,17 +636,17 @@ public class FSEditLogLoader {
       fsNamesys.setLastAllocatedBlockId(allocateBlockIdOp.blockId);
       fsNamesys.setLastAllocatedBlockId(allocateBlockIdOp.blockId);
       break;
       break;
     }
     }
-    case OP_ADD_PATH_BASED_CACHE_DIRECTIVE: {
+    case OP_ADD_CACHE_DIRECTIVE: {
       AddCacheDirectiveInfoOp addOp = (AddCacheDirectiveInfoOp) op;
       AddCacheDirectiveInfoOp addOp = (AddCacheDirectiveInfoOp) op;
       CacheDirectiveInfo result = fsNamesys.
       CacheDirectiveInfo result = fsNamesys.
-          getCacheManager().addDirective(addOp.directive, null);
+          getCacheManager().addDirectiveFromEditLog(addOp.directive);
       if (toAddRetryCache) {
       if (toAddRetryCache) {
         Long id = result.getId();
         Long id = result.getId();
         fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId, id);
         fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId, id);
       }
       }
       break;
       break;
     }
     }
-    case OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE: {
+    case OP_MODIFY_CACHE_DIRECTIVE: {
       ModifyCacheDirectiveInfoOp modifyOp =
       ModifyCacheDirectiveInfoOp modifyOp =
           (ModifyCacheDirectiveInfoOp) op;
           (ModifyCacheDirectiveInfoOp) op;
       fsNamesys.getCacheManager().modifyDirective(
       fsNamesys.getCacheManager().modifyDirective(
@@ -656,7 +656,7 @@ public class FSEditLogLoader {
       }
       }
       break;
       break;
     }
     }
-    case OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE: {
+    case OP_REMOVE_CACHE_DIRECTIVE: {
       RemoveCacheDirectiveInfoOp removeOp =
       RemoveCacheDirectiveInfoOp removeOp =
           (RemoveCacheDirectiveInfoOp) op;
           (RemoveCacheDirectiveInfoOp) op;
       fsNamesys.getCacheManager().removeDirective(removeOp.id, null);
       fsNamesys.getCacheManager().removeDirective(removeOp.id, null);
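
Replay no longer funnels through the client-facing addDirective; OP_ADD_CACHE_DIRECTIVE is applied with addDirectiveFromEditLog, which trusts the ID recorded in the edit and only has to keep the in-memory counter ahead of it. The ID bookkeeping that lets the two paths coexist boils down to this distilled fragment, mirroring the CacheManager hunk earlier in this change:

    class DirectiveIdBookkeepingSketch {
      private long nextDirectiveId = 1;

      // Live path: hand out a fresh, monotonically increasing ID.
      long assignNewId() {
        return nextDirectiveId++;
      }

      // Replay path: reuse the logged ID and keep future IDs above it.
      void noteReplayedId(long loggedId) {
        if (nextDirectiveId <= loggedId) {
          nextDirectiveId = loggedId + 1;
        }
      }
    }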

+ 51 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -18,9 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 package org.apache.hadoop.hdfs.server.namenode;
 
 
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD;
+import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_DIRECTIVE;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_POOL;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_POOL;
-import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_PATH_BASED_CACHE_DIRECTIVE;
-import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOCATE_BLOCK_ID;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOW_SNAPSHOT;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN;
@@ -35,10 +34,11 @@ import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_END_LOG
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_INVALID;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MKDIR;
+import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_CACHE_DIRECTIVE;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_CACHE_POOL;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REASSIGN_LEASE;
+import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_CACHE_DIRECTIVE;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_CACHE_POOL;
-import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_OLD;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_SNAPSHOT;
@@ -64,6 +64,7 @@ import java.io.EOFException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Date;
 import java.util.EnumMap;
 import java.util.List;
 import java.util.zip.CheckedInputStream;
@@ -81,12 +82,12 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.util.XMLUtils;
 import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
@@ -109,7 +110,6 @@ import org.xml.sax.helpers.AttributesImpl;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
 
 /**
  * Helper classes for reading the ops from an InputStream.
@@ -165,11 +165,11 @@ public abstract class FSEditLogOp {
       inst.put(OP_RENAME_SNAPSHOT, new RenameSnapshotOp());
       inst.put(OP_SET_GENSTAMP_V2, new SetGenstampV2Op());
       inst.put(OP_ALLOCATE_BLOCK_ID, new AllocateBlockIdOp());
-      inst.put(OP_ADD_PATH_BASED_CACHE_DIRECTIVE,
+      inst.put(OP_ADD_CACHE_DIRECTIVE,
           new AddCacheDirectiveInfoOp());
-      inst.put(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE,
+      inst.put(OP_MODIFY_CACHE_DIRECTIVE,
           new ModifyCacheDirectiveInfoOp());
-      inst.put(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE,
+      inst.put(OP_REMOVE_CACHE_DIRECTIVE,
           new RemoveCacheDirectiveInfoOp());
       inst.put(OP_ADD_CACHE_POOL, new AddCachePoolOp());
       inst.put(OP_MODIFY_CACHE_POOL, new ModifyCachePoolOp());
@@ -2874,12 +2874,12 @@ public abstract class FSEditLogOp {
     CacheDirectiveInfo directive;
 
     public AddCacheDirectiveInfoOp() {
-      super(OP_ADD_PATH_BASED_CACHE_DIRECTIVE);
+      super(OP_ADD_CACHE_DIRECTIVE);
     }
 
     static AddCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
       return (AddCacheDirectiveInfoOp) cache
-          .get(OP_ADD_PATH_BASED_CACHE_DIRECTIVE);
+          .get(OP_ADD_CACHE_DIRECTIVE);
     }
 
     public AddCacheDirectiveInfoOp setDirective(
@@ -2889,6 +2889,7 @@ public abstract class FSEditLogOp {
       assert(directive.getPath() != null);
       assert(directive.getReplication() != null);
       assert(directive.getPool() != null);
+      assert(directive.getExpiration() != null);
       return this;
     }
 
@@ -2898,11 +2899,13 @@ public abstract class FSEditLogOp {
       String path = FSImageSerialization.readString(in);
       short replication = FSImageSerialization.readShort(in);
       String pool = FSImageSerialization.readString(in);
+      long expiryTime = FSImageSerialization.readLong(in);
       directive = new CacheDirectiveInfo.Builder().
           setId(id).
           setPath(new Path(path)).
           setReplication(replication).
           setPool(pool).
+          setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiryTime)).
           build();
       readRpcIds(in, logVersion);
     }
@@ -2913,6 +2916,8 @@ public abstract class FSEditLogOp {
       FSImageSerialization.writeString(directive.getPath().toUri().getPath(), out);
       FSImageSerialization.writeShort(directive.getReplication(), out);
       FSImageSerialization.writeString(directive.getPool(), out);
+      FSImageSerialization.writeLong(
+          directive.getExpiration().getMillis(), out);
       writeRpcIds(rpcClientId, rpcCallId, out);
     }
 
@@ -2925,6 +2930,8 @@ public abstract class FSEditLogOp {
       XMLUtils.addSaxString(contentHandler, "REPLICATION",
           Short.toString(directive.getReplication()));
       XMLUtils.addSaxString(contentHandler, "POOL", directive.getPool());
+      XMLUtils.addSaxString(contentHandler, "EXPIRATION",
+          "" + directive.getExpiration().getMillis());
       appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
     }
 
@@ -2935,6 +2942,8 @@ public abstract class FSEditLogOp {
           setPath(new Path(st.getValue("PATH"))).
           setReplication(Short.parseShort(st.getValue("REPLICATION"))).
           setPool(st.getValue("POOL")).
+          setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(
+              Long.parseLong(st.getValue("EXPIRATION")))).
           build();
       readRpcIdsFromXml(st);
     }
@@ -2946,7 +2955,8 @@ public abstract class FSEditLogOp {
       builder.append("id=" + directive.getId() + ",");
       builder.append("path=" + directive.getPath().toUri().getPath() + ",");
       builder.append("replication=" + directive.getReplication() + ",");
-      builder.append("pool=" + directive.getPool());
+      builder.append("pool=" + directive.getPool() + ",");
+      builder.append("expiration=" + directive.getExpiration().getMillis());
       appendRpcIdsToString(builder, rpcClientId, rpcCallId);
       builder.append("]");
       return builder.toString();
@@ -2961,12 +2971,12 @@ public abstract class FSEditLogOp {
     CacheDirectiveInfo directive;
 
     public ModifyCacheDirectiveInfoOp() {
-      super(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE);
+      super(OP_MODIFY_CACHE_DIRECTIVE);
     }
 
     static ModifyCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
       return (ModifyCacheDirectiveInfoOp) cache
-          .get(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE);
+          .get(OP_MODIFY_CACHE_DIRECTIVE);
     }
 
     public ModifyCacheDirectiveInfoOp setDirective(
@@ -2991,7 +3001,12 @@ public abstract class FSEditLogOp {
       if ((flags & 0x4) != 0) {
         builder.setPool(FSImageSerialization.readString(in));
       }
-      if ((flags & ~0x7) != 0) {
+      if ((flags & 0x8) != 0) {
+        builder.setExpiration(
+            CacheDirectiveInfo.Expiration.newAbsolute(
+                FSImageSerialization.readLong(in)));
+      }
+      if ((flags & ~0xF) != 0) {
         throw new IOException("unknown flags set in " +
             "ModifyCacheDirectiveInfoOp: " + flags);
       }
@@ -3005,7 +3020,8 @@ public abstract class FSEditLogOp {
       byte flags = (byte)(
           ((directive.getPath() != null) ? 0x1 : 0) |
           ((directive.getReplication() != null) ? 0x2 : 0) |
-          ((directive.getPool() != null) ? 0x4 : 0)
+          ((directive.getPool() != null) ? 0x4 : 0) |
+          ((directive.getExpiration() != null) ? 0x8 : 0)
         );
       out.writeByte(flags);
       if (directive.getPath() != null) {
@@ -3018,6 +3034,10 @@ public abstract class FSEditLogOp {
       if (directive.getPool() != null) {
         FSImageSerialization.writeString(directive.getPool(), out);
       }
+      if (directive.getExpiration() != null) {
+        FSImageSerialization.writeLong(directive.getExpiration().getMillis(),
+            out);
+      }
       writeRpcIds(rpcClientId, rpcCallId, out);
     }
 
@@ -3036,6 +3056,10 @@ public abstract class FSEditLogOp {
       if (directive.getPool() != null) {
         XMLUtils.addSaxString(contentHandler, "POOL", directive.getPool());
       }
+      if (directive.getExpiration() != null) {
+        XMLUtils.addSaxString(contentHandler, "EXPIRATION",
+            "" + directive.getExpiration().getMillis());
+      }
       appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
     }
 
@@ -3056,6 +3080,11 @@ public abstract class FSEditLogOp {
       if (pool != null) {
         builder.setPool(pool);
       }
+      String expiryTime = st.getValueOrNull("EXPIRATION");
+      if (expiryTime != null) {
+        builder.setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(
+            Long.parseLong(expiryTime)));
+      }
       this.directive = builder.build();
       readRpcIdsFromXml(st);
     }
@@ -3075,6 +3104,10 @@ public abstract class FSEditLogOp {
       if (directive.getPool() != null) {
         builder.append(",").append("pool=").append(directive.getPool());
       }
+      if (directive.getExpiration() != null) {
+        builder.append(",").append("expiration=").
+            append(directive.getExpiration().getMillis());
+      }
       appendRpcIdsToString(builder, rpcClientId, rpcCallId);
       builder.append("]");
       return builder.toString();
@@ -3089,12 +3122,12 @@ public abstract class FSEditLogOp {
     long id;
 
     public RemoveCacheDirectiveInfoOp() {
-      super(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE);
+      super(OP_REMOVE_CACHE_DIRECTIVE);
     }
 
     static RemoveCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
       return (RemoveCacheDirectiveInfoOp) cache
-          .get(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE);
+          .get(OP_REMOVE_CACHE_DIRECTIVE);
     }
 
     public RemoveCacheDirectiveInfoOp setId(long id) {
@@ -3162,7 +3195,7 @@
 
     @Override
     public void writeFields(DataOutputStream out) throws IOException {
-      info .writeTo(out);
+      info.writeTo(out);
       writeRpcIds(rpcClientId, rpcCallId, out);
     }
 

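ModifyCacheDirectiveInfoOp above encodes which optional fields are present in a single flag byte (0x1 path, 0x2 replication, 0x4 pool, and now 0x8 expiration) and rejects any unknown bit on read. A minimal, generic sketch of that pattern using plain DataInput/DataOutput rather than FSImageSerialization; the field set and helper class are illustrative:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Generic sketch of the flag-byte encoding: one bit per optional field,
// unknown bits are rejected so old readers fail loudly on newer records.
final class OptionalFieldsCodec {
  static void write(DataOutput out, String path, Short repl, String pool,
      Long expiryMs) throws IOException {
    int flags = (path != null ? 0x1 : 0)
              | (repl != null ? 0x2 : 0)
              | (pool != null ? 0x4 : 0)
              | (expiryMs != null ? 0x8 : 0);
    out.writeByte(flags);
    if (path != null)     out.writeUTF(path);
    if (repl != null)     out.writeShort(repl);
    if (pool != null)     out.writeUTF(pool);
    if (expiryMs != null) out.writeLong(expiryMs);
  }

  static void read(DataInput in) throws IOException {
    int flags = in.readByte();
    String path = (flags & 0x1) != 0 ? in.readUTF() : null;
    Short repl  = (flags & 0x2) != 0 ? in.readShort() : null;
    String pool = (flags & 0x4) != 0 ? in.readUTF() : null;
    Long expiry = (flags & 0x8) != 0 ? in.readLong() : null;
    if ((flags & ~0xF) != 0) {
      throw new IOException("unknown flags set: " + flags);
    }
    // In the real op, these values would feed a CacheDirectiveInfo.Builder.
  }
}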
+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java

@@ -64,12 +64,12 @@ public enum FSEditLogOpCodes {
   OP_DISALLOW_SNAPSHOT          ((byte) 30),
   OP_SET_GENSTAMP_V2            ((byte) 31),
   OP_ALLOCATE_BLOCK_ID          ((byte) 32),
-  OP_ADD_PATH_BASED_CACHE_DIRECTIVE       ((byte) 33),
-  OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE    ((byte) 34),
+  OP_ADD_CACHE_DIRECTIVE       ((byte) 33),
+  OP_REMOVE_CACHE_DIRECTIVE    ((byte) 34),
   OP_ADD_CACHE_POOL                       ((byte) 35),
   OP_MODIFY_CACHE_POOL                    ((byte) 36),
   OP_REMOVE_CACHE_POOL                    ((byte) 37),
-  OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE    ((byte) 38);
+  OP_MODIFY_CACHE_DIRECTIVE    ((byte) 38);
 
   private byte opCode;
 

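The enum constants are renamed here, but their on-disk byte values (33, 34, 38) do not change, so edit logs written before the rename still decode to the same operations: decoding keys off the byte value, not the Java constant name. A small sketch of that idea (this is not the actual FSEditLogOpCodes lookup, which caches the mapping):

// Sketch only: byte-to-opcode lookup; renaming a constant leaves the byte
// value, and therefore existing edit logs, untouched.
enum OpCodeSketch {
  OP_ADD_CACHE_DIRECTIVE((byte) 33),
  OP_REMOVE_CACHE_DIRECTIVE((byte) 34),
  OP_MODIFY_CACHE_DIRECTIVE((byte) 38);

  private final byte value;
  OpCodeSketch(byte value) { this.value = value; }

  static OpCodeSketch fromByte(byte b) {
    for (OpCodeSketch op : values()) {
      if (op.value == b) {
        return op;
      }
    }
    return null; // unknown opcode
  }
}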
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -755,7 +755,7 @@ public class FSImage implements Closeable {
    * This is an update of existing state of the filesystem and does not
    * throw QuotaExceededException.
    */
-  static void updateCountForQuota(INodeDirectoryWithQuota root) {
+  static void updateCountForQuota(INodeDirectory root) {
     updateCountForQuotaRecursively(root, Quota.Counts.newInstance());
   }
   
@@ -795,7 +795,7 @@ public class FSImage implements Closeable {
             + " quota = " + dsQuota + " < consumed = " + diskspace);
       }
 
-      ((INodeDirectoryWithQuota)dir).setSpaceConsumed(namespace, diskspace);
+      dir.getDirectoryWithQuotaFeature().setSpaceConsumed(namespace, diskspace);
     }
   }
 

+ 15 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java

@@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
@@ -375,7 +375,7 @@ public class FSImageFormat {
     final long dsQuota = q.get(Quota.DISKSPACE);
     final long dsQuota = q.get(Quota.DISKSPACE);
     FSDirectory fsDir = namesystem.dir;
     FSDirectory fsDir = namesystem.dir;
     if (nsQuota != -1 || dsQuota != -1) {
     if (nsQuota != -1 || dsQuota != -1) {
-      fsDir.rootDir.setQuota(nsQuota, dsQuota);
+      fsDir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
     }
     }
     fsDir.rootDir.cloneModificationTime(root);
     fsDir.rootDir.cloneModificationTime(root);
     fsDir.rootDir.clonePermissionStatus(root);    
     fsDir.rootDir.clonePermissionStatus(root);    
@@ -729,10 +729,11 @@ public class FSImageFormat {
       if (counter != null) {
       if (counter != null) {
         counter.increment();
         counter.increment();
       }
       }
-      final INodeDirectory dir = nsQuota >= 0 || dsQuota >= 0?
-          new INodeDirectoryWithQuota(inodeId, localName, permissions,
-              modificationTime, nsQuota, dsQuota)
-          : new INodeDirectory(inodeId, localName, permissions, modificationTime);
+      final INodeDirectory dir = new INodeDirectory(inodeId, localName,
+          permissions, modificationTime);
+      if (nsQuota >= 0 || dsQuota >= 0) {
+        dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
+      }
       return snapshottable ? new INodeDirectorySnapshottable(dir)
       return snapshottable ? new INodeDirectorySnapshottable(dir)
           : withSnapshot ? new INodeDirectoryWithSnapshot(dir)
           : withSnapshot ? new INodeDirectoryWithSnapshot(dir)
           : dir;
           : dir;
@@ -972,13 +973,14 @@ public class FSImageFormat {
       checkNotSaved();
       checkNotSaved();
 
 
       final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
       final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
-      FSDirectory fsDir = sourceNamesystem.dir;
+      final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
+      final long numINodes = rootDir.getDirectoryWithQuotaFeature()
+          .getSpaceConsumed().get(Quota.NAMESPACE);
       String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
       String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
       Step step = new Step(StepType.INODES, sdPath);
       Step step = new Step(StepType.INODES, sdPath);
       StartupProgress prog = NameNode.getStartupProgress();
       StartupProgress prog = NameNode.getStartupProgress();
       prog.beginStep(Phase.SAVING_CHECKPOINT, step);
       prog.beginStep(Phase.SAVING_CHECKPOINT, step);
-      prog.setTotal(Phase.SAVING_CHECKPOINT, step,
-        fsDir.rootDir.numItemsInTree());
+      prog.setTotal(Phase.SAVING_CHECKPOINT, step, numINodes);
       Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
       Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
       long startTime = now();
       long startTime = now();
       //
       //
@@ -997,7 +999,7 @@ public class FSImageFormat {
         // fairness-related deadlock. See the comments on HDFS-2223.
         // fairness-related deadlock. See the comments on HDFS-2223.
         out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
         out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
             .getNamespaceID());
             .getNamespaceID());
-        out.writeLong(fsDir.rootDir.numItemsInTree());
+        out.writeLong(numINodes);
         out.writeLong(sourceNamesystem.getGenerationStampV1());
         out.writeLong(sourceNamesystem.getGenerationStampV1());
         out.writeLong(sourceNamesystem.getGenerationStampV2());
         out.writeLong(sourceNamesystem.getGenerationStampV2());
         out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch());
         out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch());
@@ -1014,14 +1016,13 @@ public class FSImageFormat {
                  " using " + compression);
                  " using " + compression);
 
 
         // save the root
         // save the root
-        saveINode2Image(fsDir.rootDir, out, false, referenceMap, counter);
+        saveINode2Image(rootDir, out, false, referenceMap, counter);
         // save the rest of the nodes
         // save the rest of the nodes
-        saveImage(fsDir.rootDir, out, true, false, counter);
+        saveImage(rootDir, out, true, false, counter);
         prog.endStep(Phase.SAVING_CHECKPOINT, step);
         prog.endStep(Phase.SAVING_CHECKPOINT, step);
         // Now that the step is finished, set counter equal to total to adjust
         // Now that the step is finished, set counter equal to total to adjust
         // for possible under-counting due to reference inodes.
         // for possible under-counting due to reference inodes.
-        prog.setCount(Phase.SAVING_CHECKPOINT, step,
-          fsDir.rootDir.numItemsInTree());
+        prog.setCount(Phase.SAVING_CHECKPOINT, step, numINodes);
         // save files under construction
         // save files under construction
         // TODO: for HDFS-5428, since we cannot break the compatibility of 
         // TODO: for HDFS-5428, since we cannot break the compatibility of 
         // fsimage, we store part of the under-construction files that are only
         // fsimage, we store part of the under-construction files that are only

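The saver above replaces the rootDir.numItemsInTree() walk with the namespace count cached on the root directory's quota feature, which the FSDirectory and FSImage hunks earlier in this change keep up to date. A one-method sketch of the idea, assuming same-package access and the surrounding wiring shown in the diff:

// Sketch: the root always carries a DirectoryWithQuotaFeature, and its cached
// NAMESPACE usage equals the number of inodes in the tree, so the
// SAVING_CHECKPOINT progress step can be sized without another traversal.
final class INodeCountSketch {
  static long countINodes(INodeDirectory rootDir) {
    return rootDir.getDirectoryWithQuotaFeature()
        .getSpaceConsumed().get(Quota.NAMESPACE);
  }
}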
+ 14 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -165,6 +165,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
@@ -6421,6 +6422,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return datanodeStatistics.getCapacityRemainingPercent();
   }
 
+  @Override // NameNodeMXBean
+  public long getCacheCapacity() {
+    return datanodeStatistics.getCacheCapacity();
+  }
+
+  @Override // NameNodeMXBean
+  public long getCacheUsed() {
+    return datanodeStatistics.getCacheUsed();
+  }
+
   @Override // NameNodeMXBean
   public long getTotalBlocks() {
     return getBlocksTotal();
@@ -6627,7 +6638,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         } else if (openForWrite) {
           EditLogOutputStream elos = jas.getCurrentStream();
           if (elos != null) {
-            jasMap.put("stream", elos.generateHtmlReport());
+            jasMap.put("stream", elos.generateReport());
           } else {
             jasMap.put("stream", "not currently writing");
           }
@@ -7277,11 +7288,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     getEditLog().logSync();
   }
 
-  public BatchedListEntries<CachePoolInfo> listCachePools(String prevKey)
+  public BatchedListEntries<CachePoolEntry> listCachePools(String prevKey)
       throws IOException {
     final FSPermissionChecker pc =
         isPermissionEnabled ? getPermissionChecker() : null;
-    BatchedListEntries<CachePoolInfo> results;
+    BatchedListEntries<CachePoolEntry> results;
     checkOperation(OperationCategory.READ);
     boolean success = false;
     readLock();

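listCachePools now returns BatchedListEntries<CachePoolEntry> (pool info plus stats) instead of bare CachePoolInfo. A hedged sketch of consuming the batched API with the usual key-based paging, assuming the sketch class lives in the namenode package; empty-batch and error handling are omitted:

import java.io.IOException;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;

// Illustrative consumer of the batched listing; not part of the patch.
final class ListCachePoolsSketch {
  static void printAllPools(FSNamesystem fsn) throws IOException {
    String prevKey = "";
    while (true) {
      BatchedListEntries<CachePoolEntry> batch = fsn.listCachePools(prevKey);
      for (int i = 0; i < batch.size(); i++) {
        CachePoolEntry entry = batch.get(i);
        System.out.println(entry.getInfo().getPoolName());
        prevKey = entry.getInfo().getPoolName(); // resume key for the next page
      }
      if (!batch.hasMore()) {
        break;
      }
    }
  }
}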
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java

@@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 
 /**
- * I-node for file being written.
+ * Feature for under-construction file.
  */
 @InterfaceAudience.Private
 public class FileUnderConstructionFeature extends INodeFile.Feature {

+ 58 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ChunkedArrayList;
@@ -315,7 +314,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * 1.2.2 Else do nothing with the current INode. Recursively clean its 
    * children.
    * 
-   * 1.3 The current inode is a {@link FileWithSnapshot}.
+   * 1.3 The current inode is a file with snapshot.
    * Call recordModification(..) to capture the current states.
    * Mark the INode as deleted.
    * 
@@ -328,7 +327,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * 2. When deleting a snapshot.
    * 2.1 To clean {@link INodeFile}: do nothing.
    * 2.2 To clean {@link INodeDirectory}: recursively clean its children.
-   * 2.3 To clean {@link FileWithSnapshot}: delete the corresponding snapshot in
+   * 2.3 To clean INodeFile with snapshot: delete the corresponding snapshot in
    * its diff list.
    * 2.4 To clean {@link INodeDirectoryWithSnapshot}: delete the corresponding 
    * snapshot in its diff list. Recursively clean its children.
@@ -406,6 +405,15 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    */
   public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify) 
       throws QuotaExceededException {
+    addSpaceConsumed2Parent(nsDelta, dsDelta, verify);
+  }
+
+  /**
+   * Check and add namespace/diskspace consumed to itself and the ancestors.
+   * @throws QuotaExceededException if quote is violated.
+   */
+  void addSpaceConsumed2Parent(long nsDelta, long dsDelta, boolean verify) 
+      throws QuotaExceededException {
     if (parent != null) {
       parent.addSpaceConsumed(nsDelta, dsDelta, verify);
     }
@@ -744,4 +752,51 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
       toDeleteList.clear();
     }
   }
+
+  /** INode feature such as {@link FileUnderConstructionFeature}
+   *  and {@link DirectoryWithQuotaFeature}.
+   */
+  interface Feature<F extends Feature<F>> {
+    /** @return the next feature. */
+    public F getNextFeature();
+
+    /** Set the next feature. */
+    public void setNextFeature(F next);
+
+    /** Utility methods such as addFeature and removeFeature. */
+    static class Util {
+      /**
+       * Add a feature to the linked list.
+       * @return the new head.
+       */
+      static <F extends Feature<F>> F addFeature(F feature, F head) {
+        feature.setNextFeature(head);
+        return feature;
+      }
+
+      /**
+       * Remove a feature from the linked list.
+       * @return the new head.
+       */
+      static <F extends Feature<F>> F removeFeature(F feature, F head) {
+        if (feature == head) {
+          final F newHead = head.getNextFeature();
+          head.setNextFeature(null);
+          return newHead;
+        } else if (head != null) {
+          F prev = head;
+          F curr = head.getNextFeature();
+          for (; curr != null && curr != feature;
+              prev = curr, curr = curr.getNextFeature())
+            ;
+          if (curr != null) {
+            prev.setNextFeature(curr.getNextFeature());
+            curr.setNextFeature(null);
+            return head;
+          }
+        }
+        throw new IllegalStateException("Feature " + feature + " not found.");
+      }
+    }
+  }
 }

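The INode.Feature contract added above is an intrusive singly linked list: Util.addFeature pushes at the head and returns the new head, and Util.removeFeature unlinks the feature or throws IllegalStateException if it is not in the list. A toy sketch of the intended usage; DummyFeature and the demo class are hypothetical, and real callers sit in the same namenode package because the interface is package-private:

// Hypothetical demo of the feature list; not part of the patch.
class FeatureListDemo {
  static class DummyFeature implements INode.Feature<DummyFeature> {
    private DummyFeature next;
    @Override public DummyFeature getNextFeature() { return next; }
    @Override public void setNextFeature(DummyFeature n) { this.next = n; }
  }

  static void demo() {
    DummyFeature head = null;
    DummyFeature a = new DummyFeature();
    DummyFeature b = new DummyFeature();
    head = INode.Feature.Util.addFeature(a, head);    // list: a
    head = INode.Feature.Util.addFeature(b, head);    // list: b -> a
    head = INode.Feature.Util.removeFeature(a, head); // list: b
    // Removing a feature that was never linked throws IllegalStateException.
  }
}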
+ 119 - 26
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -46,6 +46,21 @@ import com.google.common.base.Preconditions;
  */
  */
 public class INodeDirectory extends INodeWithAdditionalFields
 public class INodeDirectory extends INodeWithAdditionalFields
     implements INodeDirectoryAttributes {
     implements INodeDirectoryAttributes {
+  /** Directory related features such as quota and snapshots. */
+  public static abstract class Feature implements INode.Feature<Feature> {
+    private Feature nextFeature;
+
+    @Override
+    public Feature getNextFeature() {
+      return nextFeature;
+    }
+
+    @Override
+    public void setNextFeature(Feature next) {
+      this.nextFeature = next;
+    }
+  }
+
   /** Cast INode to INodeDirectory. */
   /** Cast INode to INodeDirectory. */
   public static INodeDirectory valueOf(INode inode, Object path
   public static INodeDirectory valueOf(INode inode, Object path
       ) throws FileNotFoundException, PathIsNotDirectoryException {
       ) throws FileNotFoundException, PathIsNotDirectoryException {
@@ -63,6 +78,9 @@ public class INodeDirectory extends INodeWithAdditionalFields
   final static byte[] ROOT_NAME = DFSUtil.string2Bytes("");
   final static byte[] ROOT_NAME = DFSUtil.string2Bytes("");
 
 
   private List<INode> children = null;
   private List<INode> children = null;
+  
+  /** A linked list of {@link Feature}s. */
+  private Feature headFeature = null;
 
 
   /** constructor */
   /** constructor */
   public INodeDirectory(long id, byte[] name, PermissionStatus permissions,
   public INodeDirectory(long id, byte[] name, PermissionStatus permissions,
@@ -76,7 +94,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
    * @param adopt Indicate whether or not need to set the parent field of child
    * @param adopt Indicate whether or not need to set the parent field of child
    *              INodes to the new node
    *              INodes to the new node
    */
    */
-  public INodeDirectory(INodeDirectory other, boolean adopt) {
+  public INodeDirectory(INodeDirectory other, boolean adopt, boolean copyFeatures) {
     super(other);
     super(other);
     this.children = other.children;
     this.children = other.children;
     if (adopt && this.children != null) {
     if (adopt && this.children != null) {
@@ -84,6 +102,9 @@ public class INodeDirectory extends INodeWithAdditionalFields
         child.setParent(this);
         child.setParent(this);
       }
       }
     }
     }
+    if (copyFeatures) {
+      this.headFeature = other.headFeature;
+    }
   }
   }
 
 
   /** @return true unconditionally. */
   /** @return true unconditionally. */
@@ -103,6 +124,73 @@ public class INodeDirectory extends INodeWithAdditionalFields
     return false;
     return false;
   }
   }
 
 
+  void setQuota(long nsQuota, long dsQuota) {
+    DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature();
+    if (quota != null) {
+      // already has quota; so set the quota to the new values
+      quota.setQuota(nsQuota, dsQuota);
+      if (!isQuotaSet() && !isRoot()) {
+        removeFeature(quota);
+      }
+    } else {
+      final Quota.Counts c = computeQuotaUsage();
+      quota = addDirectoryWithQuotaFeature(nsQuota, dsQuota);
+      quota.setSpaceConsumed(c.get(Quota.NAMESPACE), c.get(Quota.DISKSPACE));
+    }
+  }
+
+  @Override
+  public Quota.Counts getQuotaCounts() {
+    final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
+    return q != null? q.getQuota(): super.getQuotaCounts();
+  }
+
+  @Override
+  public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify) 
+      throws QuotaExceededException {
+    final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
+    if (q != null) {
+      q.addSpaceConsumed(this, nsDelta, dsDelta, verify);
+    } else {
+      addSpaceConsumed2Parent(nsDelta, dsDelta, verify);
+    }
+  }
+
+  /**
+   * If the directory contains a {@link DirectoryWithQuotaFeature}, return it;
+   * otherwise, return null.
+   */
+  public final DirectoryWithQuotaFeature getDirectoryWithQuotaFeature() {
+    for(Feature f = headFeature; f != null; f = f.nextFeature) {
+      if (f instanceof DirectoryWithQuotaFeature) {
+        return (DirectoryWithQuotaFeature)f;
+      }
+    }
+    return null;
+  }
+
+  /** Is this directory with quota? */
+  final boolean isWithQuota() {
+    return getDirectoryWithQuotaFeature() != null;
+  }
+
+  DirectoryWithQuotaFeature addDirectoryWithQuotaFeature(
+      long nsQuota, long dsQuota) {
+    Preconditions.checkState(!isWithQuota(), "Directory is already with quota");
+    final DirectoryWithQuotaFeature quota = new DirectoryWithQuotaFeature(
+        nsQuota, dsQuota);
+    addFeature(quota);
+    return quota;
+  }
+
+  private void addFeature(Feature f) {
+    headFeature = INode.Feature.Util.addFeature(f, headFeature);
+  }
+
+  private void removeFeature(Feature f) {
+    headFeature = INode.Feature.Util.removeFeature(f, headFeature);
+  }
+
   private int searchChildren(byte[] name) {
   private int searchChildren(byte[] name) {
     return children == null? -1: Collections.binarySearch(children, name);
     return children == null? -1: Collections.binarySearch(children, name);
   }
   }
@@ -142,27 +230,6 @@ public class INodeDirectory extends INodeWithAdditionalFields
     return true;
     return true;
   }
   }
 
 
-  /**
-   * Replace itself with {@link INodeDirectoryWithQuota} or
-   * {@link INodeDirectoryWithSnapshot} depending on the latest snapshot.
-   */
-  INodeDirectoryWithQuota replaceSelf4Quota(final Snapshot latest,
-      final long nsQuota, final long dsQuota, final INodeMap inodeMap)
-      throws QuotaExceededException {
-    Preconditions.checkState(!(this instanceof INodeDirectoryWithQuota),
-        "this is already an INodeDirectoryWithQuota, this=%s", this);
-
-    if (!this.isInLatestSnapshot(latest)) {
-      final INodeDirectoryWithQuota q = new INodeDirectoryWithQuota(
-          this, true, nsQuota, dsQuota);
-      replaceSelf(q, inodeMap);
-      return q;
-    } else {
-      final INodeDirectoryWithSnapshot s = new INodeDirectoryWithSnapshot(this);
-      s.setQuota(nsQuota, dsQuota);
-      return replaceSelf(s, inodeMap).saveSelf2Snapshot(latest, this);
-    }
-  }
   /** Replace itself with an {@link INodeDirectorySnapshottable}. */
   /** Replace itself with an {@link INodeDirectorySnapshottable}. */
   public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
   public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
       Snapshot latest, final INodeMap inodeMap) throws QuotaExceededException {
       Snapshot latest, final INodeMap inodeMap) throws QuotaExceededException {
@@ -183,7 +250,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
   public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) {
   public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) {
     Preconditions.checkState(getClass() != INodeDirectory.class,
     Preconditions.checkState(getClass() != INodeDirectory.class,
         "the class is already INodeDirectory, this=%s", this);
         "the class is already INodeDirectory, this=%s", this);
-    return replaceSelf(new INodeDirectory(this, true), inodeMap);
+    return replaceSelf(new INodeDirectory(this, true, true), inodeMap);
   }
   }
 
 
   /** Replace itself with the given directory. */
   /** Replace itself with the given directory. */
@@ -439,6 +506,21 @@ public class INodeDirectory extends INodeWithAdditionalFields
   @Override
   @Override
   public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache,
   public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache,
       int lastSnapshotId) {
       int lastSnapshotId) {
+    final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
+    if (q != null) {
+      if (useCache && isQuotaSet()) {
+        q.addNamespaceDiskspace(counts);
+      } else {
+        computeDirectoryQuotaUsage(counts, false, lastSnapshotId);
+      }
+      return counts;
+    } else {
+      return computeDirectoryQuotaUsage(counts, useCache, lastSnapshotId);
+    }
+  }
+
+  Quota.Counts computeDirectoryQuotaUsage(Quota.Counts counts, boolean useCache,
+      int lastSnapshotId) {
     if (children != null) {
     if (children != null) {
       for (INode child : children) {
       for (INode child : children) {
         child.computeQuotaUsage(counts, useCache, lastSnapshotId);
         child.computeQuotaUsage(counts, useCache, lastSnapshotId);
@@ -456,6 +538,16 @@ public class INodeDirectory extends INodeWithAdditionalFields
   @Override
   @Override
   public ContentSummaryComputationContext computeContentSummary(
   public ContentSummaryComputationContext computeContentSummary(
       ContentSummaryComputationContext summary) {
       ContentSummaryComputationContext summary) {
+    final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
+    if (q != null) {
+      return q.computeContentSummary(this, summary);
+    } else {
+      return computeDirectoryContentSummary(summary);
+    }
+  }
+
+  ContentSummaryComputationContext computeDirectoryContentSummary(
+      ContentSummaryComputationContext summary) {
     ReadOnlyList<INode> childrenList = getChildrenList(null);
     ReadOnlyList<INode> childrenList = getChildrenList(null);
     // Explicit traversing is done to enable repositioning after relinquishing
     // Explicit traversing is done to enable repositioning after relinquishing
     // and reacquiring locks.
     // and reacquiring locks.
@@ -570,7 +662,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
       Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior,
       Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior,
           collectedBlocks, removedINodes, null, countDiffChange);
           collectedBlocks, removedINodes, null, countDiffChange);
       if (isQuotaSet()) {
       if (isQuotaSet()) {
-        ((INodeDirectoryWithQuota) this).addSpaceConsumed2Cache(
+        getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
             -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
             -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
       }
       }
       return counts;
       return counts;
@@ -606,8 +698,9 @@ public class INodeDirectory extends INodeWithAdditionalFields
       final Snapshot snapshot) {
       final Snapshot snapshot) {
     super.dumpTreeRecursively(out, prefix, snapshot);
     super.dumpTreeRecursively(out, prefix, snapshot);
     out.print(", childrenSize=" + getChildrenList(snapshot).size());
     out.print(", childrenSize=" + getChildrenList(snapshot).size());
-    if (this instanceof INodeDirectoryWithQuota) {
-      out.print(((INodeDirectoryWithQuota)this).quotaString());
+    final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
+    if (q != null) {
+      out.print(", " + q);
     }
     }
     if (this instanceof Snapshot.Root) {
     if (this instanceof Snapshot.Root) {
       out.print(", snapshotId=" + snapshot.getId());
       out.print(", snapshotId=" + snapshot.getId());

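With the INodeDirectory change above, quota state is no longer carried by the INodeDirectoryWithQuota subclass but by a DirectoryWithQuotaFeature found by scanning the directory's feature list; setQuota attaches the feature on first use, updates it in place afterwards, and, per the diff, removes it again when the quota is cleared on a non-root directory. A minimal sketch of the lookup-or-attach step, assuming same-package access (the helper class and method name are made up):

// Illustrative helper, not part of the patch: get the quota feature,
// attaching one with the given limits if the directory has none yet.
final class QuotaFeatureSketch {
  static DirectoryWithQuotaFeature quotaFeatureOf(
      INodeDirectory dir, long nsQuota, long dsQuota) {
    DirectoryWithQuotaFeature q = dir.getDirectoryWithQuotaFeature();
    if (q == null) {
      // First quota on this directory: attach the feature. The real setQuota
      // also seeds it from computeQuotaUsage() so the cached usage is correct.
      q = dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
    } else {
      // Directory already has a quota: just update the limits in place.
      q.setQuota(nsQuota, dsQuota);
    }
    return q;
  }
}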
+ 32 - 42
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -29,10 +29,8 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.*;
 import org.apache.hadoop.hdfs.server.blockmanagement.*;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.Util;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 
@@ -47,13 +45,15 @@ public class INodeFile extends INodeWithAdditionalFields
    * A feature contains specific information for a type of INodeFile. E.g.,
    * A feature contains specific information for a type of INodeFile. E.g.,
    * we can have separate features for Under-Construction and Snapshot.
    * we can have separate features for Under-Construction and Snapshot.
    */
    */
-  public static abstract class Feature {
+  public static abstract class Feature implements INode.Feature<Feature> {
     private Feature nextFeature;
     private Feature nextFeature;
 
 
+    @Override
     public Feature getNextFeature() {
     public Feature getNextFeature() {
       return nextFeature;
       return nextFeature;
     }
     }
 
 
+    @Override
     public void setNextFeature(Feature next) {
     public void setNextFeature(Feature next) {
       this.nextFeature = next;
       this.nextFeature = next;
     }
     }
@@ -157,26 +157,12 @@ public class INodeFile extends INodeWithAdditionalFields
     return getFileUnderConstructionFeature() != null;
     return getFileUnderConstructionFeature() != null;
   }
   }
 
 
-  void addFeature(Feature f) {
-    f.nextFeature = headFeature;
-    headFeature = f;
+  private void addFeature(Feature f) {
+    headFeature = INode.Feature.Util.addFeature(f, headFeature);
   }
   }
 
 
-  void removeFeature(Feature f) {
-    if (f == headFeature) {
-      headFeature = headFeature.nextFeature;
-      return;
-    } else if (headFeature != null) {
-      Feature prev = headFeature;
-      Feature curr = headFeature.nextFeature;
-      for (; curr != null && curr != f; prev = curr, curr = curr.nextFeature)
-        ;
-      if (curr != null) {
-        prev.nextFeature = curr.nextFeature;
-        return;
-      }
-    }
-    throw new IllegalStateException("Feature " + f + " not found.");
+  private void removeFeature(Feature f) {
+    headFeature = INode.Feature.Util.removeFeature(f, headFeature);
   }
   }
 
 
   /** @return true unconditionally. */
   /** @return true unconditionally. */
@@ -194,10 +180,10 @@ public class INodeFile extends INodeWithAdditionalFields
   /* Start of Under-Construction Feature */
   /* Start of Under-Construction Feature */
 
 
   /** Convert this file to an {@link INodeFileUnderConstruction}. */
   /** Convert this file to an {@link INodeFileUnderConstruction}. */
-  public INodeFile toUnderConstruction(String clientName, String clientMachine,
+  INodeFile toUnderConstruction(String clientName, String clientMachine,
       DatanodeDescriptor clientNode) {
       DatanodeDescriptor clientNode) {
     Preconditions.checkState(!isUnderConstruction(),
     Preconditions.checkState(!isUnderConstruction(),
-        "file is already an INodeFileUnderConstruction");
+        "file is already under construction");
     FileUnderConstructionFeature uc = new FileUnderConstructionFeature(
     FileUnderConstructionFeature uc = new FileUnderConstructionFeature(
         clientName, clientMachine, clientNode);
         clientName, clientMachine, clientNode);
     addFeature(uc);
     addFeature(uc);
@@ -209,6 +195,8 @@ public class INodeFile extends INodeWithAdditionalFields
    * feature.
    * feature.
    */
    */
   public INodeFile toCompleteFile(long mtime) {
   public INodeFile toCompleteFile(long mtime) {
+    Preconditions.checkState(isUnderConstruction(),
+        "file is no longer under construction");
     FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
     FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
     if (uc != null) {
     if (uc != null) {
       assertAllBlocksComplete();
       assertAllBlocksComplete();
@@ -230,15 +218,16 @@ public class INodeFile extends INodeWithAdditionalFields
     }
     }
   }
   }
 
 
-  @Override //BlockCollection
+  @Override // BlockCollection
   public void setBlock(int index, BlockInfo blk) {
   public void setBlock(int index, BlockInfo blk) {
     this.blocks[index] = blk;
     this.blocks[index] = blk;
   }
   }
 
 
-  @Override // BlockCollection
+  @Override // BlockCollection, the file should be under construction
   public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
   public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
       DatanodeStorageInfo[] locations) throws IOException {
       DatanodeStorageInfo[] locations) throws IOException {
-    Preconditions.checkState(isUnderConstruction());
+    Preconditions.checkState(isUnderConstruction(),
+        "file is no longer under construction");
 
 
     if (numBlocks() == 0) {
     if (numBlocks() == 0) {
       throw new IOException("Failed to set last block: File is empty.");
       throw new IOException("Failed to set last block: File is empty.");
@@ -256,6 +245,8 @@ public class INodeFile extends INodeWithAdditionalFields
    * the last one on the list.
    * the last one on the list.
    */
    */
   boolean removeLastBlock(Block oldblock) {
   boolean removeLastBlock(Block oldblock) {
+    Preconditions.checkState(isUnderConstruction(),
+        "file is no longer under construction");
     if (blocks == null || blocks.length == 0) {
     if (blocks == null || blocks.length == 0) {
       return false;
       return false;
     }
     }
@@ -307,10 +298,8 @@ public class INodeFile extends INodeWithAdditionalFields
   }
   }
 
 
   @Override
   @Override
-  public final short getBlockReplication() {
-    return this instanceof FileWithSnapshot?
-        Util.getBlockReplication((FileWithSnapshot)this)
-        : getFileReplication(null);
+  public short getBlockReplication() {
+    return getFileReplication(null);
   }
   }
 
 
   /** Set the replication factor of this file. */
   /** Set the replication factor of this file. */
@@ -430,8 +419,8 @@ public class INodeFile extends INodeWithAdditionalFields
     clear();
     clear();
     removedINodes.add(this);
     removedINodes.add(this);
     
     
-    if (this instanceof FileWithSnapshot) {
-      ((FileWithSnapshot) this).getDiffs().clear();
+    if (this instanceof INodeFileWithSnapshot) {
+      ((INodeFileWithSnapshot) this).getDiffs().clear();
     }
     }
   }
   }
   
   
@@ -446,8 +435,8 @@ public class INodeFile extends INodeWithAdditionalFields
       boolean useCache, int lastSnapshotId) {
       boolean useCache, int lastSnapshotId) {
     long nsDelta = 1;
     long nsDelta = 1;
     final long dsDelta;
     final long dsDelta;
-    if (this instanceof FileWithSnapshot) {
-      FileDiffList fileDiffList = ((FileWithSnapshot) this).getDiffs();
+    if (this instanceof INodeFileWithSnapshot) {
+      FileDiffList fileDiffList = ((INodeFileWithSnapshot) this).getDiffs();
       Snapshot last = fileDiffList.getLastSnapshot();
       Snapshot last = fileDiffList.getLastSnapshot();
       List<FileDiff> diffs = fileDiffList.asList();
       List<FileDiff> diffs = fileDiffList.asList();
 
 
@@ -479,8 +468,8 @@ public class INodeFile extends INodeWithAdditionalFields
   private void computeContentSummary4Snapshot(final Content.Counts counts) {
   private void computeContentSummary4Snapshot(final Content.Counts counts) {
     // file length and diskspace only counted for the latest state of the file
     // file length and diskspace only counted for the latest state of the file
     // i.e. either the current state or the last snapshot
     // i.e. either the current state or the last snapshot
-    if (this instanceof FileWithSnapshot) {
-      final FileWithSnapshot withSnapshot = (FileWithSnapshot)this;
+    if (this instanceof INodeFileWithSnapshot) {
+      final INodeFileWithSnapshot withSnapshot = (INodeFileWithSnapshot) this;
       final FileDiffList diffs = withSnapshot.getDiffs();
       final FileDiffList diffs = withSnapshot.getDiffs();
       final int n = diffs.asList().size();
       final int n = diffs.asList().size();
       counts.add(Content.FILE, n);
       counts.add(Content.FILE, n);
@@ -496,8 +485,8 @@ public class INodeFile extends INodeWithAdditionalFields
   }
   }
 
 
   private void computeContentSummary4Current(final Content.Counts counts) {
   private void computeContentSummary4Current(final Content.Counts counts) {
-    if (this instanceof FileWithSnapshot
-        && ((FileWithSnapshot)this).isCurrentFileDeleted()) {
+    if (this instanceof INodeFileWithSnapshot
+        && ((INodeFileWithSnapshot) this).isCurrentFileDeleted()) {
       return;
       return;
     }
     }
 
 
@@ -516,8 +505,9 @@ public class INodeFile extends INodeWithAdditionalFields
    * otherwise, get the file size from the given snapshot.
    * otherwise, get the file size from the given snapshot.
    */
    */
   public final long computeFileSize(Snapshot snapshot) {
   public final long computeFileSize(Snapshot snapshot) {
-    if (snapshot != null && this instanceof FileWithSnapshot) {
-      final FileDiff d = ((FileWithSnapshot)this).getDiffs().getDiff(snapshot);
+    if (snapshot != null && this instanceof INodeFileWithSnapshot) {
+      final FileDiff d = ((INodeFileWithSnapshot) this).getDiffs().getDiff(
+          snapshot);
       if (d != null) {
         return d.getFileSize();
       }
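
The hunks above strip the FileWithSnapshot special-casing out of INodeFile; computeFileSize(Snapshot) keeps its lookup order: use the length recorded in the snapshot's FileDiff when one exists, otherwise the live length. A standalone sketch of that lookup order, using simplified stand-in types rather than the real INodeFile/FileDiff classes:

import java.util.Map;

/** Simplified illustration of the computeFileSize(Snapshot) lookup order above. */
class FileSizeLookup {
  // snapshot id -> file length recorded when that snapshot was taken (stand-in for FileDiffList)
  private final Map<Integer, Long> diffSizes;
  private final long currentSize;

  FileSizeLookup(Map<Integer, Long> diffSizes, long currentSize) {
    this.diffSizes = diffSizes;
    this.currentSize = currentSize;
  }

  /** A null snapshot means "current state", mirroring computeFileSize(null). */
  long computeFileSize(Integer snapshotId) {
    if (snapshotId != null) {
      Long recorded = diffSizes.get(snapshotId); // FileDiff#getFileSize() in the real code
      if (recorded != null) {
        return recorded;
      }
    }
    return currentSize; // fall back to the live file length
  }
}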

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java

@@ -26,8 +26,8 @@ import java.util.List;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
@@ -102,8 +102,8 @@ public abstract class INodeReference extends INode {
     }
     if (wn != null) {
       INode referred = wc.getReferredINode();
-      if (referred instanceof FileWithSnapshot) {
-        return ((FileWithSnapshot) referred).getDiffs().getPrior(
+      if (referred instanceof INodeFileWithSnapshot) {
+        return ((INodeFileWithSnapshot) referred).getDiffs().getPrior(
             wn.lastSnapshotId);
       } else if (referred instanceof INodeDirectoryWithSnapshot) { 
         return ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior(
@@ -547,8 +547,8 @@ public abstract class INodeReference extends INode {
     private Snapshot getSelfSnapshot() {
       INode referred = getReferredINode().asReference().getReferredINode();
       Snapshot snapshot = null;
-      if (referred instanceof FileWithSnapshot) {
-        snapshot = ((FileWithSnapshot) referred).getDiffs().getPrior(
+      if (referred instanceof INodeFileWithSnapshot) {
+        snapshot = ((INodeFileWithSnapshot) referred).getDiffs().getPrior(
             lastSnapshotId);
       } else if (referred instanceof INodeDirectoryWithSnapshot) {
         snapshot = ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior(
@@ -637,10 +637,10 @@ public abstract class INodeReference extends INode {
         Snapshot snapshot = getSelfSnapshot(prior);
         
         INode referred = getReferredINode().asReference().getReferredINode();
-        if (referred instanceof FileWithSnapshot) {
+        if (referred instanceof INodeFileWithSnapshot) {
           // if referred is a file, it must be a FileWithSnapshot since we did
           // recordModification before the rename
-          FileWithSnapshot sfile = (FileWithSnapshot) referred;
+          INodeFileWithSnapshot sfile = (INodeFileWithSnapshot) referred;
           // make sure we mark the file as deleted
           sfile.deleteCurrentFile();
           try {
@@ -671,8 +671,8 @@ public abstract class INodeReference extends INode {
       WithCount wc = (WithCount) getReferredINode().asReference();
       INode referred = wc.getReferredINode();
       Snapshot lastSnapshot = null;
-      if (referred instanceof FileWithSnapshot) {
-        lastSnapshot = ((FileWithSnapshot) referred).getDiffs()
+      if (referred instanceof INodeFileWithSnapshot) {
+        lastSnapshot = ((INodeFileWithSnapshot) referred).getDiffs()
             .getLastSnapshot(); 
       } else if (referred instanceof INodeDirectoryWithSnapshot) {
         lastSnapshot = ((INodeDirectoryWithSnapshot) referred)

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Trash;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 import static org.apache.hadoop.util.ToolRunner.confirmPrompt;

+ 32 - 26
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java

@@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -69,25 +70,45 @@ public class NameNodeHttpServer {
     this.bindAddress = bindAddress;
   }
   
-  public void start() throws IOException {
+  void start() throws IOException {
     final String infoHost = bindAddress.getHostName();
     int infoPort = bindAddress.getPort();
-    httpServer = new HttpServer.Builder().setName("hdfs")
-        .setBindAddress(infoHost).setPort(infoPort)
+    HttpServer.Builder builder = new HttpServer.Builder().setName("hdfs")
+        .addEndpoint(URI.create(("http://" + NetUtils.getHostPortString(bindAddress))))
         .setFindPort(infoPort == 0).setConf(conf).setACL(
             new AccessControlList(conf.get(DFS_ADMIN, " ")))
         .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
         .setUsernameConfKey(
             DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
         .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
-            DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)).build();
+            DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
+
+    boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
+    if (certSSL) {
+      httpsAddress = NetUtils.createSocketAddr(conf.get(
+          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
+
+      builder.addEndpoint(URI.create("https://"
+          + NetUtils.getHostPortString(httpsAddress)));
+      Configuration sslConf = new Configuration(false);
+      sslConf.setBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf
+          .getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
+              DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
+      sslConf.addResource(conf.get(
+          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
+      DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
+    }
+
+    httpServer = builder.build();
     if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) {
       //add SPNEGO authentication filter for webhdfs
       final String name = "SPNEGO";
       final String classname = AuthFilter.class.getName();
       final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
       Map<String, String> params = getAuthFilterParams(conf);
-      httpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
+      HttpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
           new String[]{pathSpec});
       HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
 
@@ -97,34 +118,19 @@ public class NameNodeHttpServer {
           + ";" + Param.class.getPackage().getName(), pathSpec);
       }
 
-    boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
+    httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
+    httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
+    setupServlets(httpServer, conf);
+    httpServer.start();
+    httpAddress = httpServer.getConnectorAddress(0);
     if (certSSL) {
-      boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
-      httpsAddress = NetUtils.createSocketAddr(conf.get(
-          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
-          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
-
-      Configuration sslConf = new Configuration(false);
-      sslConf.addResource(conf.get(
-          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
-      httpServer.addSslListener(httpsAddress, sslConf, needClientAuth);
+      httpsAddress = httpServer.getConnectorAddress(1);
       // assume same ssl port for all datanodes
       InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
         DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
       httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, datanodeSslPort
         .getPort());
     }
-    httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
-    httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
-    setupServlets(httpServer, conf);
-    httpServer.start();
-    httpAddress = new InetSocketAddress(bindAddress.getAddress(),
-        httpServer.getPort());
-    if (certSSL) {
-      httpsAddress = new InetSocketAddress(bindAddress.getAddress(),
-          httpServer.getConnectorPort(1));
-    }
   }
   
   private Map<String, String> getAuthFilterParams(Configuration conf)
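
The rewrite above moves the NameNode web UI onto the endpoint-based HttpServer.Builder API: listeners are declared up front as http:// and https:// URIs, SSL settings are injected through DFSUtil.loadSslConfToHttpServerBuilder, and bound addresses are read back with getConnectorAddress(index). A hedged sketch of that pattern, with placeholder host/port values and a simplified surrounding method, not the actual NameNodeHttpServer code:

import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.http.HttpServer;

/** Illustrative only: endpoint-based HttpServer setup as used in the hunk above. */
class EndpointExample {
  static HttpServer startWebServer(Configuration conf, Configuration sslConf,
      boolean httpsEnabled) throws Exception {
    HttpServer.Builder builder = new HttpServer.Builder()
        .setName("hdfs")
        .addEndpoint(URI.create("http://0.0.0.0:50070"))        // placeholder address
        .setFindPort(false)
        .setConf(conf);
    if (httpsEnabled) {                                          // dfs.https.enable in the hunk
      builder.addEndpoint(URI.create("https://0.0.0.0:50470")); // placeholder address
      DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
    }
    HttpServer server = builder.build();
    server.start();
    // connector index follows the order the endpoints were added
    InetSocketAddress httpAddr = server.getConnectorAddress(0);
    System.out.println("HTTP bound to " + httpAddr);
    return server;
  }
}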

+ 10 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java

@@ -101,6 +101,16 @@ public interface NameNodeMXBean {
    * @return the percentage of the remaining space on the cluster
    */
   public float getPercentRemaining();
+
+  /**
+   * Returns the amount of cache used by the datanode (in bytes).
+   */
+  public long getCacheUsed();
+
+  /**
+   * Returns the total cache capacity of the datanode (in bytes).
+   */
+  public long getCacheCapacity();
   
   /**
    * Get the total space used by the block pools of this namenode
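
The two getters added above expose aggregate cache usage and capacity through the NameNode's MXBean. A sketch of reading them over JMX from inside the same JVM; the ObjectName is an assumption here, since the bean's registration name is not part of this diff, and a remote reader would additionally need a JMX connector:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

/** Illustrative JMX read of the new attributes; the ObjectName is assumed, not shown in this diff. */
class CacheMetricsReader {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    long cacheUsed = (Long) mbs.getAttribute(name, "CacheUsed");         // getCacheUsed()
    long cacheCapacity = (Long) mbs.getAttribute(name, "CacheCapacity"); // getCacheCapacity()
    System.out.println("cache used/capacity (bytes): " + cacheUsed + "/" + cacheCapacity);
  }
}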

+ 6 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -1301,26 +1302,26 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   private class ServerSideCachePoolIterator 
-      extends BatchedRemoteIterator<String, CachePoolInfo> {
+      extends BatchedRemoteIterator<String, CachePoolEntry> {
 
     public ServerSideCachePoolIterator(String prevKey) {
       super(prevKey);
     }
 
     @Override
-    public BatchedEntries<CachePoolInfo> makeRequest(String prevKey)
+    public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
         throws IOException {
       return namesystem.listCachePools(prevKey);
     }
 
     @Override
-    public String elementToPrevKey(CachePoolInfo element) {
-      return element.getPoolName();
+    public String elementToPrevKey(CachePoolEntry entry) {
+      return entry.getInfo().getPoolName();
     }
   }
 
   @Override
-  public RemoteIterator<CachePoolInfo> listCachePools(String prevKey)
+  public RemoteIterator<CachePoolEntry> listCachePools(String prevKey)
       throws IOException {
     return new ServerSideCachePoolIterator(prevKey);
   }
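
With the change above, listCachePools pages CachePoolEntry batches keyed by the pool name taken from entry.getInfo().getPoolName(). A hedged client-side sketch of iterating those entries; it assumes the matching DistributedFileSystem#listCachePools signature was updated in the same way, which is not shown in this diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;

/** Illustrative paging over cache pools; assumes the client API mirrors the server change. */
class ListCachePools {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // assumes fs.defaultFS points at an HDFS cluster
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    while (it.hasNext()) {
      CachePoolEntry entry = it.next();
      // getInfo().getPoolName() is the key the iterator pages on (see elementToPrevKey above)
      System.out.println(entry.getInfo().getPoolName());
    }
  }
}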

+ 30 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java

@@ -36,6 +36,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -46,9 +47,11 @@ import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -139,6 +142,9 @@ public class NamenodeFsck {
 
   private final Configuration conf;
   private final PrintWriter out;
+  private List<String> snapshottableDirs = null;
+
+  private BlockPlacementPolicy bpPolicy;
 
   /**
    * Filesystem checker.
@@ -162,6 +168,8 @@ public class NamenodeFsck {
     this.totalDatanodes = totalDatanodes;
     this.minReplication = minReplication;
     this.remoteAddress = remoteAddress;
+    this.bpPolicy = BlockPlacementPolicy.getInstance(conf, null,
+        networktopology);
 
     for (Iterator<String> it = pmap.keySet().iterator(); it.hasNext();) {
       String key = it.next();
@@ -178,6 +186,8 @@ public class NamenodeFsck {
       }
       else if (key.equals("startblockafter")) {
         this.currentCookie[0] = pmap.get("startblockafter")[0];
+      } else if (key.equals("includeSnapshots")) {
+        this.snapshottableDirs = new ArrayList<String>();
       }
     }
   }
@@ -194,6 +204,16 @@ public class NamenodeFsck {
       out.println(msg);
       namenode.getNamesystem().logFsckEvent(path, remoteAddress);
 
+      if (snapshottableDirs != null) {
+        SnapshottableDirectoryStatus[] snapshotDirs = namenode.getRpcServer()
+            .getSnapshottableDirListing();
+        if (snapshotDirs != null) {
+          for (SnapshottableDirectoryStatus dir : snapshotDirs) {
+            snapshottableDirs.add(dir.getFullPath().toString());
+          }
+        }
+      }
+
       final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path);
       if (file != null) {
 
@@ -272,6 +292,14 @@ public class NamenodeFsck {
     boolean isOpen = false;
 
     if (file.isDir()) {
+      if (snapshottableDirs != null && snapshottableDirs.contains(path)) {
+        String snapshotPath = (path.endsWith(Path.SEPARATOR) ? path : path
+            + Path.SEPARATOR)
+            + HdfsConstants.DOT_SNAPSHOT_DIR;
+        HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo(
+            snapshotPath);
+        check(snapshotPath, snapshotFileInfo, res);
+      }
       byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
       DirectoryListing thisListing;
       if (showFiles) {
@@ -375,9 +403,8 @@ public class NamenodeFsck {
                     locs.length + " replica(s).");
       }
       // verify block placement policy
-      BlockPlacementStatus blockPlacementStatus = 
-          BlockPlacementPolicy.getInstance(conf, null, networktopology).
-              verifyBlockPlacement(path, lBlk, targetFileReplication);
+      BlockPlacementStatus blockPlacementStatus = bpPolicy
+          .verifyBlockPlacement(path, lBlk, targetFileReplication);
       if (!blockPlacementStatus.isPlacementPolicySatisfied()) {
         res.numMisReplicatedBlocks++;
         misReplicatedPerFile++;
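
When snapshots are included, fsck (above) first collects every snapshottable directory and then also checks that directory's ".snapshot" subtree. A tiny standalone sketch of the path construction used there:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

/** Illustrative: how the extra ".snapshot" path is formed for a snapshottable directory (mirrors the hunk). */
class SnapshotPathExample {
  static String snapshotPath(String dir) {
    return (dir.endsWith(Path.SEPARATOR) ? dir : dir + Path.SEPARATOR)
        + HdfsConstants.DOT_SNAPSHOT_DIR;            // "/foo" -> "/foo/.snapshot"
  }

  public static void main(String[] args) {
    System.out.println(snapshotPath("/user/data"));  // prints /user/data/.snapshot
  }
}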

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

@@ -335,7 +335,7 @@ class NamenodeJspHelper {
         } else if (openForWrite) {
           EditLogOutputStream elos = jas.getCurrentStream();
           if (elos != null) {
-            out.println(elos.generateHtmlReport());
+            out.println(elos.generateReport());
           } else {
             out.println("not currently writing");
           }

+ 11 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -30,6 +30,7 @@ import java.io.FilenameFilter;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
@@ -214,7 +215,7 @@ public class SecondaryNameNode implements Runnable {
   
   /**
    * Initialize SecondaryNameNode.
-   * @param commandLineOpts 
+   * @param commandLineOpts
    */
   private void initialize(final Configuration conf,
       CommandLineOpts commandLineOpts) throws IOException {
@@ -256,8 +257,15 @@
 
     // initialize the webserver for uploading files.
     int tmpInfoPort = infoSocAddr.getPort();
+    URI httpEndpoint;
+    try {
+      httpEndpoint = new URI("http://" + NetUtils.getHostPortString(infoSocAddr));
+    } catch (URISyntaxException e) {
+      throw new IOException(e);
+    }
+
     infoServer = new HttpServer.Builder().setName("secondary")
     infoServer = new HttpServer.Builder().setName("secondary")
+        .addEndpoint(httpEndpoint)
         .setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
         .setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
             new AccessControlList(conf.get(DFS_ADMIN, " ")))
         .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
     LOG.info("Web server init done");
     LOG.info("Web server init done");
 
     // The web-server port can be ephemeral... ensure we have the correct info
+    infoPort = infoServer.getConnectorAddress(0).getPort();
 
 
     conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort);
     LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);

+ 19 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java

@@ -35,7 +35,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -46,6 +47,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.io.MD5Hash;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -62,6 +64,15 @@ public class TransferFsImage {
   public final static String MD5_HEADER = "X-MD5-Digest";
   @VisibleForTesting
   static int timeout = 0;
+  private static URLConnectionFactory connectionFactory;
+  private static boolean isSpnegoEnabled;
+
+  static {
+    Configuration conf = new Configuration();
+    connectionFactory = URLConnectionFactory
+        .newDefaultURLConnectionFactory(conf);
+    isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
+  }
 
   private static final Log LOG = LogFactory.getLog(TransferFsImage.class);
   
@@ -250,8 +261,13 @@ public class TransferFsImage {
   public static MD5Hash doGetUrl(URL url, List<File> localPaths,
       Storage dstStorage, boolean getChecksum) throws IOException {
     long startTime = Time.monotonicNow();
-    HttpURLConnection connection = (HttpURLConnection)
-      SecurityUtil.openSecureHttpConnection(url);
+    HttpURLConnection connection;
+    try {
+      connection = (HttpURLConnection)
+        connectionFactory.openConnection(url, isSpnegoEnabled);
+    } catch (AuthenticationException e) {
+      throw new IOException(e);
+    }
 
     if (timeout <= 0) {
       Configuration conf = new HdfsConfiguration();
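
Image and edit-log transfers above now open their HTTP connections through a shared URLConnectionFactory, passing the SPNEGO flag when security is enabled and rewrapping AuthenticationException as IOException. A minimal sketch of that call pattern in isolation:

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;

/** Illustrative: opening an HTTP connection the way the hunk above does. */
class OpenImageConnection {
  static HttpURLConnection open(URL url, Configuration conf) throws IOException {
    URLConnectionFactory factory =
        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
    boolean spnego = UserGroupInformation.isSecurityEnabled();
    try {
      return (HttpURLConnection) factory.openConnection(url, spnego);
    } catch (AuthenticationException e) {
      throw new IOException(e); // same wrapping as in TransferFsImage above
    }
  }
}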

+ 92 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java

@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
+import org.apache.hadoop.hdfs.server.namenode.Quota;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
+
+/**
+ * The difference of an {@link INodeFile} between two snapshots.
+ */
+public class FileDiff extends
+    AbstractINodeDiff<INodeFileWithSnapshot, INodeFileAttributes, FileDiff> {
+
+  /** The file size at snapshot creation time. */
+  private final long fileSize;
+
+  FileDiff(Snapshot snapshot, INodeFile file) {
+    super(snapshot, null, null);
+    fileSize = file.computeFileSize();
+  }
+
+  /** Constructor used by FSImage loading */
+  FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode,
+      FileDiff posteriorDiff, long fileSize) {
+    super(snapshot, snapshotINode, posteriorDiff);
+    this.fileSize = fileSize;
+  }
+
+  /** @return the file size in the snapshot. */
+  public long getFileSize() {
+    return fileSize;
+  }
+  
+  @Override
+  Quota.Counts combinePosteriorAndCollectBlocks(
+      INodeFileWithSnapshot currentINode, FileDiff posterior,
+      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+    return currentINode.updateQuotaAndCollectBlocks(posterior, collectedBlocks,
+        removedINodes);
+  }
+  
+  @Override
+  public String toString() {
+    return super.toString() + " fileSize=" + fileSize + ", rep="
+        + (snapshotINode == null? "?": snapshotINode.getFileReplication());
+  }
+
+  @Override
+  void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
+    writeSnapshot(out);
+    out.writeLong(fileSize);
+
+    // write snapshotINode
+    if (snapshotINode != null) {
+      out.writeBoolean(true);
+      FSImageSerialization.writeINodeFileAttributes(snapshotINode, out);
+    } else {
+      out.writeBoolean(false);
+    }
+  }
+
+  @Override
+  Quota.Counts destroyDiffAndCollectBlocks(INodeFileWithSnapshot currentINode,
+      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+    return currentINode.updateQuotaAndCollectBlocks(this, collectedBlocks,
+        removedINodes);
+  }
+}

+ 35 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java

@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
+
+/** A list of FileDiffs for storing snapshot data. */
+public class FileDiffList extends
+    AbstractINodeDiffList<INodeFileWithSnapshot, INodeFileAttributes, FileDiff> {
+  
+  @Override
+  FileDiff createDiff(Snapshot snapshot, INodeFileWithSnapshot file) {
+    return new FileDiff(snapshot, file);
+  }
+  
+  @Override
+  INodeFileAttributes createSnapshotCopy(INodeFileWithSnapshot currentINode) {
+    return new INodeFileAttributes.SnapshotCopy(currentINode);
+  }
+}

+ 0 - 227
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java

@@ -1,227 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.snapshot;
-
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
-import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
-import org.apache.hadoop.hdfs.server.namenode.Quota;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
-
-/**
- * An interface for {@link INodeFile} to support snapshot.
- */
-@InterfaceAudience.Private
-public interface FileWithSnapshot {
-  /**
-   * The difference of an {@link INodeFile} between two snapshots.
-   */
-  public static class FileDiff extends AbstractINodeDiff<INodeFile, INodeFileAttributes, FileDiff> {
-    /** The file size at snapshot creation time. */
-    private final long fileSize;
-
-    private FileDiff(Snapshot snapshot, INodeFile file) {
-      super(snapshot, null, null);
-      fileSize = file.computeFileSize();
-    }
-
-    /** Constructor used by FSImage loading */
-    FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode,
-        FileDiff posteriorDiff, long fileSize) {
-      super(snapshot, snapshotINode, posteriorDiff);
-      this.fileSize = fileSize;
-    }
-
-    /** @return the file size in the snapshot. */
-    public long getFileSize() {
-      return fileSize;
-    }
-
-    private static Quota.Counts updateQuotaAndCollectBlocks(
-        INodeFile currentINode, FileDiff removed,
-        BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
-      FileWithSnapshot sFile = (FileWithSnapshot) currentINode;
-      long oldDiskspace = currentINode.diskspaceConsumed();
-      if (removed.snapshotINode != null) {
-        short replication = removed.snapshotINode.getFileReplication();
-        short currentRepl = currentINode.getBlockReplication();
-        if (currentRepl == 0) {
-          oldDiskspace = currentINode.computeFileSize(true, true) * replication;
-        } else if (replication > currentRepl) {  
-          oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
-              * replication;
-        }
-      }
-      
-      Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes);
-      
-      long dsDelta = oldDiskspace - currentINode.diskspaceConsumed();
-      return Quota.Counts.newInstance(0, dsDelta);
-    }
-    
-    @Override
-    Quota.Counts combinePosteriorAndCollectBlocks(INodeFile currentINode,
-        FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
-        final List<INode> removedINodes) {
-      return updateQuotaAndCollectBlocks(currentINode, posterior,
-          collectedBlocks, removedINodes);
-    }
-    
-    @Override
-    public String toString() {
-      return super.toString() + " fileSize=" + fileSize + ", rep="
-          + (snapshotINode == null? "?": snapshotINode.getFileReplication());
-    }
-
-    @Override
-    void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
-      writeSnapshot(out);
-      out.writeLong(fileSize);
-
-      // write snapshotINode
-      if (snapshotINode != null) {
-        out.writeBoolean(true);
-        FSImageSerialization.writeINodeFileAttributes(snapshotINode, out);
-      } else {
-        out.writeBoolean(false);
-      }
-    }
-
-    @Override
-    Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode,
-        BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
-      return updateQuotaAndCollectBlocks(currentINode, this,
-          collectedBlocks, removedINodes);
-    }
-  }
-
-  /** A list of FileDiffs for storing snapshot data. */
-  public static class FileDiffList
-      extends AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {
-
-    @Override
-    FileDiff createDiff(Snapshot snapshot, INodeFile file) {
-      return new FileDiff(snapshot, file);
-    }
-    
-    @Override
-    INodeFileAttributes createSnapshotCopy(INodeFile currentINode) {
-      return new INodeFileAttributes.SnapshotCopy(currentINode);
-    }
-  }
-
-  /** @return the {@link INodeFile} view of this object. */
-  public INodeFile asINodeFile();
-
-  /** @return the file diff list. */
-  public FileDiffList getDiffs();
-
-  /** Is the current file deleted? */
-  public boolean isCurrentFileDeleted();
-  
-  /** Delete the file from the current tree */
-  public void deleteCurrentFile();
-
-  /** Utility methods for the classes which implement the interface. */
-  public static class Util {
-    /** 
-     * @return block replication, which is the max file replication among
-     *         the file and the diff list.
-     */
-    public static short getBlockReplication(final FileWithSnapshot file) {
-      short max = file.isCurrentFileDeleted()? 0
-          : file.asINodeFile().getFileReplication();
-      for(FileDiff d : file.getDiffs()) {
-        if (d.snapshotINode != null) {
-          final short replication = d.snapshotINode.getFileReplication();
-          if (replication > max) {
-            max = replication;
-          }
-        }
-      }
-      return max;
-    }
-
-    /**
-     * If some blocks at the end of the block list no longer belongs to
-     * any inode, collect them and update the block list.
-     */
-    static void collectBlocksAndClear(final FileWithSnapshot file,
-        final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
-      // check if everything is deleted.
-      if (file.isCurrentFileDeleted()
-          && file.getDiffs().asList().isEmpty()) {
-        file.asINodeFile().destroyAndCollectBlocks(info, removedINodes);
-        return;
-      }
-
-      // find max file size.
-      final long max;
-      if (file.isCurrentFileDeleted()) {
-        final FileDiff last = file.getDiffs().getLast();
-        max = last == null? 0: last.fileSize;
-      } else { 
-        max = file.asINodeFile().computeFileSize();
-      }
-
-      collectBlocksBeyondMax(file, max, info);
-    }
-
-    private static void collectBlocksBeyondMax(final FileWithSnapshot file,
-        final long max, final BlocksMapUpdateInfo collectedBlocks) {
-      final BlockInfo[] oldBlocks = file.asINodeFile().getBlocks();
-      if (oldBlocks != null) {
-        //find the minimum n such that the size of the first n blocks > max
-        int n = 0;
-        for(long size = 0; n < oldBlocks.length && max > size; n++) {
-          size += oldBlocks[n].getNumBytes();
-        }
-        
-        // starting from block n, the data is beyond max.
-        if (n < oldBlocks.length) {
-          // resize the array.  
-          final BlockInfo[] newBlocks;
-          if (n == 0) {
-            newBlocks = null;
-          } else {
-            newBlocks = new BlockInfo[n];
-            System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
-          }
-          
-          // set new blocks
-          file.asINodeFile().setBlocks(newBlocks);
-
-          // collect the blocks beyond max.  
-          if (collectedBlocks != null) {
-            for(; n < oldBlocks.length; n++) {
-              collectedBlocks.addDeleteBlock(oldBlocks[n]);
-            }
-          }
-        }
-      }
-    }
-  }
-}

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java

@@ -432,8 +432,8 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
           parentPath.remove(parentPath.size() - 1);
         }
       }
-    } else if (node.isFile() && node.asFile() instanceof FileWithSnapshot) {
-      FileWithSnapshot file = (FileWithSnapshot) node.asFile();
+    } else if (node.isFile() && node.asFile() instanceof INodeFileWithSnapshot) {
+      INodeFileWithSnapshot file = (INodeFileWithSnapshot) node.asFile();
       Snapshot earlierSnapshot = diffReport.isFromEarlier() ? diffReport.from
           : diffReport.to;
       Snapshot laterSnapshot = diffReport.isFromEarlier() ? diffReport.to
@@ -441,7 +441,7 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
       boolean change = file.getDiffs().changedBetweenSnapshots(earlierSnapshot,
           laterSnapshot);
       if (change) {
-        diffReport.addFileDiff(file.asINodeFile(), relativePath);
+        diffReport.addFileDiff(file, relativePath);
       }
     }
   }
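
The hunk above now feeds the modified INodeFileWithSnapshot itself into the diff report. From a client's point of view this machinery surfaces through snapshot diff reports; a hedged usage sketch follows, noting that the DistributedFileSystem#getSnapshotDiffReport call is outside this diff and is assumed here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

/** Illustrative caller of the snapshot diff machinery touched above; API use is an assumption. */
class SnapshotDiffExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // "/data" must be snapshottable and have snapshots named s1 and s2 (placeholders)
    SnapshotDiffReport report =
        dfs.getSnapshotDiffReport(new Path("/data"), "s1", "s2");
    System.out.println(report);
  }
}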

+ 12 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java

@@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
 import org.apache.hadoop.hdfs.server.namenode.INodeMap;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
@@ -55,7 +54,7 @@ import com.google.common.base.Preconditions;
  * storing snapshot data. When there are modifications to the directory, the old
  * data is stored in the latest snapshot, if there is any.
  */
-public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
+public class INodeDirectoryWithSnapshot extends INodeDirectory {
   /**
    * The difference between the current state and a previous snapshot
    * of the children list of an INodeDirectory.
@@ -185,14 +184,10 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
         INode dnode = deleted.get(d);
         if (cnode.compareTo(dnode.getLocalNameBytes()) == 0) {
           fullPath[fullPath.length - 1] = cnode.getLocalNameBytes();
-          if (cnode.isSymlink() && dnode.isSymlink()) {
-            dList.add(new DiffReportEntry(DiffType.MODIFY, fullPath));
-          } else {
-            // must be the case: delete first and then create an inode with the
-            // same name
-            cList.add(new DiffReportEntry(DiffType.CREATE, fullPath));
-            dList.add(new DiffReportEntry(DiffType.DELETE, fullPath));
-          }
+          // must be the case: delete first and then create an inode with the
+          // same name
+          cList.add(new DiffReportEntry(DiffType.CREATE, fullPath));
+          dList.add(new DiffReportEntry(DiffType.DELETE, fullPath));
           c++;
           d++;
         } else if (cnode.compareTo(dnode.getLocalNameBytes()) < 0) {
@@ -490,7 +485,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
 
   INodeDirectoryWithSnapshot(INodeDirectory that, boolean adopt,
       DirectoryDiffList diffs) {
-    super(that, adopt, that.getQuotaCounts());
+    super(that, adopt, true);
     this.diffs = diffs != null? diffs: new DirectoryDiffList();
   }
 
@@ -775,8 +770,8 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
         removedINodes, priorDeleted, countDiffChange));
     
     if (isQuotaSet()) {
-      this.addSpaceConsumed2Cache(-counts.get(Quota.NAMESPACE),
-          -counts.get(Quota.DISKSPACE));
+      getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
+          -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
     }
     return counts;
   }
@@ -809,10 +804,10 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
         // For DstReference node, since the node is not in the created list of
         // prior, we should treat it as regular file/dir
       } else if (topNode.isFile()
-          && topNode.asFile() instanceof FileWithSnapshot) {
-        FileWithSnapshot fs = (FileWithSnapshot) topNode.asFile();
-        counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior,
-            topNode.asFile(), collectedBlocks, removedINodes, countDiffChange));
+          && topNode.asFile() instanceof INodeFileWithSnapshot) {
+        INodeFileWithSnapshot fs = (INodeFileWithSnapshot) topNode.asFile();
+        counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior, fs,
+            collectedBlocks, removedINodes, countDiffChange));
       } else if (topNode.isDirectory()) {
         INodeDirectory dir = topNode.asDirectory();
         ChildrenDiff priorChildrenDiff = null;
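
The quota hunk above replaces the INodeDirectoryWithQuota superclass with the directory's quota feature object. A small illustrative helper showing that call shape; treat anything about the feature API beyond what the hunk itself shows as an assumption:

import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;

/** Illustrative only: how callers update cached quota usage after the refactor above. */
class QuotaCacheUpdate {
  static void subtractUsage(INodeDirectory dir, long nsDelta, long dsDelta) {
    if (dir.isQuotaSet()) {
      // the quota feature object replaces the old INodeDirectoryWithQuota superclass
      dir.getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(-nsDelta, -dsDelta);
    }
  }
}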

+ 104 - 13
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java

@@ -21,6 +21,7 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
@@ -31,14 +32,13 @@ import org.apache.hadoop.hdfs.server.namenode.Quota;
  * Represent an {@link INodeFile} that is snapshotted.
  */
 @InterfaceAudience.Private
-public class INodeFileWithSnapshot extends INodeFile
-    implements FileWithSnapshot {
+public class INodeFileWithSnapshot extends INodeFile {
   private final FileDiffList diffs;
   private boolean isCurrentFileDeleted = false;
 
   public INodeFileWithSnapshot(INodeFile f) {
-    this(f, f instanceof FileWithSnapshot?
-        ((FileWithSnapshot)f).getDiffs(): null);
+    this(f, f instanceof INodeFileWithSnapshot ? 
+        ((INodeFileWithSnapshot) f).getDiffs() : null);
   }
 
   public INodeFileWithSnapshot(INodeFile f, FileDiffList diffs) {
@@ -46,12 +46,12 @@ public class INodeFileWithSnapshot extends INodeFile
     this.diffs = diffs != null? diffs: new FileDiffList();
   }
 
-  @Override
+  /** Is the current file deleted? */
   public boolean isCurrentFileDeleted() {
     return isCurrentFileDeleted;
   }
   
-  @Override
+  /** Delete the file from the current tree */
   public void deleteCurrentFile() {
     isCurrentFileDeleted = true;
   }
@@ -70,12 +70,7 @@ public class INodeFileWithSnapshot extends INodeFile
     return this;
   }
 
-  @Override
-  public INodeFile asINodeFile() {
-    return this;
-  }
-
-  @Override
+  /** @return the file diff list. */
   public FileDiffList getDiffs() {
     return diffs;
   }
@@ -90,7 +85,7 @@ public class INodeFileWithSnapshot extends INodeFile
         recordModification(prior, null);
         deleteCurrentFile();
       }
-      Util.collectBlocksAndClear(this, collectedBlocks, removedINodes);
+      this.collectBlocksAndClear(collectedBlocks, removedINodes);
       return Quota.Counts.newInstance();
     } else { // delete a snapshot
       prior = getDiffs().updatePrior(snapshot, prior);
@@ -104,4 +99,100 @@ public class INodeFileWithSnapshot extends INodeFile
     return super.toDetailString()
         + (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs;
   }
+  
+  /** 
+   * @return block replication, which is the max file replication among
+   *         the file and the diff list.
+   */
+  @Override
+  public short getBlockReplication() {
+    short max = isCurrentFileDeleted() ? 0 : getFileReplication();
+    for(FileDiff d : getDiffs()) {
+      if (d.snapshotINode != null) {
+        final short replication = d.snapshotINode.getFileReplication();
+        if (replication > max) {
+          max = replication;
+        }
+      }
+    }
+    return max;
+  }
+  
+  /**
+   * If some blocks at the end of the block list no longer belongs to
+   * any inode, collect them and update the block list.
+   */
+  void collectBlocksAndClear(final BlocksMapUpdateInfo info,
+      final List<INode> removedINodes) {
+    // check if everything is deleted.
+    if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
+      destroyAndCollectBlocks(info, removedINodes);
+      return;
+    }
+
+    // find max file size.
+    final long max;
+    if (isCurrentFileDeleted()) {
+      final FileDiff last = getDiffs().getLast();
+      max = last == null? 0: last.getFileSize();
+    } else { 
+      max = computeFileSize();
+    }
+
+    collectBlocksBeyondMax(max, info);
+  }
+
+  private void collectBlocksBeyondMax(final long max,
+      final BlocksMapUpdateInfo collectedBlocks) {
+    final BlockInfo[] oldBlocks = getBlocks();
+    if (oldBlocks != null) {
+      //find the minimum n such that the size of the first n blocks > max
+      int n = 0;
+      for(long size = 0; n < oldBlocks.length && max > size; n++) {
+        size += oldBlocks[n].getNumBytes();
+      }
+      
+      // starting from block n, the data is beyond max.
+      if (n < oldBlocks.length) {
+        // resize the array.  
+        final BlockInfo[] newBlocks;
+        if (n == 0) {
+          newBlocks = null;
+        } else {
+          newBlocks = new BlockInfo[n];
+          System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
+        }
+        
+        // set new blocks
+        setBlocks(newBlocks);
+
+        // collect the blocks beyond max.  
+        if (collectedBlocks != null) {
+          for(; n < oldBlocks.length; n++) {
+            collectedBlocks.addDeleteBlock(oldBlocks[n]);
+          }
+        }
+      }
+    }
+  }
+  
+  Quota.Counts updateQuotaAndCollectBlocks(FileDiff removed,
+      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+    long oldDiskspace = this.diskspaceConsumed();
+    if (removed.snapshotINode != null) {
+      short replication = removed.snapshotINode.getFileReplication();
+      short currentRepl = getBlockReplication();
+      if (currentRepl == 0) {
+        oldDiskspace = computeFileSize(true, true) * replication;
+      } else if (replication > currentRepl) {  
+        oldDiskspace = oldDiskspace / getBlockReplication()
+            * replication;
+      }
+    }
+    
+    this.collectBlocksAndClear(collectedBlocks, removedINodes);
+    
+    long dsDelta = oldDiskspace - diskspaceConsumed();
+    return Quota.Counts.newInstance(0, dsDelta);
+  }
 }
 }
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java

@@ -136,7 +136,7 @@ public class Snapshot implements Comparable<byte[]> {
   /** The root directory of the snapshot. */
   static public class Root extends INodeDirectory {
     Root(INodeDirectory other) {
-      super(other, false);
+      super(other, false, false);
     }
 
     @Override

+ 2 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java

@@ -36,8 +36,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
 import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
 import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
@@ -99,8 +97,8 @@ public class SnapshotFSImageFormat {
   
   
  public static void saveFileDiffList(final INodeFile file,
      final DataOutput out) throws IOException {
-    saveINodeDiffs(file instanceof FileWithSnapshot?
-        ((FileWithSnapshot)file).getDiffs(): null, out, null);
+    saveINodeDiffs(file instanceof INodeFileWithSnapshot?
+        ((INodeFileWithSnapshot) file).getDiffs(): null, out, null);
  }

  public static FileDiffList loadFileDiffList(DataInput in,

+ 72 - 22
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java

@@ -29,11 +29,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.tools.TableListing.Justification;
 import org.apache.hadoop.ipc.RemoteException;
@@ -131,7 +133,8 @@ public class CacheAdmin extends Configured implements Tool {
     @Override
     public String getShortUsage() {
       return "[" + getName() +
-          " -path <path> -replication <replication> -pool <pool-name>]\n";
+          " -path <path> -pool <pool-name> " +
+          "[-replication <replication>] [-ttl <time-to-live>]]\n";
     }

     @Override
@@ -139,11 +142,15 @@ public class CacheAdmin extends Configured implements Tool {
       TableListing listing = getOptionDescriptionListing();
       listing.addRow("<path>", "A path to cache. The path can be " +
           "a directory or a file.");
-      listing.addRow("<replication>", "The cache replication factor to use. " +
-          "Defaults to 1.");
       listing.addRow("<pool-name>", "The pool to which the directive will be " +
           "added. You must have write permission on the cache pool "
           + "in order to add new directives.");
+      listing.addRow("<replication>", "The cache replication factor to use. " +
+          "Defaults to 1.");
+      listing.addRow("<time-to-live>", "How long the directive is " +
+          "valid. Can be specified in minutes, hours, and days via e.g. " +
+          "30m, 4h, 2d. Valid units are [smhd]." +
+          " If unspecified, the directive never expires.");
       return getShortUsage() + "\n" +
         "Add a new cache directive.\n\n" +
         listing.toString();
@@ -151,33 +158,48 @@ public class CacheAdmin extends Configured implements Tool {
 
 
     @Override
     public int run(Configuration conf, List<String> args) throws IOException {
+      CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder();
+
       String path = StringUtils.popOptionWithArgument("-path", args);
       if (path == null) {
         System.err.println("You must specify a path with -path.");
         return 1;
       }
-      short replication = 1;
-      String replicationString =
-          StringUtils.popOptionWithArgument("-replication", args);
-      if (replicationString != null) {
-        replication = Short.parseShort(replicationString);
-      }
+      builder.setPath(new Path(path));
+
       String poolName = StringUtils.popOptionWithArgument("-pool", args);
       if (poolName == null) {
         System.err.println("You must specify a pool name with -pool.");
         return 1;
       }
+      builder.setPool(poolName);
+
+      String replicationString =
+          StringUtils.popOptionWithArgument("-replication", args);
+      if (replicationString != null) {
+        Short replication = Short.parseShort(replicationString);
+        builder.setReplication(replication);
+      }
+
+      String ttlString = StringUtils.popOptionWithArgument("-ttl", args);
+      if (ttlString != null) {
+        try {
+          long ttl = DFSUtil.parseRelativeTime(ttlString);
+          builder.setExpiration(CacheDirectiveInfo.Expiration.newRelative(ttl));
+        } catch (IOException e) {
+          System.err.println(
+              "Error while parsing ttl value: " + e.getMessage());
+          return 1;
+        }
+      }
+
       if (!args.isEmpty()) {
         System.err.println("Can't understand argument: " + args.get(0));
         return 1;
       }
        
       DistributedFileSystem dfs = getDFS(conf);
-      CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder().
-          setPath(new Path(path)).
-          setReplication(replication).
-          setPool(poolName).
-          build();
+      CacheDirectiveInfo directive = builder.build();
       try {
         long id = dfs.addCacheDirective(directive);
         System.out.println("Added cache directive " + id);
@@ -260,7 +282,7 @@ public class CacheAdmin extends Configured implements Tool {
     public String getShortUsage() {
       return "[" + getName() +
           " -id <id> [-path <path>] [-replication <replication>] " +
-          "[-pool <pool-name>] ]\n";
+          "[-pool <pool-name>] [-ttl <time-to-live>]]\n";
     }

     @Override
@@ -274,6 +296,10 @@ public class CacheAdmin extends Configured implements Tool {
       listing.addRow("<pool-name>", "The pool to which the directive will be " +
           "added. You must have write permission on the cache pool "
           + "in order to move a directive into it. (optional)");
+      listing.addRow("<time-to-live>", "How long the directive is " +
+          "valid. Can be specified in minutes, hours, and days via e.g. " +
+          "30m, 4h, 2d. Valid units are [smhd]." +
+          " If unspecified, the directive never expires.");
       return getShortUsage() + "\n" +
         "Modify a cache directive.\n\n" +
         listing.toString();
@@ -307,6 +333,19 @@ public class CacheAdmin extends Configured implements Tool {
         builder.setPool(poolName);
         modified = true;
       }
+      String ttlString = StringUtils.popOptionWithArgument("-ttl", args);
+      if (ttlString != null) {
+        long ttl;
+        try {
+          ttl = DFSUtil.parseRelativeTime(ttlString);
+        } catch (IOException e) {
+          System.err.println(
+              "Error while parsing ttl value: " + e.getMessage());
+          return 1;
+        }
+        builder.setExpiration(CacheDirectiveInfo.Expiration.newRelative(ttl));
+        modified = true;
+      }
       if (!args.isEmpty()) {
         System.err.println("Can't understand argument: " + args.get(0));
         System.err.println("Usage is " + getShortUsage());
@@ -434,7 +473,8 @@ public class CacheAdmin extends Configured implements Tool {
       TableListing.Builder tableBuilder = new TableListing.Builder().
           addField("ID", Justification.RIGHT).
           addField("POOL", Justification.LEFT).
-          addField("REPLICATION", Justification.RIGHT).
+          addField("REPL", Justification.RIGHT).
+          addField("EXPIRY", Justification.LEFT).
           addField("PATH", Justification.LEFT);
       if (printStats) {
         tableBuilder.addField("NEEDED", Justification.RIGHT).
@@ -455,6 +495,14 @@ public class CacheAdmin extends Configured implements Tool {
         row.add("" + directive.getId());
         row.add(directive.getPool());
         row.add("" + directive.getReplication());
+        String expiry;
+        if (directive.getExpiration().getMillis() ==
+            CacheDirectiveInfo.Expiration.EXPIRY_NEVER) {
+          expiry = "never";
+        } else {
+          expiry = directive.getExpiration().toString();
+        }
+        row.add(expiry);
         row.add(directive.getPath().toUri().getPath());
         if (printStats) {
           row.add("" + stats.getBytesNeeded());
@@ -755,9 +803,10 @@ public class CacheAdmin extends Configured implements Tool {
           build();
       int numResults = 0;
       try {
-        RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
+        RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
         while (iter.hasNext()) {
-          CachePoolInfo info = iter.next();
+          CachePoolEntry entry = iter.next();
+          CachePoolInfo info = entry.getInfo();
           String[] row = new String[5];
           if (name == null || info.getPoolName().equals(name)) {
             row[0] = info.getPoolName();
@@ -822,14 +871,15 @@ public class CacheAdmin extends Configured implements Tool {
         return 0;
       }
       String commandName = args.get(0);
-      Command command = determineCommand(commandName);
+      // prepend a dash to match against the command names
+      Command command = determineCommand("-"+commandName);
       if (command == null) {
         System.err.print("Sorry, I don't know the command '" +
           commandName + "'.\n");
-        System.err.print("Valid command names are:\n");
+        System.err.print("Valid help command names are:\n");
         String separator = "";
         for (Command c : COMMANDS) {
-          System.err.print(separator + c.getName());
+          System.err.print(separator + c.getName().substring(1));
           separator = ", ";
         }
         System.err.print("\n");
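Editor's note (not part of this commit): the -ttl plumbing added to CacheAdmin above can also be exercised programmatically. A minimal sketch, assuming an already-initialized DistributedFileSystem handle; the path and pool names are placeholders:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class CacheTtlSketch {
  // Mirrors the builder calls the updated add command makes when -ttl is given.
  static long addWithTtl(DistributedFileSystem dfs) throws IOException {
    long ttl = DFSUtil.parseRelativeTime("4h");          // e.g. 30m, 4h, 2d
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/data/hot"))                  // placeholder path
        .setPool("pool1")                                // placeholder pool name
        .setReplication((short) 2)
        .setExpiration(CacheDirectiveInfo.Expiration.newRelative(ttl))
        .build();
    return dfs.addCacheDirective(directive);             // id of the new directive
  }
}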

+ 32 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java

@@ -36,9 +36,10 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -82,18 +83,28 @@ public class DFSck extends Configured implements Tool {
       + "\t-delete\tdelete corrupted files\n"
       + "\t-files\tprint out files being checked\n"
       + "\t-openforwrite\tprint out files opened for write\n"
+      + "\t-includeSnapshots\tinclude snapshot data if the given path"
+      + " indicates a snapshottable directory or there are "
+      + "snapshottable directories under it\n"
       + "\t-list-corruptfileblocks\tprint out list of missing "
       + "blocks and files they belong to\n"
       + "\t-blocks\tprint out block report\n"
       + "\t-locations\tprint out locations for every block\n"
-      + "\t-racks\tprint out network topology for data-node locations\n"
-      + "\t\tBy default fsck ignores files opened for write, "
+      + "\t-racks\tprint out network topology for data-node locations\n\n"
+      + "Please Note:\n"
+      + "\t1. By default fsck ignores files opened for write, "
       + "use -openforwrite to report such files. They are usually "
       + " tagged CORRUPT or HEALTHY depending on their block "
-      + "allocation status";
+      + "allocation status\n"
+      + "\t2. Option -includeSnapshots should not be used for comparing stats,"
+      + " should be used only for HEALTH check, as this may contain duplicates"
+      + " if the same file present in both original fs tree "
+      + "and inside snapshots.";
  
   private final UserGroupInformation ugi;
   private final PrintStream out;
+  private final URLConnectionFactory connectionFactory;
+  private final boolean isSpnegoEnabled;

   /**
    * Filesystem checker.
@@ -107,6 +118,9 @@ public class DFSck extends Configured implements Tool {
     super(conf);
     this.ugi = UserGroupInformation.getCurrentUser();
     this.out = out;
+    this.connectionFactory = URLConnectionFactory
+        .newDefaultURLConnectionFactory(conf);
+    this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
   }

   /**
@@ -158,7 +172,12 @@ public class DFSck extends Configured implements Tool {
         url.append("&startblockafter=").append(String.valueOf(cookie));
       }
       URL path = new URL(url.toString());
-      URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
+      URLConnection connection;
+      try {
+        connection = connectionFactory.openConnection(path, isSpnegoEnabled);
+      } catch (AuthenticationException e) {
+        throw new IOException(e);
+      }
       InputStream stream = connection.getInputStream();
       BufferedReader input = new BufferedReader(new InputStreamReader(
           stream, "UTF-8"));
@@ -255,6 +274,8 @@ public class DFSck extends Configured implements Tool {
       else if (args[idx].equals("-list-corruptfileblocks")) {
         url.append("&listcorruptfileblocks=1");
         doListCorruptFileBlocks = true;
+      } else if (args[idx].equals("-includeSnapshots")) {
+        url.append("&includeSnapshots=1");
       } else if (!args[idx].startsWith("-")) {
         if (null == dir) {
           dir = args[idx];
@@ -278,7 +299,12 @@ public class DFSck extends Configured implements Tool {
       return listCorruptFileBlocks(dir, url.toString());
     }
     URL path = new URL(url.toString());
-    URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
+    URLConnection connection;
+    try {
+      connection = connectionFactory.openConnection(path, isSpnegoEnabled);
+    } catch (AuthenticationException e) {
+      throw new IOException(e);
+    }
     InputStream stream = connection.getInputStream();
     BufferedReader input = new BufferedReader(new InputStreamReader(
                                               stream, "UTF-8"));

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java

@@ -145,7 +145,7 @@ public class DelegationTokenFetcher {
     // default to using the local file system
     FileSystem local = FileSystem.getLocal(conf);
     final Path tokenFile = new Path(local.getWorkingDirectory(), remaining[0]);
-    final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+    final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;

     // Login the current user
     UserGroupInformation.getCurrentUser().doAs(

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java

@@ -176,10 +176,9 @@ public class HftpFileSystem extends FileSystem
    * Initialize connectionFactory and tokenAspect. This function is intended to
    * be overridden by HsFtpFileSystem.
    */
-  protected void initConnectionFactoryAndTokenAspect(Configuration conf)
+  protected void initTokenAspect(Configuration conf)
       throws IOException {
     tokenAspect = new TokenAspect<HftpFileSystem>(this, TOKEN_KIND);
-    connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
   }

   @Override
@@ -187,6 +186,8 @@ public class HftpFileSystem extends FileSystem
   throws IOException {
     super.initialize(name, conf);
     setConf(conf);
+    this.connectionFactory = URLConnectionFactory
+        .newDefaultURLConnectionFactory(conf);
     this.ugi = UserGroupInformation.getCurrentUser();
     this.nnUri = getNamenodeUri(name);
 
 
@@ -197,7 +198,7 @@ public class HftpFileSystem extends FileSystem
       throw new IllegalArgumentException(e);
     }

-    initConnectionFactoryAndTokenAspect(conf);
+    initTokenAspect(conf);
     if (UserGroupInformation.isSecurityEnabled()) {
       tokenAspect.initDelegationToken(ugi);
     }
@@ -338,7 +339,7 @@ public class HftpFileSystem extends FileSystem
   }

   static class RangeHeaderUrlOpener extends ByteRangeInputStream.URLOpener {
-    URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+    URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;

     RangeHeaderUrlOpener(final URL url) {
       super(url);

+ 1 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java

@@ -19,7 +19,6 @@
 package org.apache.hadoop.hdfs.web;

 import java.io.IOException;
-import java.security.GeneralSecurityException;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -61,18 +60,8 @@ public class HsftpFileSystem extends HftpFileSystem {
   }

   @Override
-  protected void initConnectionFactoryAndTokenAspect(Configuration conf) throws IOException {
+  protected void initTokenAspect(Configuration conf) throws IOException {
     tokenAspect = new TokenAspect<HftpFileSystem>(this, TOKEN_KIND);
-
-    connectionFactory = new URLConnectionFactory(
-        URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
-    try {
-      connectionFactory.setConnConfigurator(URLConnectionFactory
-          .newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
-              conf));
-    } catch (GeneralSecurityException e) {
-      throw new IOException(e);
-    }
   }

   @Override

+ 0 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java

@@ -17,10 +17,6 @@
  */
 package org.apache.hadoop.hdfs.web;

-import java.io.IOException;
-import java.security.GeneralSecurityException;
-
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.Text;
 
 
@@ -44,20 +40,6 @@ public class SWebHdfsFileSystem extends WebHdfsFileSystem {
     tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
   }

-  @Override
-  protected void initializeConnectionFactory(Configuration conf)
-      throws IOException {
-    connectionFactory = new URLConnectionFactory(
-        URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
-    try {
-      connectionFactory.setConnConfigurator(URLConnectionFactory
-          .newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
-              conf));
-    } catch (GeneralSecurityException e) {
-      throw new IOException(e);
-    }
-  }
-
   @Override
   protected int getDefaultPort() {
     return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,

+ 36 - 22
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java

@@ -39,6 +39,8 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;

+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Utilities for handling URLs
  */
@@ -54,26 +56,50 @@ public class URLConnectionFactory {
   * Timeout for socket connects and reads
   */
  public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
+  private final ConnectionConfigurator connConfigurator;

-  public static final URLConnectionFactory DEFAULT_CONNECTION_FACTORY = new URLConnectionFactory(
-      DEFAULT_SOCKET_TIMEOUT);
-
-  private int socketTimeout;
-
-  /** Configure connections for AuthenticatedURL */
-  private ConnectionConfigurator connConfigurator = new ConnectionConfigurator() {
+  private static final ConnectionConfigurator DEFAULT_TIMEOUT_CONN_CONFIGURATOR = new ConnectionConfigurator() {
     @Override
     public HttpURLConnection configure(HttpURLConnection conn)
         throws IOException {
-      URLConnectionFactory.setTimeouts(conn, socketTimeout);
+      URLConnectionFactory.setTimeouts(conn, DEFAULT_SOCKET_TIMEOUT);
       return conn;
     }
   };

+  /**
+   * The URLConnectionFactory that sets the default timeout and it only trusts
+   * Java's SSL certificates.
+   */
+  public static final URLConnectionFactory DEFAULT_SYSTEM_CONNECTION_FACTORY = new URLConnectionFactory(
+      DEFAULT_TIMEOUT_CONN_CONFIGURATOR);
+
+  /**
+   * Construct a new URLConnectionFactory based on the configuration. It will
+   * try to load SSL certificates when it is specified.
+   */
+  public static URLConnectionFactory newDefaultURLConnectionFactory(Configuration conf) {
+    ConnectionConfigurator conn = null;
+    try {
+      conn = newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf);
+    } catch (Exception e) {
+      LOG.debug(
+          "Cannot load customized ssl related configuration. Fallback to system-generic settings.",
+          e);
+      conn = DEFAULT_TIMEOUT_CONN_CONFIGURATOR;
+    }
+    return new URLConnectionFactory(conn);
+  }
+
+  @VisibleForTesting
+  URLConnectionFactory(ConnectionConfigurator connConfigurator) {
+    this.connConfigurator = connConfigurator;
+  }
+
   /**
    * Create a new ConnectionConfigurator for SSL connections
    */
-  static ConnectionConfigurator newSslConnConfigurator(final int timeout,
+  private static ConnectionConfigurator newSslConnConfigurator(final int timeout,
       Configuration conf) throws IOException, GeneralSecurityException {
     final SSLFactory factory;
     final SSLSocketFactory sf;
@@ -99,10 +125,6 @@ public class URLConnectionFactory {
     };
   }

-  public URLConnectionFactory(int socketTimeout) {
-    this.socketTimeout = socketTimeout;
-  }
-
   /**
    * Opens a url with read and connect timeouts
    *
@@ -153,14 +175,6 @@ public class URLConnectionFactory {
     }
   }

-  public ConnectionConfigurator getConnConfigurator() {
-    return connConfigurator;
-  }
-
-  public void setConnConfigurator(ConnectionConfigurator connConfigurator) {
-    this.connConfigurator = connConfigurator;
-  }
-
   /**
    * Sets timeout parameters on the given URLConnection.
    * 
@@ -169,7 +183,7 @@ public class URLConnectionFactory {
   * @param socketTimeout
   *          the connection and read timeout of the connection.
   */
-  static void setTimeouts(URLConnection connection, int socketTimeout) {
+  private static void setTimeouts(URLConnection connection, int socketTimeout) {
    connection.setConnectTimeout(socketTimeout);
    connection.setReadTimeout(socketTimeout);
  }
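Editor's note (not part of this commit): callers such as DFSck and WebHdfsFileSystem now build the factory from a Configuration and open connections through it. A minimal sketch of that pattern; the URL string is a placeholder:

import java.io.IOException;
import java.net.URL;
import java.net.URLConnection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;

public class ConnectionFactorySketch {
  static URLConnection open(Configuration conf, String address) throws IOException {
    // SSL settings are read from conf; on failure the factory falls back to
    // the default timeout-only configurator, as in newDefaultURLConnectionFactory.
    URLConnectionFactory factory =
        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
    boolean spnego = UserGroupInformation.isSecurityEnabled();
    try {
      return factory.openConnection(new URL(address), spnego);
    } catch (AuthenticationException e) {
      throw new IOException(e);
    }
  }
}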

+ 4 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -112,7 +112,7 @@ public class WebHdfsFileSystem extends FileSystem
   public static final String PATH_PREFIX = "/" + SCHEME + "/v" + VERSION;

   /** Default connection factory may be overridden in tests to use smaller timeout values */
-  URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+  protected URLConnectionFactory connectionFactory;

   /** Delegation token kind */
   public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
@@ -152,22 +152,15 @@ public class WebHdfsFileSystem extends FileSystem
     tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
   }

-  /**
-   * Initialize connectionFactory. This function is intended to
-   * be overridden by SWebHdfsFileSystem.
-   */
-  protected void initializeConnectionFactory(Configuration conf)
-      throws IOException {
-    connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
-  }
-
   @Override
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
+    connectionFactory = URLConnectionFactory
+        .newDefaultURLConnectionFactory(conf);
     initializeTokenAspect();
-    initializeConnectionFactory(conf);
+

     ugi = UserGroupInformation.getCurrentUser();
 
 

+ 16 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto

@@ -369,12 +369,19 @@ message CacheDirectiveInfoProto {
   optional string path = 2;
   optional uint32 replication = 3;
   optional string pool = 4;
+  optional CacheDirectiveInfoExpirationProto expiration = 5;
+}
+
+message CacheDirectiveInfoExpirationProto {
+  required int64 millis = 1;
+  required bool isRelative = 2;
 }

 message CacheDirectiveStatsProto {
   required int64 bytesNeeded = 1;
   required int64 bytesCached = 2;
   required int64 filesAffected = 3;
+  required bool hasExpired = 4;
 }

 message AddCacheDirectiveRequestProto {
@@ -422,6 +429,12 @@ message CachePoolInfoProto {
   optional int32 weight = 5;
 }

+message CachePoolStatsProto {
+  required int64 bytesNeeded = 1;
+  required int64 bytesCached = 2;
+  required int64 filesAffected = 3;
+}
+
 message AddCachePoolRequestProto {
   required CachePoolInfoProto info = 1;
 }
@@ -448,12 +461,13 @@ message ListCachePoolsRequestProto {
 }

 message ListCachePoolsResponseProto {
-  repeated ListCachePoolsResponseElementProto elements = 1;
+  repeated CachePoolEntryProto entries = 1;
   required bool hasMore = 2;
 }

-message ListCachePoolsResponseElementProto {
+message CachePoolEntryProto {
   required CachePoolInfoProto info = 1;
+  required CachePoolStatsProto stats = 2;
 }

 message GetFileLinkInfoRequestProto {

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm

@@ -53,11 +53,12 @@ HDFS NFS Gateway
    * If the client mounts the export with access time update allowed, make sure the following 
     property is not disabled in the configuration file. Only NameNode needs to restart after 
     this property is changed. On some Unix systems, the user can disable access time update
-    by mounting the export with "noatime".
+    by mounting the export with "noatime". If the export is mounted with "noatime", the user 
+    doesn't need to change the following property and thus no need to restart namenode.

 ----
 <property>
-  <name>dfs.access.time.precision</name>
+  <name>dfs.namenode.accesstime.precision</name>
   <value>3600000</value>
   <description>The access time for HDFS file is precise upto this value. 
     The default value is 1 hour. Setting a value of 0 disables

+ 13 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java

@@ -191,21 +191,25 @@ public class TestDFSRollback {
       // Create a previous snapshot for the blockpool
       UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous",
           UpgradeUtilities.getCurrentBlockPoolID(cluster));
-      // Older LayoutVersion to make it rollback
+      // Put newer layout version in current.
       storageInfo = new StorageInfo(
-          UpgradeUtilities.getCurrentLayoutVersion()+1,
+          UpgradeUtilities.getCurrentLayoutVersion()-1,
           UpgradeUtilities.getCurrentNamespaceID(cluster),
           UpgradeUtilities.getCurrentClusterID(cluster),
           UpgradeUtilities.getCurrentFsscTime(cluster));
-      // Create old VERSION file for each data dir
+
+      // Overwrite VERSION file in the current directory of
+      // volume directories and block pool slice directories
+      // with a layout version from future.
+      File[] dataCurrentDirs = new File[dataNodeDirs.length];
       for (int i=0; i<dataNodeDirs.length; i++) {
-        Path bpPrevPath = new Path(dataNodeDirs[i] + "/current/"
-            + UpgradeUtilities.getCurrentBlockPoolID(cluster));
-        UpgradeUtilities.createBlockPoolVersionFile(
-            new File(bpPrevPath.toString()),
-            storageInfo,
-            UpgradeUtilities.getCurrentBlockPoolID(cluster));
+        dataCurrentDirs[i] = new File((new Path(dataNodeDirs[i] 
+            + "/current")).toString());
       }
+      UpgradeUtilities.createDataNodeVersionFile(
+          dataCurrentDirs,
+          storageInfo,
+          UpgradeUtilities.getCurrentBlockPoolID(cluster));

       cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
       assertTrue(cluster.isDataNodeUp());

+ 41 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java

@@ -31,6 +31,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_A
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -62,6 +63,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
 import org.junit.Assume;
 import org.junit.Before;
@@ -724,4 +726,43 @@ public class TestDFSUtil {
         DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
         DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
   }
+
+  @Test(timeout=1000)
+  public void testDurationToString() throws Exception {
+    assertEquals("000:00:00:00", DFSUtil.durationToString(0));
+    try {
+      DFSUtil.durationToString(-199);
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("Invalid negative duration", e);
+    }
+    assertEquals("001:01:01:01",
+        DFSUtil.durationToString(((24*60*60)+(60*60)+(60)+1)*1000));
+    assertEquals("000:23:59:59",
+        DFSUtil.durationToString(((23*60*60)+(59*60)+(59))*1000));
+  }
+
+  @Test(timeout=5000)
+  public void testRelativeTimeConversion() throws Exception {
+    try {
+      DFSUtil.parseRelativeTime("1");
+    } catch (IOException e) {
+      assertExceptionContains("too short", e);
+    }
+    try {
+      DFSUtil.parseRelativeTime("1z");
+    } catch (IOException e) {
+      assertExceptionContains("unknown time unit", e);
+    }
+    try {
+      DFSUtil.parseRelativeTime("yyz");
+    } catch (IOException e) {
+      assertExceptionContains("is not a number", e);
+    }
+    assertEquals(61*1000, DFSUtil.parseRelativeTime("61s"));
+    assertEquals(61*60*1000, DFSUtil.parseRelativeTime("61m"));
+    assertEquals(0, DFSUtil.parseRelativeTime("0s"));
+    assertEquals(25*60*60*1000, DFSUtil.parseRelativeTime("25h"));
+    assertEquals(4*24*60*60*1000, DFSUtil.parseRelativeTime("4d"));
+    assertEquals(999*24*60*60*1000, DFSUtil.parseRelativeTime("999d"));
+  }
 }

Some files were not shown because the number of changed files is too large.