Merge branch 'trunk' into HDFS-7240

 Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java

Resolved these conflicts by accepting the trunk version of each file.
Anu Engineer, 7 years ago
commit 1f74cb2f1a

81 changed files with 1599 additions and 1821 deletions:
  1. + 2 - 2  hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
  2. + 1 - 7  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
  3. + 3 - 8  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
  4. + 3 - 29  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
  5. + 1 - 7  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
  6. + 3 - 8  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
  7. + 4 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  8. + 10 - 8  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
  9. + 1 - 16  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
  10. + 1 - 1  hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
  11. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java
  12. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java
  13. + 4 - 7  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
  14. + 3 - 3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
  15. + 2 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
  16. + 5 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
  17. + 15 - 3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java
  18. + 0 - 15  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
  19. + 233 - 200  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
  20. + 48 - 61  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java
  21. + 64 - 64  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
  22. + 10 - 7  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java
  23. + 0 - 32  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
  24. + 0 - 19  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
  25. + 7 - 7  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
  26. + 2 - 7  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  27. + 2 - 14  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
  28. + 10 - 15  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
  29. + 1 - 2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
  30. + 1 - 2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
  31. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
  32. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java
  33. + 3 - 69  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
  34. + 92 - 0  hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
  35. + 16 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  36. + 71 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java
  37. + 49 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailureWithRandomECPolicy.java
  38. + 8 - 488  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
  39. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure000.java
  40. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure010.java
  41. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure020.java
  42. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure030.java
  43. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure040.java
  44. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure050.java
  45. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure060.java
  46. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure070.java
  47. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure080.java
  48. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure090.java
  49. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure100.java
  50. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure110.java
  51. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure120.java
  52. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure130.java
  53. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure140.java
  54. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure150.java
  55. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure160.java
  56. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure170.java
  57. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure180.java
  58. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure190.java
  59. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure200.java
  60. + 0 - 24  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure210.java
  61. + 426 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java
  62. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
  63. + 40 - 19  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
  64. + 3 - 4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
  65. + 10 - 10  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
  66. + 1 - 2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
  67. + 4 - 4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
  68. + 147 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterHeartbeatService.java
  69. + 9 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
  70. + 12 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFile.java
  71. + 47 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFileBase.java
  72. + 10 - 4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFileSystem.java
  73. + 88 - 62  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
  74. + 3 - 1  hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
  75. + 2 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java
  76. + 2 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
  77. + 3 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
  78. + 56 - 64  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
  79. + 42 - 0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
  80. + 1 - 0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
  81. + 11 - 10  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsResourceCalculator.java

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/conf/log4j.properties

@@ -172,9 +172,9 @@ log4j.appender.DNMETRICSRFA.MaxFileSize=64MB
 
 
 # AWS SDK & S3A FileSystem
-log4j.logger.com.amazonaws=ERROR
+#log4j.logger.com.amazonaws=ERROR
 log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
-log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
+#log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
 
 #
 # Event Counter Appender

+ 1 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.conf;
 import java.io.IOException;
 import java.io.Writer;
 
-import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
@@ -59,12 +58,7 @@ public class ConfServlet extends HttpServlet {
   public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
 
-    // If user is a static user and auth Type is null, that means
-    // there is a non-security environment and no need authorization,
-    // otherwise, do the authorization.
-    final ServletContext servletContext = getServletContext();
-    if (!HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) &&
-        !HttpServer2.isInstrumentationAccessAllowed(servletContext,
+    if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
                                                    request, response)) {
       return;
     }

+ 3 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.http;
 
 import java.io.IOException;
 
-import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
@@ -36,13 +35,9 @@ public class AdminAuthorizedServlet extends DefaultServlet {
 
   @Override
   protected void doGet(HttpServletRequest request, HttpServletResponse response)
-      throws ServletException, IOException {
-    // If user is a static user and auth Type is null, that means
-    // there is a non-security environment and no need authorization,
-    // otherwise, do the authorization.
-    final ServletContext servletContext = getServletContext();
-    if (HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) ||
-        HttpServer2.hasAdministratorAccess(servletContext, request,
+ throws ServletException, IOException {
+    // Do the authorization
+    if (HttpServer2.hasAdministratorAccess(getServletContext(), request,
         response)) {
       // Authorization is done. Just call super.
       super.doGet(request, response);

+ 3 - 29
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.http;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
-
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -1358,24 +1355,6 @@ public final class HttpServer2 implements FilterContainer {
     return sb.toString();
   }
 
-  /**
-   * check whether user is static and unauthenticated, if the
-   * answer is TRUE, that means http sever is in non-security
-   * environment.
-   * @param servletContext the servlet context.
-   * @param request the servlet request.
-   * @return TRUE/FALSE based on the logic described above.
-   */
-  public static boolean isStaticUserAndNoneAuthType(
-      ServletContext servletContext, HttpServletRequest request) {
-    Configuration conf =
-        (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
-    final String authType = request.getAuthType();
-    final String staticUser = conf.get(HADOOP_HTTP_STATIC_USER,
-        DEFAULT_HADOOP_HTTP_STATIC_USER);
-    return authType == null && staticUser.equals(request.getRemoteUser());
-  }
-
   /**
    * Checks the user has privileges to access to instrumentation servlets.
    * <p/>
@@ -1473,14 +1452,9 @@ public final class HttpServer2 implements FilterContainer {
 
     @Override
     public void doGet(HttpServletRequest request, HttpServletResponse response)
-        throws ServletException, IOException {
-      // If user is a static user and auth Type is null, that means
-      // there is a non-security environment and no need authorization,
-      // otherwise, do the authorization.
-      final ServletContext servletContext = getServletContext();
-      if (!HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) &&
-          !HttpServer2.isInstrumentationAccessAllowed(servletContext,
-              request, response)) {
+      throws ServletException, IOException {
+      if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
+                                                      request, response)) {
         return;
       }
       response.setContentType("text/plain; charset=UTF-8");

+ 1 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java

@@ -38,7 +38,6 @@ import javax.management.RuntimeMBeanException;
 import javax.management.openmbean.CompositeData;
 import javax.management.openmbean.CompositeType;
 import javax.management.openmbean.TabularData;
-import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
@@ -170,12 +169,7 @@ public class JMXJsonServlet extends HttpServlet {
   @Override
   public void doGet(HttpServletRequest request, HttpServletResponse response) {
     try {
-      // If user is a static user and auth Type is null, that means
-      // there is a non-security environment and no need authorization,
-      // otherwise, do the authorization.
-      final ServletContext servletContext = getServletContext();
-      if (!HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) &&
-          !isInstrumentationAccessAllowed(request, response)) {
+      if (!isInstrumentationAccessAllowed(request, response)) {
         return;
       }
       JsonGenerator jg = null;

+ 3 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java

@@ -27,7 +27,6 @@ import java.util.regex.Pattern;
 
 import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLSocketFactory;
-import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
@@ -324,13 +323,9 @@ public class LogLevel {
     public void doGet(HttpServletRequest request, HttpServletResponse response
         ) throws ServletException, IOException {
 
-      // If user is a static user and auth Type is null, that means
-      // there is a non-security environment and no need authorization,
-      // otherwise, do the authorization.
-      final ServletContext servletContext = getServletContext();
-      if (!HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) &&
-          !HttpServer2.hasAdministratorAccess(servletContext,
-              request, response)) {
+      // Do the authorization
+      if (!HttpServer2.hasAdministratorAccess(getServletContext(), request,
+          response)) {
         return;
       }
 

+ 4 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -1885,7 +1885,10 @@ public class UserGroupInformation {
     @Override
     public void logout() throws LoginException {
       synchronized(getSubjectLock()) {
-        super.logout();
+        if (this.getSubject() != null
+            && !this.getSubject().getPrivateCredentials().isEmpty()) {
+          super.logout();
+        }
       }
     }
   }
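
Editor's note: this hunk guards logout() so it only runs while the Subject still holds private credentials (typically Kerberos tickets), avoiding a failure from a second logout on an already-cleared login; the TestMiniKdc change further down applies the same guard. A minimal JAAS-only sketch of the pattern, with a hypothetical helper class name:

    import javax.security.auth.login.LoginContext;
    import javax.security.auth.login.LoginException;

    /** Hypothetical helper illustrating the guarded-logout pattern above. */
    final class SafeLogout {
      private SafeLogout() {
      }

      /**
       * Log out only if the login still has a Subject with private
       * credentials; calling logout() again on an already-cleared login
       * may otherwise fail with a LoginException.
       */
      static void logoutIfNeeded(LoginContext ctx) throws LoginException {
        if (ctx != null
            && ctx.getSubject() != null
            && !ctx.getSubject().getPrivateCredentials().isEmpty()) {
          ctx.logout();
        }
      }
    }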

+ 10 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java

@@ -61,7 +61,6 @@ import static org.apache.hadoop.conf.StorageUnit.MB;
 import static org.apache.hadoop.conf.StorageUnit.TB;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;
-import static org.junit.Assert.assertArrayEquals;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
@@ -93,8 +92,8 @@ public class TestConfiguration {
   final static String CONFIG_CORE = new File("./core-site.xml")
       .getAbsolutePath();
   final static String CONFIG_FOR_ENUM = new File("./test-config-enum-TestConfiguration.xml").getAbsolutePath();
-  final static String CONFIG_FOR_URI = "file://"
-      + new File("./test-config-uri-TestConfiguration.xml").getAbsolutePath();
+  final static String CONFIG_FOR_URI = new File(
+      "./test-config-uri-TestConfiguration.xml").toURI().toString();
 
   private static final String CONFIG_MULTI_BYTE = new File(
     "./test-config-multi-byte-TestConfiguration.xml").getAbsolutePath();
@@ -877,7 +876,8 @@ public class TestConfiguration {
     out.close();
     out=new BufferedWriter(new FileWriter(CONFIG));
     writeHeader();
-    declareSystemEntity("configuration", "d", CONFIG2);
+    declareSystemEntity("configuration", "d",
+        new Path(CONFIG2).toUri().toString());
     writeConfiguration();
     appendProperty("a", "b");
     appendProperty("c", "&d;");
@@ -1749,7 +1749,7 @@ public class TestConfiguration {
       assertEquals("test.key2", jp1.getKey());
       assertEquals("value2", jp1.getValue());
       assertEquals(true, jp1.isFinal);
-      assertEquals(fileResource.toUri().getPath(), jp1.getResource());
+      assertEquals(fileResource.toString(), jp1.getResource());
 
       // test xml format
       outWriter = new StringWriter();
@@ -1760,7 +1760,7 @@ public class TestConfiguration {
       assertEquals(1, actualConf1.size());
       assertEquals("value2", actualConf1.get("test.key2"));
       assertTrue(actualConf1.getFinalParameters().contains("test.key2"));
-      assertEquals(fileResource.toUri().getPath(),
+      assertEquals(fileResource.toString(),
           actualConf1.getPropertySources("test.key2")[0]);
 
       // case 2: dump an non existing property
@@ -2271,7 +2271,8 @@ public class TestConfiguration {
     final File tmpDir = GenericTestUtils.getRandomizedTestDir();
     tmpDir.mkdirs();
     final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
-        "file",  new File(tmpDir, "test.jks").toString(), null).toString();
+        "file",  new File(tmpDir, "test.jks").toURI().getPath(),
+        null).toString();
 
     conf = new Configuration(false);
     conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
@@ -2299,7 +2300,8 @@ public class TestConfiguration {
     final File tmpDir = GenericTestUtils.getRandomizedTestDir();
     tmpDir.mkdirs();
     final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
-        "file",  new File(tmpDir, "test.jks").toString(), null).toString();
+        "file",  new File(tmpDir, "test.jks").toURI().getPath(),
+        null).toString();
 
     conf = new Configuration(false);
     conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
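
Editor's note: the CONFIG_FOR_URI and keystore-URL hunks above replace hand-built "file://" + absolute-path strings with File.toURI(). A small standalone sketch (class name hypothetical) of why that is the portable form:

    import java.io.File;

    public class FileUriDemo {
      public static void main(String[] args) {
        File f = new File("./test-config-uri-TestConfiguration.xml");

        // Hand-built URI: on Windows the absolute path carries a drive letter
        // and backslashes (e.g. "file://C:\work\..."), which is not a valid URI.
        String concatenated = "file://" + f.getAbsolutePath();

        // File.toURI() normalizes separators and percent-encodes as needed,
        // producing a well-formed URI on every platform (e.g. "file:/C:/work/...").
        String portable = f.toURI().toString();

        System.out.println(concatenated);
        System.out.println(portable);
      }
    }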

+ 1 - 16
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -69,9 +69,6 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Executor;
 import java.util.concurrent.Executors;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
-
 public class TestHttpServer extends HttpServerFunctionalTest {
   static final Logger LOG = LoggerFactory.getLogger(TestHttpServer.class);
   private static HttpServer2 server;
@@ -485,7 +482,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     String serverURL = "http://"
         + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
     for (String servlet : new String[] { "conf", "logs", "stacks",
-        "logLevel", "jmx" }) {
+        "logLevel" }) {
       for (String user : new String[] { "userA", "userB", "userC", "userD" }) {
         assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL
             + servlet, user));
@@ -493,18 +490,6 @@ public class TestHttpServer extends HttpServerFunctionalTest {
       assertEquals(HttpURLConnection.HTTP_FORBIDDEN, getHttpStatusCode(
           serverURL + servlet, "userE"));
     }
-
-    // hadoop.security.authorization is set as true while
-    // hadoop.http.authentication.type's value is `simple`(default value)
-    // in this case, static user has administrator access
-    final String staticUser = conf.get(HADOOP_HTTP_STATIC_USER,
-        DEFAULT_HADOOP_HTTP_STATIC_USER);
-    for (String servlet : new String[] {"conf", "logs", "stacks",
-        "logLevel", "jmx"}) {
-      assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(
-          serverURL + servlet, staticUser));
-    }
-
     myServer.stop();
   }
   

+ 1 - 1
hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java

@@ -166,7 +166,7 @@ public class TestMiniKdc extends KerberosSecurityTestcase {
 
     } finally {
       if (loginContext != null && loginContext.getSubject() != null
-          && !loginContext.getSubject().getPrincipals().isEmpty()) {
+          && !loginContext.getSubject().getPrivateCredentials().isEmpty()) {
         loginContext.logout();
       }
     }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java

@@ -223,7 +223,7 @@ public class ZoneReencryptionStatus {
    * a listReencryptionStatus call, for the crypto admin to consume.
    */
   public void setZoneName(final String name) {
-    Preconditions.checkNotNull(name == null);
+    Preconditions.checkNotNull(name, "zone name cannot be null");
     zoneName = name;
   }
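
Editor's note: the one-line fix above matters because checkNotNull was being handed the boxed result of "name == null", which is itself never null, so a null zone name slipped through. A minimal sketch assuming Guava on the classpath (demo class name hypothetical):

    import com.google.common.base.Preconditions;

    public class CheckNotNullDemo {
      public static void main(String[] args) {
        String name = null;

        // Old form: validates the Boolean "name == null", which is always
        // non-null, so this never throws even when name is null.
        Preconditions.checkNotNull(name == null);

        // New form: validates the reference itself and fails fast.
        Preconditions.checkNotNull(name, "zone name cannot be null"); // throws NPE
      }
    }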
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java

@@ -22,7 +22,7 @@ import java.io.File;
 import java.io.InputStreamReader;
 import java.net.HttpURLConnection;
 import java.net.URL;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.text.MessageFormat;
 
 import org.apache.commons.io.FileUtils;
@@ -61,7 +61,7 @@ public class TestHttpFSServerWebServer {
     System.setProperty("httpfs.log.dir", logsDir.getAbsolutePath());
     System.setProperty("httpfs.config.dir", confDir.getAbsolutePath());
     FileUtils.writeStringToFile(new File(confDir, "httpfs-signature.secret"),
-        "foo", Charset.forName("UTF-8"));
+        "foo", StandardCharsets.UTF_8);
   }
 
   @Before

+ 4 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java

@@ -149,10 +149,8 @@ class BlockPoolManager {
     LOG.info("Refresh request received for nameservices: " +
         conf.get(DFSConfigKeys.DFS_NAMESERVICES));
 
-    Map<String, Map<String, InetSocketAddress>> newAddressMap =
-        new HashMap<>();
-    Map<String, Map<String, InetSocketAddress>> newLifelineAddressMap =
-        new HashMap<>();
+    Map<String, Map<String, InetSocketAddress>> newAddressMap = null;
+    Map<String, Map<String, InetSocketAddress>> newLifelineAddressMap = null;
 
     try {
       newAddressMap =
@@ -160,11 +158,10 @@ class BlockPoolManager {
       newLifelineAddressMap =
           DFSUtil.getNNLifelineRpcAddressesForCluster(conf);
     } catch (IOException ioe) {
-      LOG.warn("Unable to get NameNode addresses. (Note: Namenode is required "
-          +  "even for Ozone cluster.)");
+      LOG.warn("Unable to get NameNode addresses.");
     }
 
-    if (newAddressMap.isEmpty()) {
+    if (newAddressMap == null || newAddressMap.isEmpty()) {
       throw new IOException("No services to connect, missing NameNode " +
           "address.");
     }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java

@@ -812,7 +812,6 @@ class BlockPoolSlice {
           break;
         }
       }
-      inputStream.close();
       // Now it is safe to add the replica into volumeMap
       // In case of any exception during parsing this cache file, fall back
       // to scan all the files on disk.
@@ -835,12 +834,13 @@ class BlockPoolSlice {
       return false;
     }
     finally {
+      // close the inputStream
+      IOUtils.closeStream(inputStream);
+
       if (!fileIoProvider.delete(volume, replicaFile)) {
         LOG.info("Failed to delete replica cache file: " +
             replicaFile.getPath());
       }
-      // close the inputStream
-      IOUtils.closeStream(inputStream);
     }
   }
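
Editor's note: the hunk above moves the stream close ahead of the cache-file delete inside the finally block; deleting a file that still has an open handle can fail, notably on Windows. A stripped-down java.io sketch of the ordering (names hypothetical):

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class CloseBeforeDelete {
      static void readThenDiscard(File replicaCacheFile) throws IOException {
        InputStream in = new FileInputStream(replicaCacheFile);
        try {
          // ... parse the cache file ...
        } finally {
          // Close the stream first; an open handle can block the delete below.
          try {
            in.close();
          } catch (IOException ignored) {
            // best-effort close, mirroring IOUtils.closeStream()
          }
          if (!replicaCacheFile.delete()) {
            System.err.println("Failed to delete " + replicaCacheFile.getPath());
          }
        }
      }
    }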
 

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.web.JsonUtil;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Locale;
@@ -284,7 +285,7 @@ public class DiskBalancerCluster {
   public void createSnapshot(String snapShotName) throws IOException {
     String json = this.toJson();
     File outFile = new File(getOutput() + "/" + snapShotName);
-    FileUtils.writeStringToFile(outFile, json);
+    FileUtils.writeStringToFile(outFile, json, StandardCharsets.UTF_8);
   }
 
   /**
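
Editor's note: this hunk (like the TestHttpFSServerWebServer change above) switches to the commons-io overload that takes an explicit charset; the no-charset variant relies on the platform default encoding and is deprecated in commons-io 2.x. A tiny sketch assuming commons-io on the classpath (file name hypothetical):

    import java.io.File;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.commons.io.FileUtils;

    public class WriteStringDemo {
      public static void main(String[] args) throws IOException {
        File out = new File("snapshot.json");
        // Deprecated: FileUtils.writeStringToFile(out, "{}") uses the platform
        // default encoding, so the bytes written depend on the JVM's locale.
        // Preferred: name the encoding explicitly.
        FileUtils.writeStringToFile(out, "{}", StandardCharsets.UTF_8);
      }
    }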

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java

@@ -140,5 +140,10 @@ public final class StateStoreMetrics implements StateStoreMBean {
     writes.resetMinMax();
     removes.resetMinMax();
     failures.resetMinMax();
+
+    reads.lastStat().reset();
+    writes.lastStat().reset();
+    removes.lastStat().reset();
+    failures.lastStat().reset();
   }
 }

+ 15 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.CachedRecordStore;
@@ -75,14 +76,15 @@ public class RouterHeartbeatService extends PeriodicService {
   /**
    * Update the state of the Router in the State Store.
    */
-  private synchronized void updateStateStore() {
+  @VisibleForTesting
+  synchronized void updateStateStore() {
     String routerId = router.getRouterId();
     if (routerId == null) {
       LOG.error("Cannot heartbeat for router: unknown router id");
       return;
     }
-    RouterStore routerStore = router.getRouterStateManager();
-    if (routerStore != null) {
+    if (isStoreAvailable()) {
+      RouterStore routerStore = router.getRouterStateManager();
       try {
         RouterState record = RouterState.newInstance(
             routerId, router.getStartTime(), router.getRouterState());
@@ -152,4 +154,14 @@ public class RouterHeartbeatService extends PeriodicService {
   public void periodicInvoke() {
     updateStateStore();
   }
+
+  private boolean isStoreAvailable() {
+    if (router.getRouterStateManager() == null) {
+      return false;
+    }
+    if (router.getStateStore() == null) {
+      return false;
+    }
+    return router.getStateStore().isDriverReady();
+  }
 }

+ 0 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java

@@ -48,21 +48,6 @@ public interface StateStoreRecordOperations {
   @Idempotent
   <T extends BaseRecord> QueryResult<T> get(Class<T> clazz) throws IOException;
 
-  /**
-   * Get all records of the requested record class from the data store. To use
-   * the default implementations in this class, getAll must return new instances
-   * of the records on each call. It is recommended to override the default
-   * implementations for better performance.
-   *
-   * @param clazz Class of record to fetch.
-   * @param sub Sub path.
-   * @return List of all records that match the clazz and the sub path.
-   * @throws IOException
-   */
-  @Idempotent
-  <T extends BaseRecord> QueryResult<T> get(Class<T> clazz, String sub)
-      throws IOException;
-
   /**
    * Get a single record from the store that matches the query.
    *

+ 233 - 200
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java

@@ -18,28 +18,39 @@
 package org.apache.hadoop.hdfs.server.federation.store.driver.impl;
 
 import static org.apache.hadoop.hdfs.server.federation.store.StateStoreUtils.filterMultiple;
-import static org.apache.hadoop.hdfs.server.federation.store.StateStoreUtils.getRecordClass;
+import static org.apache.hadoop.util.Time.monotonicNow;
+import static org.apache.hadoop.util.Time.now;
 
 import java.io.BufferedReader;
 import java.io.BufferedWriter;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
+import org.apache.hadoop.hdfs.server.federation.metrics.StateStoreMetrics;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreUnavailableException;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreUtils;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.apache.hadoop.hdfs.server.federation.store.records.Query;
 import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
- * {@link StateStoreDriver} implementation based on a local file.
+ * {@link StateStoreDriver} implementation based on files. In this approach, we
+ * use temporary files for the writes and renaming "atomically" to the final
+ * value. Instead of writing to the final location, it will go to a temporary
+ * one and then rename to the final destination.
  */
 public abstract class StateStoreFileBaseImpl
     extends StateStoreSerializableImpl {
@@ -47,75 +58,76 @@ public abstract class StateStoreFileBaseImpl
   private static final Logger LOG =
       LoggerFactory.getLogger(StateStoreFileBaseImpl.class);
 
+  /** File extension for temporary files. */
+  private static final String TMP_MARK = ".tmp";
+  /** We remove temporary files older than 10 seconds. */
+  private static final long OLD_TMP_RECORD_MS = TimeUnit.SECONDS.toMillis(10);
+  /** File pattern for temporary records: file.XYZ.tmp. */
+  private static final Pattern OLD_TMP_RECORD_PATTERN =
+      Pattern.compile(".+\\.(\\d+)\\.tmp");
+
   /** If it is initialized. */
   private boolean initialized = false;
 
-  /** Name of the file containing the data. */
-  private static final String DATA_FILE_NAME = "records.data";
-
 
   /**
-   * Lock reading records.
+   * Get the reader of a record for the file system.
    *
-   * @param clazz Class of the record.
+   * @param path Path of the record to read.
+   * @return Reader for the record.
    */
-  protected abstract <T extends BaseRecord> void lockRecordRead(Class<T> clazz);
+  protected abstract <T extends BaseRecord> BufferedReader getReader(
+      String path);
 
   /**
-   * Unlock reading records.
+   * Get the writer of a record for the file system.
    *
-   * @param clazz Class of the record.
+   * @param path Path of the record to write.
+   * @return Writer for the record.
    */
-  protected abstract <T extends BaseRecord> void unlockRecordRead(
-      Class<T> clazz);
+  protected abstract <T extends BaseRecord> BufferedWriter getWriter(
+      String path);
 
   /**
-   * Lock writing records.
+   * Check if a path exists.
    *
-   * @param clazz Class of the record.
+   * @param path Path to check.
+   * @return If the path exists.
    */
-  protected abstract <T extends BaseRecord> void lockRecordWrite(
-      Class<T> clazz);
+  protected abstract boolean exists(String path);
 
   /**
-   * Unlock writing records.
+   * Make a directory.
    *
-   * @param clazz Class of the record.
+   * @param path Path of the directory to create.
+   * @return If the directory was created.
    */
-  protected abstract <T extends BaseRecord> void unlockRecordWrite(
-      Class<T> clazz);
+  protected abstract boolean mkdir(String path);
 
   /**
-   * Get the reader for the file system.
+   * Rename a file. This should be atomic.
    *
-   * @param clazz Class of the record.
+   * @param src Source name.
+   * @param dst Destination name.
+   * @return If the rename was successful.
    */
-  protected abstract <T extends BaseRecord> BufferedReader getReader(
-      Class<T> clazz, String sub);
+  protected abstract boolean rename(String src, String dst);
 
   /**
-   * Get the writer for the file system.
+   * Remove a file.
    *
-   * @param clazz Class of the record.
+   * @param path Path for the file to remove
+   * @return If the file was removed.
    */
-  protected abstract <T extends BaseRecord> BufferedWriter getWriter(
-      Class<T> clazz, String sub);
+  protected abstract boolean remove(String path);
 
   /**
-   * Check if a path exists.
+   * Get the children for a path.
    *
    * @param path Path to check.
-   * @return If the path exists.
-   */
-  protected abstract boolean exists(String path);
-
-  /**
-   * Make a directory.
-   *
-   * @param path Path of the directory to create.
-   * @return If the directory was created.
+   * @return List of children.
    */
-  protected abstract boolean mkdir(String path);
+  protected abstract List<String> getChildren(String path);
 
   /**
    * Get root directory.
@@ -171,15 +183,6 @@ public abstract class StateStoreFileBaseImpl
           LOG.error("Cannot create data directory {}", dataDirPath);
           return false;
         }
-        String dataFilePath = dataDirPath + "/" + DATA_FILE_NAME;
-        if (!exists(dataFilePath)) {
-          // Create empty file
-          List<T> emtpyList = new ArrayList<>();
-          if(!writeAll(emtpyList, recordClass)) {
-            LOG.error("Cannot create data file {}", dataFilePath);
-            return false;
-          }
-        }
       }
     } catch (Exception ex) {
       LOG.error("Cannot create data directory {}", dataDirPath, ex);
@@ -188,138 +191,110 @@ public abstract class StateStoreFileBaseImpl
     return true;
   }
 
-  /**
-   * Read all lines from a file and deserialize into the desired record type.
-   *
-   * @param reader Open handle for the file.
-   * @param clazz Record class to create.
-   * @param includeDates True if dateModified/dateCreated are serialized.
-   * @return List of records.
-   * @throws IOException
-   */
-  private <T extends BaseRecord> List<T> getAllFile(
-      BufferedReader reader, Class<T> clazz, boolean includeDates)
-          throws IOException {
-
-    List<T> ret = new ArrayList<T>();
-    String line;
-    while ((line = reader.readLine()) != null) {
-      if (!line.startsWith("#") && line.length() > 0) {
-        try {
-          T record = newRecord(line, clazz, includeDates);
-          ret.add(record);
-        } catch (Exception ex) {
-          LOG.error("Cannot parse line in data source file: {}", line, ex);
-        }
-      }
-    }
-    return ret;
-  }
-
   @Override
   public <T extends BaseRecord> QueryResult<T> get(Class<T> clazz)
       throws IOException {
-    return get(clazz, (String)null);
-  }
-
-  @Override
-  public <T extends BaseRecord> QueryResult<T> get(Class<T> clazz, String sub)
-      throws IOException {
     verifyDriverReady();
-    BufferedReader reader = null;
-    lockRecordRead(clazz);
+    long start = monotonicNow();
+    StateStoreMetrics metrics = getMetrics();
+    List<T> ret = new ArrayList<>();
     try {
-      reader = getReader(clazz, sub);
-      List<T> data = getAllFile(reader, clazz, true);
-      return new QueryResult<T>(data, getTime());
-    } catch (Exception ex) {
-      LOG.error("Cannot fetch records {}", clazz.getSimpleName());
-      throw new IOException("Cannot read from data store " + ex.getMessage());
-    } finally {
-      if (reader != null) {
-        try {
-          reader.close();
-        } catch (IOException e) {
-          LOG.error("Failed closing file", e);
+      String path = getPathForClass(clazz);
+      List<String> children = getChildren(path);
+      for (String child : children) {
+        String pathRecord = path + "/" + child;
+        if (child.endsWith(TMP_MARK)) {
+          LOG.debug("There is a temporary file {} in {}", child, path);
+          if (isOldTempRecord(child)) {
+            LOG.warn("Removing {} as it's an old temporary record", child);
+            remove(pathRecord);
+          }
+        } else {
+          T record = getRecord(pathRecord, clazz);
+          ret.add(record);
         }
       }
-      unlockRecordRead(clazz);
+    } catch (Exception e) {
+      if (metrics != null) {
+        metrics.addFailure(monotonicNow() - start);
+      }
+      String msg = "Cannot fetch records for " + clazz.getSimpleName();
+      LOG.error(msg, e);
+      throw new IOException(msg, e);
+    }
+
+    if (metrics != null) {
+      metrics.addRead(monotonicNow() - start);
     }
+    return new QueryResult<T>(ret, getTime());
   }
 
   /**
-   * Overwrite the existing data with a new data set.
+   * Check if a record is temporary and old.
    *
-   * @param records List of records to write.
-   * @param writer BufferedWriter stream to write to.
-   * @return If the records were succesfully written.
+   * @param pathRecord Path for the record to check.
+   * @return If the record is temporary and old.
    */
-  private <T extends BaseRecord> boolean writeAllFile(
-      Collection<T> records, BufferedWriter writer) {
-
-    try {
-      for (BaseRecord record : records) {
-        try {
-          String data = serializeString(record);
-          writer.write(data);
-          writer.newLine();
-        } catch (IllegalArgumentException ex) {
-          LOG.error("Cannot write record {} to file", record, ex);
-        }
-      }
-      writer.flush();
-      return true;
-    } catch (IOException e) {
-      LOG.error("Cannot commit records to file", e);
+  @VisibleForTesting
+  public static boolean isOldTempRecord(final String pathRecord) {
+    if (!pathRecord.endsWith(TMP_MARK)) {
       return false;
     }
+    // Extract temporary record creation time
+    Matcher m = OLD_TMP_RECORD_PATTERN.matcher(pathRecord);
+    if (m.find()) {
+      long time = Long.parseLong(m.group(1));
+      return now() - time > OLD_TMP_RECORD_MS;
+    }
+    return false;
   }
 
   /**
-   * Overwrite the existing data with a new data set. Replaces all records in
-   * the data store for this record class. If all records in the data store are
-   * not successfully committed, this function must return false and leave the
-   * data store unchanged.
+   * Read a record from a file.
    *
-   * @param records List of records to write. All records must be of type
-   *                recordClass.
-   * @param recordClass Class of record to replace.
-   * @return true if all operations were successful, false otherwise.
-   * @throws StateStoreUnavailableException
+   * @param path Path to the file containing the record.
+   * @param clazz Class of the record.
+   * @return Record read from the file.
+   * @throws IOException If the file cannot be read.
    */
-  public <T extends BaseRecord> boolean writeAll(
-      Collection<T> records, Class<T> recordClass)
-          throws StateStoreUnavailableException {
-    verifyDriverReady();
-    lockRecordWrite(recordClass);
-    BufferedWriter writer = null;
+  private <T extends BaseRecord> T getRecord(
+      final String path, final Class<T> clazz) throws IOException {
+    BufferedReader reader = getReader(path);
     try {
-      writer = getWriter(recordClass, null);
-      return writeAllFile(records, writer);
-    } catch (Exception e) {
-      LOG.error(
-          "Cannot add records to file for {}", recordClass.getSimpleName(), e);
-      return false;
-    } finally {
-      if (writer != null) {
-        try {
-          writer.close();
-        } catch (IOException e) {
-          LOG.error(
-              "Cannot close writer for {}", recordClass.getSimpleName(), e);
+      String line;
+      while ((line = reader.readLine()) != null) {
+        if (!line.startsWith("#") && line.length() > 0) {
+          try {
+            T record = newRecord(line, clazz, false);
+            return record;
+          } catch (Exception ex) {
+            LOG.error("Cannot parse line {} in file {}", line, path, ex);
+          }
         }
       }
-      unlockRecordWrite(recordClass);
+    } finally {
+      if (reader != null) {
+        reader.close();
+      }
     }
+    throw new IOException("Cannot read " + path + " for record " +
+        clazz.getSimpleName());
   }
 
   /**
-   * Get the data file name.
-   *
-   * @return Data file name.
+   * Get the path for a record class.
+   * @param clazz Class of the record.
+   * @return Path for this record class.
    */
-  protected String getDataFileName() {
-    return DATA_FILE_NAME;
+  private <T extends BaseRecord> String getPathForClass(final Class<T> clazz) {
+    String className = StateStoreUtils.getRecordName(clazz);
+    StringBuilder sb = new StringBuilder();
+    sb.append(getRootDir());
+    if (sb.charAt(sb.length() - 1) != '/') {
+      sb.append("/");
+    }
+    sb.append(className);
+    return sb.toString();
   }
 
   @Override
@@ -332,56 +307,80 @@ public abstract class StateStoreFileBaseImpl
       List<T> records, boolean allowUpdate, boolean errorIfExists)
           throws StateStoreUnavailableException {
     verifyDriverReady();
-
     if (records.isEmpty()) {
       return true;
     }
 
-    @SuppressWarnings("unchecked")
-    Class<T> clazz = (Class<T>) getRecordClass(records.get(0).getClass());
-    QueryResult<T> result;
-    try {
-      result = get(clazz);
-    } catch (IOException e) {
-      return false;
-    }
-    Map<Object, T> writeList = new HashMap<>();
+    long start = monotonicNow();
+    StateStoreMetrics metrics = getMetrics();
 
-    // Write all of the existing records
-    for (T existingRecord : result.getRecords()) {
-      String key = existingRecord.getPrimaryKey();
-      writeList.put(key, existingRecord);
-    }
+    // Check if any record exists
+    Map<String, T> toWrite = new HashMap<>();
+    for (T record : records) {
+      Class<? extends BaseRecord> recordClass = record.getClass();
+      String path = getPathForClass(recordClass);
+      String primaryKey = getPrimaryKey(record);
+      String recordPath = path + "/" + primaryKey;
 
-    // Add inserts and updates, overwrite any existing values
-    for (T updatedRecord : records) {
-      try {
-        updatedRecord.validate();
-        String key = updatedRecord.getPrimaryKey();
-        if (writeList.containsKey(key) && allowUpdate) {
-          // Update
-          writeList.put(key, updatedRecord);
+      if (exists(recordPath)) {
+        if (allowUpdate) {
           // Update the mod time stamp. Many backends will use their
           // own timestamp for the mod time.
-          updatedRecord.setDateModified(this.getTime());
-        } else if (!writeList.containsKey(key)) {
-          // Insert
-          // Create/Mod timestamps are already initialized
-          writeList.put(key, updatedRecord);
+          record.setDateModified(this.getTime());
+          toWrite.put(recordPath, record);
         } else if (errorIfExists) {
           LOG.error("Attempt to insert record {} that already exists",
-              updatedRecord);
+              recordPath);
+          if (metrics != null) {
+            metrics.addFailure(monotonicNow() - start);
+          }
           return false;
+        } else  {
+          LOG.debug("Not updating {}", record);
         }
-      } catch (IllegalArgumentException ex) {
-        LOG.error("Cannot write invalid record to State Store", ex);
-        return false;
+      } else {
+        toWrite.put(recordPath, record);
       }
     }
 
-    // Write all
-    boolean status = writeAll(writeList.values(), clazz);
-    return status;
+    // Write the records
+    boolean success = true;
+    for (Entry<String, T> entry : toWrite.entrySet()) {
+      String recordPath = entry.getKey();
+      String recordPathTemp = recordPath + "." + now() + TMP_MARK;
+      BufferedWriter writer = getWriter(recordPathTemp);
+      try {
+        T record = entry.getValue();
+        String line = serializeString(record);
+        writer.write(line);
+      } catch (IOException e) {
+        LOG.error("Cannot write {}", recordPathTemp, e);
+        success = false;
+      } finally {
+        if (writer != null) {
+          try {
+            writer.close();
+          } catch (IOException e) {
+            LOG.error("Cannot close the writer for {}", recordPathTemp);
+          }
+        }
+      }
+      // Commit
+      if (!rename(recordPathTemp, recordPath)) {
+        LOG.error("Failed committing record into {}", recordPath);
+        success = false;
+      }
+    }
+
+    long end = monotonicNow();
+    if (metrics != null) {
+      if (success) {
+        metrics.addWrite(end - start);
+      } else {
+        metrics.addFailure(end - start);
+      }
+    }
+    return success;
   }
 
   @Override
@@ -393,6 +392,8 @@ public abstract class StateStoreFileBaseImpl
       return 0;
     }
 
+    long start = Time.monotonicNow();
+    StateStoreMetrics metrics = getMetrics();
     int removed = 0;
     // Get the current records
     try {
@@ -400,21 +401,34 @@ public abstract class StateStoreFileBaseImpl
       final List<T> existingRecords = result.getRecords();
       // Write all of the existing records except those to be removed
       final List<T> recordsToRemove = filterMultiple(query, existingRecords);
-      removed = recordsToRemove.size();
-      final List<T> newRecords = new LinkedList<>();
-      for (T record : existingRecords) {
-        if (!recordsToRemove.contains(record)) {
-          newRecords.add(record);
+      boolean success = true;
+      for (T recordToRemove : recordsToRemove) {
+        String path = getPathForClass(clazz);
+        String primaryKey = getPrimaryKey(recordToRemove);
+        String recordToRemovePath = path + "/" + primaryKey;
+        if (remove(recordToRemovePath)) {
+          removed++;
+        } else {
+          LOG.error("Cannot remove record {}", recordToRemovePath);
+          success = false;
         }
       }
-      if (!writeAll(newRecords, clazz)) {
-        throw new IOException(
-            "Cannot remove record " + clazz + " query " + query);
+      if (!success) {
+        LOG.error("Cannot remove records {} query {}", clazz, query);
+        if (metrics != null) {
+          metrics.addFailure(monotonicNow() - start);
+        }
       }
     } catch (IOException e) {
       LOG.error("Cannot remove records {} query {}", clazz, query, e);
+      if (metrics != null) {
+        metrics.addFailure(monotonicNow() - start);
+      }
     }
 
+    if (removed > 0 && metrics != null) {
+      metrics.addRemove(monotonicNow() - start);
+    }
     return removed;
   }
 
@@ -422,8 +436,27 @@ public abstract class StateStoreFileBaseImpl
   public <T extends BaseRecord> boolean removeAll(Class<T> clazz)
       throws StateStoreUnavailableException {
     verifyDriverReady();
-    List<T> emptyList = new ArrayList<>();
-    boolean status = writeAll(emptyList, clazz);
-    return status;
+    long start = Time.monotonicNow();
+    StateStoreMetrics metrics = getMetrics();
+
+    boolean success = true;
+    String path = getPathForClass(clazz);
+    List<String> children = getChildren(path);
+    for (String child : children) {
+      String pathRecord = path + "/" + child;
+      if (!remove(pathRecord)) {
+        success = false;
+      }
+    }
+
+    if (metrics != null) {
+      long time = Time.monotonicNow() - start;
+      if (success) {
+        metrics.addRemove(time);
+      } else {
+        metrics.addFailure(time);
+      }
+    }
+    return success;
   }
 }
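
Editor's note: the rewritten driver stages each record in "<key>.<timestamp>.tmp" and then renames it over the final path, as the new class javadoc describes, and the get() path treats stale *.tmp files as removable. A self-contained java.nio sketch of that write path (class and record names hypothetical; assumes the underlying file system supports atomic moves, as a local POSIX file system does):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class AtomicRecordWrite {
      /** Stage the record in key.<timestamp>.tmp, then atomically rename it. */
      static void writeRecord(Path recordDir, String primaryKey, String serialized)
          throws IOException {
        Path tmp = recordDir.resolve(
            primaryKey + "." + System.currentTimeMillis() + ".tmp");
        Path dst = recordDir.resolve(primaryKey);
        Files.write(tmp, serialized.getBytes(StandardCharsets.UTF_8));
        // Readers see either the previous record or the new one, never a
        // half-written file; a crash leaves only a *.tmp that a later read
        // can recognize as old and clean up.
        Files.move(tmp, dst, StandardCopyOption.ATOMIC_MOVE);
      }

      public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("statestore-demo");
        writeRecord(dir, "RouterState-router1", "{\"address\":\"router1:8888\"}");
        System.out.println(Files.readAllLines(dir.resolve("RouterState-router1")));
      }
    }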

+ 48 - 61
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java

@@ -26,11 +26,10 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
 import java.nio.charset.StandardCharsets;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.LinkedList;
+import java.util.List;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.federation.store.StateStoreUtils;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -49,10 +48,6 @@ public class StateStoreFileImpl extends StateStoreFileBaseImpl {
   public static final String FEDERATION_STORE_FILE_DIRECTORY =
       DFSConfigKeys.FEDERATION_STORE_PREFIX + "driver.file.directory";
 
-  /** Synchronization. */
-  private static final ReadWriteLock READ_WRITE_LOCK =
-      new ReentrantReadWriteLock();
-
   /** Root directory for the state store. */
   private String rootDirectory;
 
@@ -69,6 +64,23 @@ public class StateStoreFileImpl extends StateStoreFileBaseImpl {
     return dir.mkdirs();
   }
 
+  @Override
+  protected boolean rename(String src, String dst) {
+    try {
+      Files.move(new File(src), new File(dst));
+      return true;
+    } catch (IOException e) {
+      LOG.error("Cannot rename {} to {}", src, dst, e);
+      return false;
+    }
+  }
+
+  @Override
+  protected boolean remove(String path) {
+    File file = new File(path);
+    return file.delete();
+  }
+
   @Override
   protected String getRootDir() {
     if (this.rootDirectory == null) {
@@ -76,6 +88,7 @@ public class StateStoreFileImpl extends StateStoreFileBaseImpl {
       if (dir == null) {
         File tempDir = Files.createTempDir();
         dir = tempDir.getAbsolutePath();
+        LOG.warn("The root directory is not available, using {}", dir);
       }
       this.rootDirectory = dir;
     }
@@ -83,79 +96,53 @@ public class StateStoreFileImpl extends StateStoreFileBaseImpl {
   }
 
   @Override
-  protected <T extends BaseRecord> void lockRecordWrite(Class<T> recordClass) {
-    // TODO - Synchronize via FS
-    READ_WRITE_LOCK.writeLock().lock();
-  }
-
-  @Override
-  protected <T extends BaseRecord> void unlockRecordWrite(
-      Class<T> recordClass) {
-    // TODO - Synchronize via FS
-    READ_WRITE_LOCK.writeLock().unlock();
-  }
-
-  @Override
-  protected <T extends BaseRecord> void lockRecordRead(Class<T> recordClass) {
-    // TODO - Synchronize via FS
-    READ_WRITE_LOCK.readLock().lock();
-  }
-
-  @Override
-  protected <T extends BaseRecord> void unlockRecordRead(Class<T> recordClass) {
-    // TODO - Synchronize via FS
-    READ_WRITE_LOCK.readLock().unlock();
-  }
-
-  @Override
-  protected <T extends BaseRecord> BufferedReader getReader(
-      Class<T> clazz, String sub) {
-    String filename = StateStoreUtils.getRecordName(clazz);
-    if (sub != null && sub.length() > 0) {
-      filename += "/" + sub;
-    }
-    filename += "/" + getDataFileName();
-
+  protected <T extends BaseRecord> BufferedReader getReader(String filename) {
+    BufferedReader reader = null;
     try {
       LOG.debug("Loading file: {}", filename);
-      File file = new File(getRootDir(), filename);
+      File file = new File(filename);
       FileInputStream fis = new FileInputStream(file);
       InputStreamReader isr =
           new InputStreamReader(fis, StandardCharsets.UTF_8);
-      BufferedReader reader = new BufferedReader(isr);
-      return reader;
+      reader = new BufferedReader(isr);
     } catch (Exception ex) {
-      LOG.error(
-          "Cannot open read stream for record {}", clazz.getSimpleName(), ex);
-      return null;
+      LOG.error("Cannot open read stream for record {}", filename, ex);
     }
+    return reader;
   }
 
   @Override
-  protected <T extends BaseRecord> BufferedWriter getWriter(
-      Class<T> clazz, String sub) {
-    String filename = StateStoreUtils.getRecordName(clazz);
-    if (sub != null && sub.length() > 0) {
-      filename += "/" + sub;
-    }
-    filename += "/" + getDataFileName();
-
+  protected <T extends BaseRecord> BufferedWriter getWriter(String filename) {
+    BufferedWriter writer = null;
     try {
-      File file = new File(getRootDir(), filename);
+      LOG.debug("Writing file: {}", filename);
+      File file = new File(filename);
       FileOutputStream fos = new FileOutputStream(file, false);
       OutputStreamWriter osw =
           new OutputStreamWriter(fos, StandardCharsets.UTF_8);
-      BufferedWriter writer = new BufferedWriter(osw);
-      return writer;
-    } catch (IOException ex) {
-      LOG.error(
-          "Cannot open read stream for record {}", clazz.getSimpleName(), ex);
-      return null;
+      writer = new BufferedWriter(osw);
+    } catch (IOException e) {
+      LOG.error("Cannot open write stream for record {}", filename, e);
     }
+    return writer;
   }
 
   @Override
   public void close() throws Exception {
     setInitialized(false);
   }
+
+  @Override
+  protected List<String> getChildren(String path) {
+    List<String> ret = new LinkedList<>();
+    File dir = new File(path);
+    File[] files = dir.listFiles();
+    if (files != null) {
+      for (File file : files) {
+        String filename = file.getName();
+        ret.add(filename);
+      }
+    }
+    return ret;
+  }
 }

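The removed in-process read/write locks are superseded by the new rename()/remove() primitives: a record file can be written to a temporary path and then promoted in a single step. A minimal sketch of that pattern, reusing the getWriter(), rename() and remove() overrides above; writeAndPromote() and its arguments are hypothetical and not part of the patch:

  // Hypothetical helper, not part of the patch: writes a record file to a temporary
  // path with getWriter() and promotes it with the new rename()/remove() primitives.
  private boolean writeAndPromote(String recordFile, String payload) throws IOException {
    String tmpFile = recordFile + ".tmp";          // assumed temp-file naming scheme
    BufferedWriter writer = getWriter(tmpFile);
    if (writer == null) {
      return false;                                // getWriter() already logged the error
    }
    try {
      writer.write(payload);
    } finally {
      writer.close();
    }
    if (!rename(tmpFile, recordFile)) {
      remove(tmpFile);                             // clean up the orphaned temporary file
      return false;
    }
    return true;
  }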
+ 64 - 64
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java

@@ -24,13 +24,17 @@ import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
 import java.net.URI;
 import java.nio.charset.StandardCharsets;
+import java.util.LinkedList;
+import java.util.List;
 
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.federation.store.StateStoreUtils;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -72,6 +76,36 @@ public class StateStoreFileSystemImpl extends StateStoreFileBaseImpl {
     }
   }
 
+  @Override
+  protected boolean rename(String src, String dst) {
+    try {
+      if (fs instanceof DistributedFileSystem) {
+        DistributedFileSystem dfs = (DistributedFileSystem)fs;
+        dfs.rename(new Path(src), new Path(dst), Options.Rename.OVERWRITE);
+        return true;
+      } else {
+        // An atomic replace is not available via the generic FileSystem API; fall back to delete + rename
+        if (fs.exists(new Path(dst))) {
+          fs.delete(new Path(dst), true);
+        }
+        return fs.rename(new Path(src), new Path(dst));
+      }
+    } catch (Exception e) {
+      LOG.error("Cannot rename {} to {}", src, dst, e);
+      return false;
+    }
+  }
+
+  @Override
+  protected boolean remove(String path) {
+    try {
+      return fs.delete(new Path(path), true);
+    } catch (Exception e) {
+      LOG.error("Cannot remove {}", path, e);
+      return false;
+    }
+  }
+
   @Override
   protected String getRootDir() {
     if (this.workPath == null) {
@@ -95,84 +129,50 @@ public class StateStoreFileSystemImpl extends StateStoreFileBaseImpl {
     }
   }
 
-  /**
-   * Get the folder path for the record class' data.
-   *
-   * @param clazz Data record class.
-   * @return Path of the folder containing the record class' data files.
-   */
-  private Path getPathForClass(Class<? extends BaseRecord> clazz) {
-    if (clazz == null) {
-      return null;
-    }
-    // TODO extract table name from class: entry.getTableName()
-    String className = StateStoreUtils.getRecordName(clazz);
-    return new Path(workPath, className);
-  }
-
-  @Override
-  protected <T extends BaseRecord> void lockRecordRead(Class<T> clazz) {
-    // Not required, synced with HDFS leasing
-  }
-
-  @Override
-  protected <T extends BaseRecord> void unlockRecordRead(Class<T> clazz) {
-    // Not required, synced with HDFS leasing
-  }
-
-  @Override
-  protected <T extends BaseRecord> void lockRecordWrite(Class<T> clazz) {
-    // TODO -> wait for lease to be available
-  }
-
   @Override
-  protected <T extends BaseRecord> void unlockRecordWrite(Class<T> clazz) {
-    // TODO -> ensure lease is closed for the file
-  }
-
-  @Override
-  protected <T extends BaseRecord> BufferedReader getReader(
-      Class<T> clazz, String sub) {
-
-    Path path = getPathForClass(clazz);
-    if (sub != null && sub.length() > 0) {
-      path = Path.mergePaths(path, new Path("/" + sub));
-    }
-    path = Path.mergePaths(path, new Path("/" + getDataFileName()));
-
+  protected <T extends BaseRecord> BufferedReader getReader(String pathName) {
+    BufferedReader reader = null;
+    Path path = new Path(pathName);
     try {
       FSDataInputStream fdis = fs.open(path);
       InputStreamReader isr =
           new InputStreamReader(fdis, StandardCharsets.UTF_8);
-      BufferedReader reader = new BufferedReader(isr);
-      return reader;
+      reader = new BufferedReader(isr);
     } catch (IOException ex) {
-      LOG.error("Cannot open write stream for {}  to {}",
-          clazz.getSimpleName(), path);
-      return null;
+      LOG.error("Cannot open read stream for {}", path);
     }
+    return reader;
   }
 
   @Override
-  protected <T extends BaseRecord> BufferedWriter getWriter(
-      Class<T> clazz, String sub) {
-
-    Path path = getPathForClass(clazz);
-    if (sub != null && sub.length() > 0) {
-      path = Path.mergePaths(path, new Path("/" + sub));
-    }
-    path = Path.mergePaths(path, new Path("/" + getDataFileName()));
-
+  protected <T extends BaseRecord> BufferedWriter getWriter(String pathName) {
+    BufferedWriter writer = null;
+    Path path = new Path(pathName);
     try {
       FSDataOutputStream fdos = fs.create(path, true);
       OutputStreamWriter osw =
           new OutputStreamWriter(fdos, StandardCharsets.UTF_8);
-      BufferedWriter writer = new BufferedWriter(osw);
-      return writer;
+      writer = new BufferedWriter(osw);
     } catch (IOException ex) {
-      LOG.error("Cannot open write stream for {} to {}",
-          clazz.getSimpleName(), path);
-      return null;
+      LOG.error("Cannot open write stream for {}", path);
+    }
+    return writer;
+  }
+
+  @Override
+  protected List<String> getChildren(String pathName) {
+    List<String> ret = new LinkedList<>();
+    Path path = new Path(workPath, pathName);
+    try {
+      FileStatus[] files = fs.listStatus(path);
+      for (FileStatus file : files) {
+        Path filePath = file.getPath();
+        String fileName = filePath.getName();
+        ret.add(fileName);
+      }
+    } catch (Exception e) {
+      LOG.error("Cannot get children for {}", pathName, e);
     }
+    return ret;
   }
 }

+ 10 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java

@@ -26,6 +26,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.imps.CuratorFrameworkState;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
@@ -111,18 +113,19 @@ public class StateStoreZooKeeperImpl extends StateStoreSerializableImpl {
 
   @Override
   public boolean isDriverReady() {
-    return zkManager != null;
+    if (zkManager == null) {
+      return false;
+    }
+    CuratorFramework curator = zkManager.getCurator();
+    if (curator == null) {
+      return false;
+    }
+    return curator.getState() == CuratorFrameworkState.STARTED;
   }
 
   @Override
   public <T extends BaseRecord> QueryResult<T> get(Class<T> clazz)
       throws IOException {
-    return get(clazz, (String)null);
-  }
-
-  @Override
-  public <T extends BaseRecord> QueryResult<T> get(Class<T> clazz, String sub)
-      throws IOException {
     verifyDriverReady();
     long start = monotonicNow();
     List<T> ret = new ArrayList<>();

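isDriverReady() now also requires the Curator client to be in the STARTED state rather than merely instantiated. A small standalone sketch of the same Curator lifecycle check; the connection string and retry policy below are placeholders, not values taken from this patch:

  import org.apache.curator.framework.CuratorFramework;
  import org.apache.curator.framework.CuratorFrameworkFactory;
  import org.apache.curator.framework.imps.CuratorFrameworkState;
  import org.apache.curator.retry.RetryOneTime;

  public class CuratorReadyCheck {
    public static void main(String[] args) {
      // Placeholder ZooKeeper address and retry policy.
      CuratorFramework curator = CuratorFrameworkFactory.newClient(
          "localhost:2181", new RetryOneTime(1000));
      curator.start();
      // Same condition isDriverReady() now checks: the client must be STARTED.
      boolean ready = curator.getState() == CuratorFrameworkState.STARTED;
      System.out.println("Curator ready: " + ready);
      curator.close();
    }
  }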
+ 0 - 32
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.hadoop.hdfs.util.EnumCounters;
-
 /**
  * The content types such as file, directory and symlink to be computed.
  */
@@ -39,34 +37,4 @@ public enum Content {
   SNAPSHOT,
   /** The number of snapshottable directories. */
   SNAPSHOTTABLE_DIRECTORY;
-
-  /** Content counts. */
-  public static class Counts extends EnumCounters<Content> {
-    public static Counts newInstance() {
-      return new Counts();
-    }
-
-    private Counts() {
-      super(Content.class);
-    }
-  }
-
-  private static final EnumCounters.Factory<Content, Counts> FACTORY
-      = new EnumCounters.Factory<Content, Counts>() {
-    @Override
-    public Counts newInstance() {
-      return Counts.newInstance();
-    }
-  };
-
-  /** A map of counters for the current state and the snapshots. */
-  public static class CountsMap
-      extends EnumCounters.Map<CountsMap.Key, Content, Counts> {
-    /** The key type of the map. */
-    public enum Key { CURRENT, SNAPSHOT }
-
-    CountsMap() {
-      super(FACTORY);
-    }
-  }
 }

+ 0 - 19
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java

@@ -146,25 +146,6 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
     }
   }
 
-  void addSpaceConsumed(final INodeDirectory dir, final QuotaCounts counts,
-      boolean verify) throws QuotaExceededException {
-    if (dir.isQuotaSet()) {
-      // The following steps are important:
-      // check quotas in this inode and all ancestors before changing counts
-      // so that no change is made if there is any quota violation.
-      // (1) verify quota in this inode
-      if (verify) {
-        verifyQuota(counts);
-      }
-      // (2) verify quota and then add count in ancestors
-      dir.addSpaceConsumed2Parent(counts, verify);
-      // (3) add count in this inode
-      addSpaceConsumed2Cache(counts);
-    } else {
-      dir.addSpaceConsumed2Parent(counts, verify);
-    }
-  }
-  
   /** Update the space/namespace/type usage of the tree
    * 
    * @param delta the change of the namespace/space/type usage

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java

@@ -689,13 +689,13 @@ class FSDirRenameOp {
       return fsd.addLastINodeNoQuotaCheck(dstParentIIP, toDst);
     }
 
-    void updateMtimeAndLease(long timestamp) throws QuotaExceededException {
+    void updateMtimeAndLease(long timestamp) {
       srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshotId());
       final INode dstParent = dstParentIIP.getLastINode();
       dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshotId());
     }
 
-    void restoreSource() throws QuotaExceededException {
+    void restoreSource() {
       // Rename failed - restore src
       final INode oldSrcChild = srcChild;
       // put it back
@@ -722,7 +722,7 @@ class FSDirRenameOp {
       }
     }
 
-    void restoreDst(BlockStoragePolicySuite bsps) throws QuotaExceededException {
+    void restoreDst(BlockStoragePolicySuite bsps) {
       Preconditions.checkState(oldDstChild != null);
       final INodeDirectory dstParent = dstParentIIP.getLastINode().asDirectory();
       if (dstParent.isWithSnapshot()) {
@@ -738,8 +738,8 @@ class FSDirRenameOp {
       }
     }
 
-    boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
-        throws QuotaExceededException {
+    boolean cleanDst(
+        BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks) {
       Preconditions.checkState(oldDstChild != null);
       List<INode> removedINodes = new ChunkedArrayList<>();
       List<Long> removedUCFiles = new ChunkedArrayList<>();
@@ -762,13 +762,13 @@ class FSDirRenameOp {
       return filesDeleted;
     }
 
-    void updateQuotasInSourceTree(BlockStoragePolicySuite bsps) throws QuotaExceededException {
+    void updateQuotasInSourceTree(BlockStoragePolicySuite bsps) {
       // update the quota usage in src tree
       if (isSrcInSnapshot) {
         // get the counts after rename
         QuotaCounts newSrcCounts = srcChild.computeQuotaUsage(bsps, false);
         newSrcCounts.subtract(oldSrcCounts);
-        srcParent.addSpaceConsumed(newSrcCounts, false);
+        srcParent.addSpaceConsumed(newSrcCounts);
       }
     }
   }

+ 2 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -1293,13 +1293,8 @@ public class FSDirectory implements Closeable {
     updateCount(existing, pos, counts, checkQuota);
 
     boolean isRename = (inode.getParent() != null);
-    boolean added;
-    try {
-      added = parent.addChild(inode, true, existing.getLatestSnapshotId());
-    } catch (QuotaExceededException e) {
-      updateCountNoQuotaCheck(existing, pos, counts.negation());
-      throw e;
-    }
+    final boolean added = parent.addChild(inode, true,
+        existing.getLatestSnapshotId());
     if (!added) {
       updateCountNoQuotaCheck(existing, pos, counts.negation());
       return null;

+ 2 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -471,21 +470,10 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
 
   /**
    * Check and add namespace/storagespace/storagetype consumed to itself and the ancestors.
-   * @throws QuotaExceededException if quote is violated.
    */
-  public void addSpaceConsumed(QuotaCounts counts, boolean verify)
-    throws QuotaExceededException {
-    addSpaceConsumed2Parent(counts, verify);
-  }
-
-  /**
-   * Check and add namespace/storagespace/storagetype consumed to itself and the ancestors.
-   * @throws QuotaExceededException if quote is violated.
-   */
-  void addSpaceConsumed2Parent(QuotaCounts counts, boolean verify)
-    throws QuotaExceededException {
+  public void addSpaceConsumed(QuotaCounts counts) {
     if (parent != null) {
-      parent.addSpaceConsumed(counts, verify);
+      parent.addSpaceConsumed(counts);
     }
   }
 

+ 10 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -31,7 +31,6 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
@@ -171,13 +170,12 @@ public class INodeDirectory extends INodeWithAdditionalFields
   }
 
   @Override
-  public void addSpaceConsumed(QuotaCounts counts, boolean verify)
-    throws QuotaExceededException {
+  public void addSpaceConsumed(QuotaCounts counts) {
+    super.addSpaceConsumed(counts);
+
     final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
-    if (q != null) {
-      q.addSpaceConsumed(this, counts, verify);
-    } else {
-      addSpaceConsumed2Parent(counts, verify);
+    if (q != null && isQuotaSet()) {
+      q.addSpaceConsumed2Cache(counts);
     }
   }
 
@@ -281,7 +279,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
   public Snapshot addSnapshot(int id, String name,
       final LeaseManager leaseManager, final boolean captureOpenFiles,
       int maxSnapshotLimit)
-      throws SnapshotException, QuotaExceededException {
+      throws SnapshotException {
     return getDirectorySnapshottableFeature().addSnapshot(this, id, name,
         leaseManager, captureOpenFiles, maxSnapshotLimit);
   }
@@ -543,7 +541,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
    *         otherwise, return true;
    */
   public boolean addChild(INode node, final boolean setModTime,
-      final int latestSnapshotId) throws QuotaExceededException {
+      final int latestSnapshotId) {
     final int low = searchChildren(node.getLocalNameBytes());
     if (low >= 0) {
       return false;
@@ -739,10 +737,9 @@ public class INodeDirectory extends INodeWithAdditionalFields
    *          The reference node to be removed/replaced
    * @param newChild
    *          The node to be added back
-   * @throws QuotaExceededException should not throw this exception
    */
   public void undoRename4ScrParent(final INodeReference oldChild,
-      final INode newChild) throws QuotaExceededException {
+      final INode newChild) {
     DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     assert sf != null : "Directory does not have snapshot feature";
     sf.getDiffs().removeDeletedChild(oldChild);
@@ -756,8 +753,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
    * and delete possible record in the deleted list.  
    */
   public void undoRename4DstParent(final BlockStoragePolicySuite bsps,
-      final INode deletedChild,
-      int latestSnapshotId) throws QuotaExceededException {
+      final INode deletedChild, int latestSnapshotId) {
     DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     assert sf != null : "Directory does not have snapshot feature";
     boolean removeDeletedChild = sf.getDiffs().removeDeletedChild(deletedChild);
@@ -767,8 +763,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
     // been stored in deleted list before
     if (added && !removeDeletedChild) {
       final QuotaCounts counts = deletedChild.computeQuotaUsage(bsps);
-      addSpaceConsumed(counts, false);
-
+      addSpaceConsumed(counts);
     }
   }
 

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java

@@ -30,7 +30,6 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.Content;
@@ -171,7 +170,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
   public Snapshot addSnapshot(INodeDirectory snapshotRoot, int id, String name,
       final LeaseManager leaseManager, final boolean captureOpenFiles,
       int maxSnapshotLimit)
-      throws SnapshotException, QuotaExceededException {
+      throws SnapshotException {
     //check snapshot quota
     final int n = getNumSnapshots();
     if (n + 1 > snapshotQuota) {

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.*;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@@ -524,7 +523,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
    * to make sure that parent is in the given snapshot "latest".
    */
   public boolean addChild(INodeDirectory parent, INode inode,
-      boolean setModTime, int latestSnapshotId) throws QuotaExceededException {
+      boolean setModTime, int latestSnapshotId) {
     ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId,
         parent).diff;
     final int undoInfo = diff.create(inode);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java

@@ -120,7 +120,7 @@ public class FSImageFormatPBSnapshot {
     }
 
     private INodeReference loadINodeReference(
-        INodeReferenceSection.INodeReference r) throws IOException {
+        INodeReferenceSection.INodeReference r) {
       long referredId = r.getReferredId();
       INode referred = fsDir.getInode(referredId);
       WithCount withCount = (WithCount) referred.getParentReference();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java

@@ -145,7 +145,7 @@ public class AdminHelper {
     private final Command[] commands;
 
     public HelpCommand(Command[] commands) {
-      Preconditions.checkNotNull(commands != null);
+      Preconditions.checkNotNull(commands, "commands cannot be null.");
       this.commands = commands;
     }
 

+ 3 - 69
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java

@@ -17,12 +17,11 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import java.util.Arrays;
-import java.util.HashMap;
-
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang.ArrayUtils;
 
+import java.util.Arrays;
+
 /**
  * Counters for an enum type.
  * 
@@ -31,7 +30,7 @@ import org.apache.commons.lang.ArrayUtils;
  * enum Fruit { APPLE, ORANGE, GRAPE }
  * </pre>
  * An {@link EnumCounters} object can be created for counting the numbers of
- * APPLE, ORANGLE and GRAPE.
+ * APPLE, ORANGE and GRAPE.
  *
  * @param <E> the enum type
  */
@@ -178,69 +177,4 @@ public class EnumCounters<E extends Enum<E>> {
     }
     return false;
   }
-
-  /**
-   * A factory for creating counters.
-   * 
-   * @param <E> the enum type
-   * @param <C> the counter type
-   */
-  public static interface Factory<E extends Enum<E>,
-                                  C extends EnumCounters<E>> {
-    /** Create a new counters instance. */
-    public C newInstance(); 
-  }
-
-  /**
-   * A key-value map which maps the keys to {@link EnumCounters}.
-   * Note that null key is supported.
-   *
-   * @param <K> the key type
-   * @param <E> the enum type
-   * @param <C> the counter type
-   */
-  public static class Map<K, E extends Enum<E>, C extends EnumCounters<E>> {
-    /** The factory for creating counters. */
-    private final Factory<E, C> factory;
-    /** Key-to-Counts map. */
-    private final java.util.Map<K, C> counts = new HashMap<K, C>();
-    
-    /** Construct a map. */
-    public Map(final Factory<E, C> factory) {
-      this.factory = factory;
-    }
-
-    /** @return the counters for the given key. */
-    public final C getCounts(final K key) {
-      C c = counts.get(key);
-      if (c == null) {
-        c = factory.newInstance();
-        counts.put(key, c); 
-      }
-      return c;
-    }
-    
-    /** @return the sum of the values of all the counters. */
-    public final C sum() {
-      final C sum = factory.newInstance();
-      for(C c : counts.values()) {
-        sum.add(c);
-      }
-      return sum;
-    }
-    
-    /** @return the sum of the values of all the counters for e. */
-    public final long sum(final E e) {
-      long sum = 0;
-      for(C c : counts.values()) {
-        sum += c.get(e);
-      }
-      return sum;
-    }
-
-    @Override
-    public String toString() {
-      return counts.toString();
-    }
-  }
 }

+ 92 - 0
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md

@@ -51,6 +51,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
     * [`GETALLSTORAGEPOLICY`](#Get_all_Storage_Policies) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getAllStoragePolicies)
     * [`GETSTORAGEPOLICY`](#Get_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStoragePolicy)
     * [`GETSNAPSHOTDIFF`](#Get_Snapshot_Diff)
+    * [`GETSNAPSHOTTABLEDIRECTORYLIST`](#Get_Snapshottable_Directory_List)
 *   HTTP PUT
     * [`CREATE`](#Create_and_Write_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).create)
     * [`MKDIRS`](#Make_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).mkdirs)
@@ -1282,6 +1283,45 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSna
 
         {"SnapshotDiffReport":{"diffList":[],"fromSnapshot":"s3","snapshotRoot":"/foo","toSnapshot":"s4"}}
 
+### Get Snapshottable Directory List
+
+* Submit an HTTP GET request.
+
+        curl -i "http://<HOST>:<PORT>/webhdfs/v1/?user.name=<USER>&op=GETSNAPSHOTTABLEDIRECTORYLIST"
+
+    If the USER is not the hdfs superuser, the call lists only the snapshottable directories owned by that user. If the USER is the hdfs superuser, the call lists all snapshottable directories. The client receives a response with a [`SnapshottableDirectoryList` JSON object](#SnapshottableDirectoryList_JSON_Schema):
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+        Transfer-Encoding: chunked
+
+        {
+            "SnapshottableDirectoryList":
+            [
+                {
+                  "dirStatus":
+                    {
+                        "accessTime":0,
+                        "blockSize":0,
+                        "childrenNum":0,
+                        "fileId":16386,
+                        "group":"hadoop",
+                        "length":0,
+                        "modificationTime":1520761889225,
+                        "owner":"random",
+                        "pathSuffix":"bar",
+                        "permission":"755",
+                        "replication":0,
+                        "storagePolicy":0,
+                        "type":"DIRECTORY"
+                    },
+                  "parentFullPath":"/",
+                  "snapshotNumber":0,
+                  "snapshotQuota":65536
+                }
+            ]
+        }
+
 Delegation Token Operations
 ---------------------------
 
@@ -2135,6 +2175,58 @@ var diffReportEntries =
 }
 ```
 
+### SnapshottableDirectoryList JSON Schema
+
+```json
+{
+  "name": "SnapshottableDirectoryList",
+  "type": "object",
+  "properties":
+  {
+    "SnapshottableDirectoryList":
+    {
+      "description": "An array of SnapshottableDirectoryStatus",
+      "type"        : "array",
+      "items"       : snapshottableDirectoryStatus,
+      "required"    : true
+    }
+  }
+}
+```
+
+#### SnapshottableDirectoryStatus
+
+JavaScript syntax is used to define `snapshottableDirectoryStatus` so that it can be referred to in the `SnapshottableDirectoryList` JSON schema.
+
+```javascript
+var snapshottableDirectoryStatus =
+{
+  "type": "object",
+  "properties":
+  {
+    "dirStatus": fileStatusProperties,
+    "parentFullPath":
+    {
+      "description" : "Full path of the parent of snapshottable directory",
+      "type"        : "string",
+      "required"    : true
+    },
+    "snapshotNumber":
+    {
+      "description" : "Number of snapshots created on the snapshottable directory",
+      "type"        : "integer",
+      "required"    : true
+    },
+    "snapshotQuota":
+    {
+      "description" : "Total number of snapshots allowed on the snapshottable directory",
+      "type"        : "integer",
+      "required"    : true
+    }
+  }
+}
+```
+
 HTTP Query Parameter Dictionary
 -------------------------------
 

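The same listing is also reachable from the Java client side. A hedged sketch using DistributedFileSystem.getSnapshottableDirListing(), with a placeholder NameNode URI (in practice the URI comes from fs.defaultFS):

  import java.net.URI;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

  public class ListSnapshottableDirs {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Placeholder NameNode URI.
      try (FileSystem fs = FileSystem.get(new URI("hdfs://localhost:8020"), conf)) {
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        SnapshottableDirectoryStatus[] dirs = dfs.getSnapshottableDirListing();
        if (dirs != null) {
          for (SnapshottableDirectoryStatus s : dirs) {
            System.out.println(s.getFullPath() + " snapshots=" + s.getSnapshotNumber()
                + " quota=" + s.getSnapshotQuota());
          }
        }
      }
    }
  }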
+ 16 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -2282,6 +2282,22 @@ public class MiniDFSCluster implements AutoCloseable {
     return stopDataNode(node);
   }
 
+  /**
+   * Restart a DataNode by its transfer address.
+   * @return true if the DataNode was restarted successfully, false otherwise
+   */
+  public synchronized boolean restartDataNode(String dnName)
+      throws IOException {
+    for (int i = 0; i < dataNodes.size(); i++) {
+      DataNode dn = dataNodes.get(i).datanode;
+      if (dnName.equals(dn.getDatanodeId().getXferAddr())) {
+        return restartDataNode(i);
+      }
+    }
+    return false;
+  }
+
   /*
    * Shutdown a particular datanode
    * @param i node index

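A short usage sketch for the new name-based restart, assuming a JUnit test body with an already started MiniDFSCluster; the variables shown are illustrative:

  // Illustrative JUnit fragment; "cluster" is an already started MiniDFSCluster.
  DataNode dn = cluster.getDataNodes().get(0);
  String xferAddr = dn.getDatanodeId().getXferAddr();      // e.g. "127.0.0.1:9866"
  boolean restarted = cluster.restartDataNode(xferAddr);   // new name-based overload
  assertTrue("DataNode " + xferAddr + " did not restart", restarted);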
+ 71 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java

@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import static org.junit.Assume.assumeTrue;
+
+/**
+ * Test striped file write operation under DataNode failures, using
+ * parameterized test cases.
+ */
+@RunWith(Parameterized.class)
+public class ParameterizedTestDFSStripedOutputStreamWithFailure extends
+    TestDFSStripedOutputStreamWithFailureBase {
+  public static final Logger LOG = LoggerFactory.getLogger(
+      ParameterizedTestDFSStripedOutputStreamWithFailure.class);
+
+  private int base;
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    List<Object[]> parameters = new ArrayList<>();
+    for (int i = 0; i <= 10; i++) {
+      parameters.add(new Object[]{RANDOM.nextInt(220)});
+    }
+    return parameters;
+  }
+
+  public ParameterizedTestDFSStripedOutputStreamWithFailure(int base) {
+    this.base = base;
+  }
+
+  @Test(timeout = 240000)
+  public void runTestWithSingleFailure() {
+    assumeTrue(base >= 0);
+    if (base > lengths.size()) {
+      base = base % lengths.size();
+    }
+    final int i = base;
+    final Integer length = getLength(i);
+    assumeTrue("Skip test " + i + " since length=null.", length != null);
+    assumeTrue("Test " + i + ", length=" + length
+        + ", is not chosen to run.", RANDOM.nextInt(16) != 0);
+    System.out.println("Run test " + i + ", length=" + length);
+    runTest(length);
+  }
+}

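The new test relies on JUnit 4's Parameterized runner: data() is invoked once, and the test class is constructed for each returned Object[]. A tiny self-contained illustration of that mechanism (the SquareTest class below is hypothetical and unrelated to HDFS):

  import static org.junit.Assert.assertEquals;

  import java.util.Arrays;
  import java.util.Collection;

  import org.junit.Test;
  import org.junit.runner.RunWith;
  import org.junit.runners.Parameterized;

  // Hypothetical minimal example of the same JUnit 4 mechanism.
  @RunWith(Parameterized.class)
  public class SquareTest {
    @Parameterized.Parameters
    public static Collection<Object[]> data() {
      return Arrays.asList(new Object[][] {{2, 4}, {3, 9}, {4, 16}});
    }

    private final int input;
    private final int expected;

    public SquareTest(int input, int expected) {
      this.input = input;
      this.expected = expected;
    }

    @Test
    public void square() {
      assertEquals(expected, input * input);
    }
  }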
+ 49 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailureWithRandomECPolicy.java

@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tests the write operation of a DFS striped file under DataNode failure
+ * conditions, using a random erasure coding policy other than the default.
+ */
+public class
+    ParameterizedTestDFSStripedOutputStreamWithFailureWithRandomECPolicy extends
+    ParameterizedTestDFSStripedOutputStreamWithFailure {
+
+  private final ECSchema schema;
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ParameterizedTestDFSStripedOutputStreamWithFailureWithRandomECPolicy
+          .class.getName());
+
+  public ParameterizedTestDFSStripedOutputStreamWithFailureWithRandomECPolicy(
+      int base) {
+    super(base);
+    schema = StripedFileTestUtil.getRandomNonDefaultECPolicy().getSchema();
+    LOG.info(schema.toString());
+  }
+
+  @Override
+  public ECSchema getEcSchema() {
+    return schema;
+  }
+}

+ 8 - 488
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java

@@ -17,234 +17,33 @@
  */
 package org.apache.hadoop.hdfs;
 
-import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
-import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
-import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Level;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Random;
-import java.util.Stack;
-import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
 
 /**
- * Test striped file write operation with data node failures.
+ * Test striped file write operation with data node failures, using a fixed
+ * set of test cases.
  */
-public class TestDFSStripedOutputStreamWithFailure {
-  public static final Log LOG = LogFactory.getLog(
+public class TestDFSStripedOutputStreamWithFailure extends
+    TestDFSStripedOutputStreamWithFailureBase {
+  public static final Logger LOG = LoggerFactory.getLogger(
       TestDFSStripedOutputStreamWithFailure.class);
-  static {
-    GenericTestUtils.setLogLevel(DFSOutputStream.LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
-    ((Log4JLogger)LogFactory.getLog(BlockPlacementPolicy.class))
-        .getLogger().setLevel(Level.ALL);
-  }
-
-  private final int cellSize = 64 * 1024; //64k
-  private final int stripesPerBlock = 4;
-  private ErasureCodingPolicy ecPolicy;
-  private int dataBlocks;
-  private int parityBlocks;
-  private int blockSize;
-  private int blockGroupSize;
-
-  private static final int FLUSH_POS =
-      9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1;
-
-  public ECSchema getEcSchema() {
-    return StripedFileTestUtil.getDefaultECPolicy().getSchema();
-  }
-
-  /*
-   * Initialize erasure coding policy.
-   */
-  @Before
-  public void init() {
-    ecPolicy = new ErasureCodingPolicy(getEcSchema(), cellSize);
-    dataBlocks = ecPolicy.getNumDataUnits();
-    parityBlocks = ecPolicy.getNumParityUnits();
-    blockSize = cellSize * stripesPerBlock;
-    blockGroupSize = blockSize * dataBlocks;
-    dnIndexSuite = getDnIndexSuite();
-    lengths = newLengths();
-  }
-
-  List<Integer> newLengths() {
-    final List<Integer> lens = new ArrayList<>();
-    lens.add(FLUSH_POS + 2);
-    for(int b = 0; b <= 2; b++) {
-      for(int c = 0; c < stripesPerBlock * dataBlocks; c++) {
-        for(int delta = -1; delta <= 1; delta++) {
-          final int length = b * blockGroupSize + c * cellSize + delta;
-          System.out.println(lens.size() + ": length=" + length
-              + ", (b, c, d) = (" + b + ", " + c + ", " + delta + ")");
-          lens.add(length);
-        }
-      }
-    }
-    return lens;
-  }
-
-  private int[][] dnIndexSuite;
-
-  private int[][] getDnIndexSuite() {
-    final int maxNumLevel = 2;
-    final int maxPerLevel = 8;
-    List<List<Integer>> allLists = new ArrayList<>();
-    int numIndex = parityBlocks;
-    for (int i = 0; i < maxNumLevel && numIndex > 1; i++) {
-      List<List<Integer>> lists =
-          combinations(dataBlocks + parityBlocks, numIndex);
-      if (lists.size() > maxPerLevel) {
-        Collections.shuffle(lists);
-        lists = lists.subList(0, maxPerLevel);
-      }
-      allLists.addAll(lists);
-      numIndex--;
-    }
-    int[][] dnIndexArray = new int[allLists.size()][];
-    for (int i = 0; i < dnIndexArray.length; i++) {
-      int[] list = new int[allLists.get(i).size()];
-      for (int j = 0; j < list.length; j++) {
-        list[j] = allLists.get(i).get(j);
-      }
-      dnIndexArray[i] = list;
-    }
-    return dnIndexArray;
-  }
-
-  // get all combinations of k integers from {0,...,n-1}
-  private static List<List<Integer>> combinations(int n, int k) {
-    List<List<Integer>> res = new LinkedList<List<Integer>>();
-    if (k >= 1 && n >= k) {
-      getComb(n, k, new Stack<Integer>(), res);
-    }
-    return res;
-  }
-
-  private static void getComb(int n, int k, Stack<Integer> stack,
-      List<List<Integer>> res) {
-    if (stack.size() == k) {
-      List<Integer> list = new ArrayList<Integer>(stack);
-      res.add(list);
-    } else {
-      int next = stack.empty() ? 0 : stack.peek() + 1;
-      while (next < n) {
-        stack.push(next);
-        getComb(n, k, stack, res);
-        next++;
-      }
-    }
-    if (!stack.empty()) {
-      stack.pop();
-    }
-  }
-
-  private int[] getKillPositions(int fileLen, int num) {
-    int[] positions = new int[num];
-    for (int i = 0; i < num; i++) {
-      positions[i] = fileLen * (i + 1) / (num + 1);
-    }
-    return positions;
-  }
-
-  private List<Integer> lengths;
-
-  Integer getLength(int i) {
-    return i >= 0 && i < lengths.size() ? lengths.get(i): null;
-  }
-
-  private static final Random RANDOM = new Random();
-
-  private MiniDFSCluster cluster;
-  private DistributedFileSystem dfs;
-  private final Path dir = new Path("/"
-      + TestDFSStripedOutputStreamWithFailure.class.getSimpleName());
-
-  private void setup(Configuration conf) throws IOException {
-    System.out.println("NUM_DATA_BLOCKS  = " + dataBlocks);
-    System.out.println("NUM_PARITY_BLOCKS= " + parityBlocks);
-    System.out.println("CELL_SIZE        = " + cellSize + " (=" +
-        StringUtils.TraditionalBinaryPrefix.long2String(cellSize, "B", 2)
-        + ")");
-    System.out.println("BLOCK_SIZE       = " + blockSize + " (=" +
-        StringUtils.TraditionalBinaryPrefix.long2String(blockSize, "B", 2)
-        + ")");
-    System.out.println("BLOCK_GROUP_SIZE = " + blockGroupSize + " (=" +
-        StringUtils.TraditionalBinaryPrefix.long2String(blockGroupSize, "B", 2)
-        + ")");
-    final int numDNs = dataBlocks + parityBlocks;
-    if (ErasureCodeNative.isNativeCodeLoaded()) {
-      conf.set(
-          CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
-          NativeRSRawErasureCoderFactory.CODER_NAME);
-    }
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.waitActive();
-    dfs = cluster.getFileSystem();
-    AddErasureCodingPolicyResponse[] res =
-        dfs.addErasureCodingPolicies(new ErasureCodingPolicy[]{ecPolicy});
-    ecPolicy = res[0].getPolicy();
-    dfs.enableErasureCodingPolicy(ecPolicy.getName());
-    DFSTestUtil.enableAllECPolicies(dfs);
-    dfs.mkdirs(dir);
-    dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
-  }
-
-  private void tearDown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  private HdfsConfiguration newHdfsConfiguration() {
-    final HdfsConfiguration conf = new HdfsConfiguration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
-    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
-    return conf;
-  }
 
   @Test(timeout=300000)
   public void testMultipleDatanodeFailure56() throws Exception {
@@ -432,53 +231,11 @@ public class TestDFSStripedOutputStreamWithFailure {
     }
   }
 
-  void runTest(final int length) {
-    final HdfsConfiguration conf = newHdfsConfiguration();
-    for (int dn = 0; dn < dataBlocks + parityBlocks; dn++) {
-      try {
-        LOG.info("runTest: dn=" + dn + ", length=" + length);
-        setup(conf);
-        runTest(length, new int[]{length / 2}, new int[]{dn}, false);
-      } catch (Throwable e) {
-        final String err = "failed, dn=" + dn + ", length=" + length
-            + StringUtils.stringifyException(e);
-        LOG.error(err);
-        Assert.fail(err);
-      } finally {
-        tearDown();
-      }
-    }
-  }
-
-  void runTestWithMultipleFailure(final int length) throws Exception {
-    final HdfsConfiguration conf = newHdfsConfiguration();
-    for (int[] dnIndex : dnIndexSuite) {
-      int[] killPos = getKillPositions(length, dnIndex.length);
-      try {
-        LOG.info("runTestWithMultipleFailure: length==" + length + ", killPos="
-            + Arrays.toString(killPos) + ", dnIndex="
-            + Arrays.toString(dnIndex));
-        setup(conf);
-        runTest(length, killPos, dnIndex, false);
-      } catch (Throwable e) {
-        final String err = "failed, killPos=" + Arrays.toString(killPos)
-            + ", dnIndex=" + Arrays.toString(dnIndex) + ", length=" + length;
-        LOG.error(err);
-        throw e;
-      } finally {
-        tearDown();
-      }
-    }
-  }
-
   /**
    * When the two DataNodes with partial data blocks fail.
    */
   @Test
-  public void runTestWithDifferentLengths() throws Exception {
-    assumeTrue("Skip this test case in the subclasses. Once is enough.",
-        this.getClass().equals(TestDFSStripedOutputStreamWithFailure.class));
-
+  public void testCloseWithExceptionsInStreamer() throws Exception {
     final HdfsConfiguration conf = newHdfsConfiguration();
 
     final int[] fileLengths = {
@@ -515,9 +272,6 @@ public class TestDFSStripedOutputStreamWithFailure {
    */
   @Test
   public void runTestWithShortStripe() throws Exception {
-    assumeTrue("Skip this test case in the subclasses. Once is enough.",
-        this.getClass().equals(TestDFSStripedOutputStreamWithFailure.class));
-
     final HdfsConfiguration conf = newHdfsConfiguration();
     // Write a file with a 1 cell partial stripe
     final int length = cellSize - 123;
@@ -543,238 +297,4 @@ public class TestDFSStripedOutputStreamWithFailure {
       tearDown();
     }
   }
-
-  /**
-   * runTest implementation.
-   * @param length file length
-   * @param killPos killing positions in ascending order
-   * @param dnIndex DN index to kill when meets killing positions
-   * @param tokenExpire wait token to expire when kill a DN
-   * @throws Exception
-   */
-  private void runTest(final int length, final int[] killPos,
-      final int[] dnIndex, final boolean tokenExpire) throws Exception {
-    if (killPos[0] <= FLUSH_POS) {
-      LOG.warn("killPos=" + Arrays.toString(killPos) + " <= FLUSH_POS="
-          + FLUSH_POS + ", length=" + length + ", dnIndex="
-          + Arrays.toString(dnIndex));
-      return; //skip test
-    }
-    Preconditions.checkArgument(length > killPos[0], "length=%s <= killPos=%s",
-        length, killPos);
-    Preconditions.checkArgument(killPos.length == dnIndex.length);
-
-    final Path p = new Path(dir, "dn" + Arrays.toString(dnIndex)
-        + "len" + length + "kill" +  Arrays.toString(killPos));
-    final String fullPath = p.toString();
-    LOG.info("fullPath=" + fullPath);
-
-    if (tokenExpire) {
-      final NameNode nn = cluster.getNameNode();
-      final BlockManager bm = nn.getNamesystem().getBlockManager();
-      final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
-
-      // set a short token lifetime (6 second)
-      SecurityTestUtil.setBlockTokenLifetime(sm, 6000L);
-    }
-
-    final AtomicInteger pos = new AtomicInteger();
-    final FSDataOutputStream out = dfs.create(p);
-    final DFSStripedOutputStream stripedOut
-        = (DFSStripedOutputStream)out.getWrappedStream();
-
-    // first GS of this block group which never proceeds blockRecovery
-    long firstGS = -1;
-    long oldGS = -1; // the old GS before bumping
-    List<Long> gsList = new ArrayList<>();
-    final List<DatanodeInfo> killedDN = new ArrayList<>();
-    int numKilled = 0;
-    for(; pos.get() < length;) {
-      final int i = pos.getAndIncrement();
-      if (numKilled < killPos.length && i == killPos[numKilled]) {
-        assertTrue(firstGS != -1);
-        final long gs = getGenerationStamp(stripedOut);
-        if (numKilled == 0) {
-          assertEquals(firstGS, gs);
-        } else {
-          //TODO: implement hflush/hsync and verify gs strict greater than oldGS
-          assertTrue(gs >= oldGS);
-        }
-        oldGS = gs;
-
-        if (tokenExpire) {
-          DFSTestUtil.flushInternal(stripedOut);
-          waitTokenExpires(out);
-        }
-
-        killedDN.add(
-            killDatanode(cluster, stripedOut, dnIndex[numKilled], pos));
-        numKilled++;
-      }
-
-      write(out, i);
-
-      if (i % blockGroupSize == FLUSH_POS) {
-        firstGS = getGenerationStamp(stripedOut);
-        oldGS = firstGS;
-      }
-      if (i > 0 && (i + 1) % blockGroupSize == 0) {
-        gsList.add(oldGS);
-      }
-    }
-    gsList.add(oldGS);
-    out.close();
-    assertEquals(dnIndex.length, numKilled);
-
-    StripedFileTestUtil.waitBlockGroupsReported(dfs, fullPath, numKilled);
-
-    cluster.triggerBlockReports();
-    StripedFileTestUtil.checkData(dfs, p, length, killedDN, gsList,
-        blockGroupSize);
-  }
-
-  static void write(FSDataOutputStream out, int i) throws IOException {
-    try {
-      out.write(StripedFileTestUtil.getByte(i));
-    } catch(IOException ioe) {
-      throw new IOException("Failed at i=" + i, ioe);
-    }
-  }
-
-  static long getGenerationStamp(DFSStripedOutputStream out)
-      throws IOException {
-    final long gs = out.getBlock().getGenerationStamp();
-    LOG.info("getGenerationStamp returns " + gs);
-    return gs;
-  }
-
-  static DatanodeInfo getDatanodes(StripedDataStreamer streamer) {
-    for(;;) {
-      DatanodeInfo[] datanodes = streamer.getNodes();
-      if (datanodes == null) {
-        // try peeking following block.
-        final LocatedBlock lb = streamer.peekFollowingBlock();
-        if (lb != null) {
-          datanodes = lb.getLocations();
-        }
-      }
-
-      if (datanodes != null) {
-        Assert.assertEquals(1, datanodes.length);
-        Assert.assertNotNull(datanodes[0]);
-        return datanodes[0];
-      }
-
-      try {
-        Thread.sleep(100);
-      } catch (InterruptedException ie) {
-        Assert.fail(StringUtils.stringifyException(ie));
-        return null;
-      }
-    }
-  }
-
-  static DatanodeInfo killDatanode(MiniDFSCluster cluster,
-      DFSStripedOutputStream out, final int dnIndex, final AtomicInteger pos) {
-    final StripedDataStreamer s = out.getStripedDataStreamer(dnIndex);
-    final DatanodeInfo datanode = getDatanodes(s);
-    LOG.info("killDatanode " + dnIndex + ": " + datanode + ", pos=" + pos);
-    if (datanode != null) {
-      cluster.stopDataNode(datanode.getXferAddr());
-    }
-    return datanode;
-  }
-
-
-  private void waitTokenExpires(FSDataOutputStream out) throws IOException {
-    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(out);
-    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
-      try {
-        Thread.sleep(10);
-      } catch (InterruptedException ignored) {
-      }
-    }
-  }
-
-  int getBase() {
-    final String name = getClass().getSimpleName();
-    int i = name.length() - 1;
-    for(; i >= 0 && Character.isDigit(name.charAt(i));){
-      i--;
-    }
-    String number = name.substring(i + 1);
-    try {
-      return Integer.parseInt(number);
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  private void run(int offset) {
-    int base = getBase();
-    assumeTrue(base >= 0);
-    final int i = offset + base;
-    final Integer length = getLength(i);
-    if (length == null) {
-      System.out.println("Skip test " + i + " since length=null.");
-      return;
-    }
-    if (RANDOM.nextInt(16) != 0) {
-      System.out.println("Test " + i + ", length=" + length
-          + ", is not chosen to run.");
-      return;
-    }
-    System.out.println("Run test " + i + ", length=" + length);
-    runTest(length);
-  }
-
-  @Test(timeout = 240000)
-  public void test0() {
-    run(0);
-  }
-
-  @Test(timeout = 240000)
-  public void test1() {
-    run(1);
-  }
-
-  @Test(timeout = 240000)
-  public void test2() {
-    run(2);
-  }
-
-  @Test(timeout = 240000)
-  public void test3() {
-    run(3);
-  }
-
-  @Test(timeout = 240000)
-  public void test4() {
-    run(4);
-  }
-
-  @Test(timeout = 240000)
-  public void test5() {
-    run(5);
-  }
-
-  @Test(timeout = 240000)
-  public void test6() {
-    run(6);
-  }
-
-  @Test(timeout = 240000)
-  public void test7() {
-    run(7);
-  }
-
-  @Test(timeout = 240000)
-  public void test8() {
-    run(8);
-  }
-
-  @Test(timeout = 240000)
-  public void test9() {
-    run(9);
-  }
 }

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure000.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure000
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure010.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure010
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure020.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure020
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure030.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure030
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure040.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure040
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure050.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure050
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure060.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure060
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure070.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure070
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure080.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure080
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure090.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure090
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure100.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure100
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure110.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure110
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure120.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure120
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure130.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure130
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure140.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure140
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure150.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure150
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure160.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure160
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure170.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure170
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure180.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure180
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure190.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure190
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure200.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure200
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure210.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-/**
- * Test striped file write operation with data node failures.
- */
-public class TestDFSStripedOutputStreamWithFailure210
-    extends TestDFSStripedOutputStreamWithFailure {}

+ 426 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java

@@ -0,0 +1,426 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
+import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.io.erasurecode.CodecUtil;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
+import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+import org.junit.Assert;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+import java.util.Stack;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Base class for test striped file write operation.
+ */
+public class TestDFSStripedOutputStreamWithFailureBase {
+  public static final Logger LOG = LoggerFactory.getLogger(
+      TestDFSStripedOutputStreamWithFailureBase.class);
+  static {
+    GenericTestUtils.setLogLevel(DFSOutputStream.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(
+        LoggerFactory.getLogger(BlockPlacementPolicy.class), Level.TRACE);
+  }
+
+  protected final int cellSize = 64 * 1024; // 64k
+  protected final int stripesPerBlock = 4;
+  protected ErasureCodingPolicy ecPolicy;
+  protected int dataBlocks;
+  protected int parityBlocks;
+  protected int blockSize;
+  protected int blockGroupSize;
+  private int[][] dnIndexSuite;
+  protected List<Integer> lengths;
+  protected static final Random RANDOM = new Random();
+  MiniDFSCluster cluster;
+  DistributedFileSystem dfs;
+  final Path dir = new Path("/"
+      + TestDFSStripedOutputStreamWithFailureBase.class.getSimpleName());
+  protected static final int FLUSH_POS =
+      9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1;
+
+  public ECSchema getEcSchema() {
+    return StripedFileTestUtil.getDefaultECPolicy().getSchema();
+  }
+
+  /*
+   * Initialize erasure coding policy.
+   */
+  @Before
+  public void init() {
+    ecPolicy = new ErasureCodingPolicy(getEcSchema(), cellSize);
+    dataBlocks = ecPolicy.getNumDataUnits();
+    parityBlocks = ecPolicy.getNumParityUnits();
+    blockSize = cellSize * stripesPerBlock;
+    blockGroupSize = blockSize * dataBlocks;
+    dnIndexSuite = getDnIndexSuite();
+    lengths = newLengths();
+  }
+
+  List<Integer> newLengths() {
+    final List<Integer> lens = new ArrayList<>();
+    lens.add(FLUSH_POS + 2);
+    for(int b = 0; b <= 2; b++) {
+      for(int c = 0; c < stripesPerBlock * dataBlocks; c++) {
+        for(int delta = -1; delta <= 1; delta++) {
+          final int length = b * blockGroupSize + c * cellSize + delta;
+          System.out.println(lens.size() + ": length=" + length
+              + ", (b, c, d) = (" + b + ", " + c + ", " + delta + ")");
+          lens.add(length);
+        }
+      }
+    }
+    return lens;
+  }
+
+  private int[][] getDnIndexSuite() {
+    final int maxNumLevel = 2;
+    final int maxPerLevel = 5;
+    List<List<Integer>> allLists = new ArrayList<>();
+    int numIndex = parityBlocks;
+    for (int i = 0; i < maxNumLevel && numIndex > 1; i++) {
+      List<List<Integer>> lists =
+          combinations(dataBlocks + parityBlocks, numIndex);
+      if (lists.size() > maxPerLevel) {
+        Collections.shuffle(lists);
+        lists = lists.subList(0, maxPerLevel);
+      }
+      allLists.addAll(lists);
+      numIndex--;
+    }
+    int[][] dnIndexArray = new int[allLists.size()][];
+    for (int i = 0; i < dnIndexArray.length; i++) {
+      int[] list = new int[allLists.get(i).size()];
+      for (int j = 0; j < list.length; j++) {
+        list[j] = allLists.get(i).get(j);
+      }
+      dnIndexArray[i] = list;
+    }
+    return dnIndexArray;
+  }
+
+  // get all combinations of k integers from {0,...,n-1}
+  private static List<List<Integer>> combinations(int n, int k) {
+    List<List<Integer>> res = new LinkedList<List<Integer>>();
+    if (k >= 1 && n >= k) {
+      getComb(n, k, new Stack<Integer>(), res);
+    }
+    return res;
+  }
+
+  private static void getComb(int n, int k, Stack<Integer> stack,
+      List<List<Integer>> res) {
+    if (stack.size() == k) {
+      List<Integer> list = new ArrayList<Integer>(stack);
+      res.add(list);
+    } else {
+      int next = stack.empty() ? 0 : stack.peek() + 1;
+      while (next < n) {
+        stack.push(next);
+        getComb(n, k, stack, res);
+        next++;
+      }
+    }
+    if (!stack.empty()) {
+      stack.pop();
+    }
+  }
+
+  int[] getKillPositions(int fileLen, int num) {
+    int[] positions = new int[num];
+    for (int i = 0; i < num; i++) {
+      positions[i] = fileLen * (i + 1) / (num + 1);
+    }
+    return positions;
+  }
+
+  Integer getLength(int i) {
+    return i >= 0 && i < lengths.size() ? lengths.get(i): null;
+  }
+
+  void setup(Configuration conf) throws IOException {
+    System.out.println("NUM_DATA_BLOCKS  = " + dataBlocks);
+    System.out.println("NUM_PARITY_BLOCKS= " + parityBlocks);
+    System.out.println("CELL_SIZE        = " + cellSize + " (=" +
+        StringUtils.TraditionalBinaryPrefix.long2String(cellSize, "B", 2)
+        + ")");
+    System.out.println("BLOCK_SIZE       = " + blockSize + " (=" +
+        StringUtils.TraditionalBinaryPrefix.long2String(blockSize, "B", 2)
+        + ")");
+    System.out.println("BLOCK_GROUP_SIZE = " + blockGroupSize + " (=" +
+        StringUtils.TraditionalBinaryPrefix.long2String(blockGroupSize, "B", 2)
+        + ")");
+    final int numDNs = dataBlocks + parityBlocks;
+    if (ErasureCodeNative.isNativeCodeLoaded()) {
+      conf.set(
+          CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
+          NativeRSRawErasureCoderFactory.CODER_NAME);
+    }
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
+    cluster.waitActive();
+    dfs = cluster.getFileSystem();
+    AddErasureCodingPolicyResponse[] res =
+        dfs.addErasureCodingPolicies(new ErasureCodingPolicy[]{ecPolicy});
+    ecPolicy = res[0].getPolicy();
+    dfs.enableErasureCodingPolicy(ecPolicy.getName());
+    DFSTestUtil.enableAllECPolicies(dfs);
+    dfs.mkdirs(dir);
+    dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
+  }
+
+  void tearDown() {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  HdfsConfiguration newHdfsConfiguration() {
+    final HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
+        false);
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
+    return conf;
+  }
+
+  void runTest(final int length) {
+    final HdfsConfiguration conf = newHdfsConfiguration();
+    for (int dn = 0; dn < dataBlocks + parityBlocks; dn++) {
+      try {
+        LOG.info("runTest: dn=" + dn + ", length=" + length);
+        setup(conf);
+        runTest(length, new int[]{length / 2}, new int[]{dn}, false);
+      } catch (Throwable e) {
+        final String err = "failed, dn=" + dn + ", length=" + length
+            + StringUtils.stringifyException(e);
+        LOG.error(err);
+        Assert.fail(err);
+      } finally {
+        tearDown();
+      }
+    }
+  }
+
+  void runTestWithMultipleFailure(final int length) throws Exception {
+    final HdfsConfiguration conf = newHdfsConfiguration();
+    for (int[] dnIndex : dnIndexSuite) {
+      int[] killPos = getKillPositions(length, dnIndex.length);
+      try {
+        LOG.info("runTestWithMultipleFailure: length==" + length + ", killPos="
+            + Arrays.toString(killPos) + ", dnIndex="
+            + Arrays.toString(dnIndex));
+        setup(conf);
+        runTest(length, killPos, dnIndex, false);
+      } catch (Throwable e) {
+        final String err = "failed, killPos=" + Arrays.toString(killPos)
+            + ", dnIndex=" + Arrays.toString(dnIndex) + ", length=" + length;
+        LOG.error(err);
+        throw e;
+      } finally {
+        tearDown();
+      }
+    }
+  }
+
+  /**
+   * runTest implementation.
+   * @param length file length
+   * @param killPos killing positions in ascending order
+   * @param dnIndex DN index to kill when meets killing positions
+   * @param tokenExpire wait token to expire when kill a DN
+   * @throws Exception
+   */
+  void runTest(final int length, final int[] killPos,
+      final int[] dnIndex, final boolean tokenExpire) throws Exception {
+    if (killPos[0] <= FLUSH_POS) {
+      LOG.warn("killPos=" + Arrays.toString(killPos) + " <= FLUSH_POS="
+          + FLUSH_POS + ", length=" + length + ", dnIndex="
+          + Arrays.toString(dnIndex));
+      return; //skip test
+    }
+    Preconditions.checkArgument(length > killPos[0], "length=%s <= killPos=%s",
+        length, killPos);
+    Preconditions.checkArgument(killPos.length == dnIndex.length);
+
+    final Path p = new Path(dir, "dn" + Arrays.toString(dnIndex)
+        + "len" + length + "kill" +  Arrays.toString(killPos));
+    final String fullPath = p.toString();
+    LOG.info("fullPath=" + fullPath);
+
+    if (tokenExpire) {
+      final NameNode nn = cluster.getNameNode();
+      final BlockManager bm = nn.getNamesystem().getBlockManager();
+      final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
+
+      // set a short token lifetime (6 seconds)
+      SecurityTestUtil.setBlockTokenLifetime(sm, 6000L);
+    }
+
+    final AtomicInteger pos = new AtomicInteger();
+    final FSDataOutputStream out = dfs.create(p);
+    final DFSStripedOutputStream stripedOut
+        = (DFSStripedOutputStream)out.getWrappedStream();
+
+    // first GS of this block group, which has never gone through block recovery
+    long firstGS = -1;
+    long oldGS = -1; // the old GS before bumping
+    List<Long> gsList = new ArrayList<>();
+    final List<DatanodeInfo> killedDN = new ArrayList<>();
+    int numKilled = 0;
+    for(; pos.get() < length;) {
+      final int i = pos.getAndIncrement();
+      if (numKilled < killPos.length && i == killPos[numKilled]) {
+        assertTrue(firstGS != -1);
+        final long gs = getGenerationStamp(stripedOut);
+        if (numKilled == 0) {
+          assertEquals(firstGS, gs);
+        } else {
+          //TODO: implement hflush/hsync and verify gs is strictly greater than oldGS
+          assertTrue(gs >= oldGS);
+        }
+        oldGS = gs;
+
+        if (tokenExpire) {
+          DFSTestUtil.flushInternal(stripedOut);
+          waitTokenExpires(out);
+        }
+
+        killedDN.add(
+            killDatanode(cluster, stripedOut, dnIndex[numKilled], pos));
+        numKilled++;
+      }
+
+      write(out, i);
+
+      if (i % blockGroupSize == FLUSH_POS) {
+        firstGS = getGenerationStamp(stripedOut);
+        oldGS = firstGS;
+      }
+      if (i > 0 && (i + 1) % blockGroupSize == 0) {
+        gsList.add(oldGS);
+      }
+    }
+    gsList.add(oldGS);
+    out.close();
+    assertEquals(dnIndex.length, numKilled);
+
+    StripedFileTestUtil.waitBlockGroupsReported(dfs, fullPath, numKilled);
+
+    cluster.triggerBlockReports();
+    StripedFileTestUtil.checkData(dfs, p, length, killedDN, gsList,
+        blockGroupSize);
+  }
+
+  static void write(FSDataOutputStream out, int i) throws IOException {
+    try {
+      out.write(StripedFileTestUtil.getByte(i));
+    } catch(IOException ioe) {
+      throw new IOException("Failed at i=" + i, ioe);
+    }
+  }
+
+  static long getGenerationStamp(DFSStripedOutputStream out)
+      throws IOException {
+    final long gs = out.getBlock().getGenerationStamp();
+    LOG.info("getGenerationStamp returns " + gs);
+    return gs;
+  }
+
+  static DatanodeInfo getDatanodes(StripedDataStreamer streamer) {
+    for(;;) {
+      DatanodeInfo[] datanodes = streamer.getNodes();
+      if (datanodes == null) {
+        // try peeking following block.
+        final LocatedBlock lb = streamer.peekFollowingBlock();
+        if (lb != null) {
+          datanodes = lb.getLocations();
+        }
+      }
+
+      if (datanodes != null) {
+        Assert.assertEquals(1, datanodes.length);
+        Assert.assertNotNull(datanodes[0]);
+        return datanodes[0];
+      }
+
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException ie) {
+        Assert.fail(StringUtils.stringifyException(ie));
+        return null;
+      }
+    }
+  }
+
+  static DatanodeInfo killDatanode(MiniDFSCluster cluster,
+      DFSStripedOutputStream out, final int dnIndex, final AtomicInteger pos) {
+    final StripedDataStreamer s = out.getStripedDataStreamer(dnIndex);
+    final DatanodeInfo datanode = getDatanodes(s);
+    LOG.info("killDatanode " + dnIndex + ": " + datanode + ", pos=" + pos);
+    if (datanode != null) {
+      cluster.stopDataNode(datanode.getXferAddr());
+    }
+    return datanode;
+  }
+
+  private void waitTokenExpires(FSDataOutputStream out) throws IOException {
+    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(out);
+    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
+      try {
+        Thread.sleep(10);
+      } catch (InterruptedException ignored) {
+      }
+    }
+  }
+}

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java

@@ -132,7 +132,7 @@ public class TestQuota {
   }
 
   @AfterClass
-  public static void tearDownClass() throws Exception {
+  public static void tearDownClass() {
     try {
       System.out.flush();
       System.err.flush();
@@ -170,7 +170,7 @@ public class TestQuota {
    * @throws Exception
    */
   @Test
-  public void testDSQuotaExceededExceptionIsHumanReadable() throws Exception {
+  public void testDSQuotaExceededExceptionIsHumanReadable() {
     Integer bytes = 1024;
     try {
       throw new DSQuotaExceededException(bytes, bytes);

+ 40 - 19
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java

@@ -22,7 +22,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.junit.Assert;
 import org.junit.Test;
@@ -40,6 +42,7 @@ public class TestReadStripedFileWithMissingBlocks {
       .getLog(TestReadStripedFileWithMissingBlocks.class);
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
+  private DFSClient dfsClient;
   private Configuration conf = new HdfsConfiguration();
   private final ErasureCodingPolicy ecPolicy =
       StripedFileTestUtil.getDefaultECPolicy();
@@ -49,7 +52,9 @@ public class TestReadStripedFileWithMissingBlocks {
   private final int stripPerBlock = 4;
   private final int blockSize = stripPerBlock * cellSize;
   private final int blockGroupSize = blockSize * dataBlocks;
-  private final int numDNs = dataBlocks + parityBlocks;
+  // Starting with two more datanodes; a minimum of 9 should be up for
+  // the test to pass.
+  private final int numDNs = dataBlocks + parityBlocks + 2;
   private final int fileLength = blockSize * dataBlocks + 123;
 
   @Rule
@@ -63,6 +68,8 @@ public class TestReadStripedFileWithMissingBlocks {
         "/", ecPolicy.getName());
     fs = cluster.getFileSystem();
     fs.enableErasureCodingPolicy(ecPolicy.getName());
+    dfsClient = new DFSClient(cluster.getNameNode(0).getNameNodeAddress(),
+        conf);
   }
 
   public void tearDown() throws IOException {
@@ -74,29 +81,34 @@ public class TestReadStripedFileWithMissingBlocks {
 
   @Test
   public void testReadFileWithMissingBlocks() throws Exception {
-    for (int missingData = 1; missingData <= dataBlocks; missingData++) {
-      for (int missingParity = 0; missingParity <=
-          parityBlocks - missingData; missingParity++) {
-        try {
-          setup();
-          readFileWithMissingBlocks(new Path("/foo"), fileLength,
-              missingData, missingParity);
-        } finally {
-          tearDown();
+    try {
+      setup();
+      Path srcPath = new Path("/foo");
+      final byte[] expected = StripedFileTestUtil.generateBytes(fileLength);
+      DFSTestUtil.writeFile(fs, srcPath, new String(expected));
+      StripedFileTestUtil
+          .waitBlockGroupsReported(fs, srcPath.toUri().getPath());
+      StripedFileTestUtil.verifyLength(fs, srcPath, fileLength);
+
+      for (int missingData = 1; missingData <= dataBlocks; missingData++) {
+        for (int missingParity = 0; missingParity <=
+            parityBlocks - missingData; missingParity++) {
+          readFileWithMissingBlocks(srcPath, fileLength, missingData,
+              missingParity, expected);
         }
       }
+    } finally {
+      tearDown();
     }
   }
 
+
   private void readFileWithMissingBlocks(Path srcPath, int fileLength,
-      int missingDataNum, int missingParityNum)
+      int missingDataNum, int missingParityNum, byte[] expected)
       throws Exception {
     LOG.info("readFileWithMissingBlocks: (" + missingDataNum + ","
         + missingParityNum + ")");
-    final byte[] expected = StripedFileTestUtil.generateBytes(fileLength);
-    DFSTestUtil.writeFile(fs, srcPath, new String(expected));
-    StripedFileTestUtil.waitBlockGroupsReported(fs, srcPath.toUri().getPath());
-    StripedFileTestUtil.verifyLength(fs, srcPath, fileLength);
+
     int dataBlocks = (fileLength - 1) / cellSize + 1;
     BlockLocation[] locs = fs.getFileBlockLocations(srcPath, 0, cellSize);
 
@@ -112,7 +124,8 @@ public class TestReadStripedFileWithMissingBlocks {
 
     // make sure there are missing block locations
     BlockLocation[] newLocs = fs.getFileBlockLocations(srcPath, 0, cellSize);
-    Assert.assertTrue(newLocs[0].getNames().length < locs[0].getNames().length);
+    Assert.assertTrue(
+        newLocs[0].getNames().length < locs[0].getNames().length);
 
     byte[] smallBuf = new byte[1024];
     byte[] largeBuf = new byte[fileLength + 100];
@@ -120,10 +133,18 @@ public class TestReadStripedFileWithMissingBlocks {
         blockGroupSize);
     StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
         smallBuf);
-    StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
+    StripedFileTestUtil
+        .verifyPread(fs, srcPath, fileLength, expected, largeBuf);
+    restartDeadDataNodes();
+  }
 
-    // delete the file
-    fs.delete(srcPath, true);
+  private void restartDeadDataNodes() throws IOException {
+    DatanodeInfo[] deadNodes = dfsClient
+        .datanodeReport(DatanodeReportType.DEAD);
+    for (DatanodeInfo dnInfo : deadNodes) {
+      cluster.restartDataNode(dnInfo.getXferAddr());
+    }
+    cluster.triggerHeartbeats();
   }
 
   private void stopDataNodes(BlockLocation[] locs, int[] datanodes)

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java

@@ -305,7 +305,6 @@ public class TestDataNodeHotSwapVolumes {
 
   private void addVolumes(int numNewVolumes, CountDownLatch waitLatch)
       throws ReconfigurationException, IOException, InterruptedException {
-    File dataDir = new File(cluster.getDataDirectory());
     DataNode dn = cluster.getDataNodes().get(0);  // First DataNode.
     Configuration conf = dn.getConf();
     String oldDataDir = conf.get(DFS_DATANODE_DATA_DIR_KEY);
@@ -315,14 +314,14 @@ public class TestDataNodeHotSwapVolumes {
     int startIdx = oldDataDir.split(",").length + 1;
     // Find the first available (non-taken) directory name for data volume.
     while (true) {
-      File volumeDir = new File(dataDir, "data" + startIdx);
+      File volumeDir = cluster.getInstanceStorageDir(0, startIdx);
       if (!volumeDir.exists()) {
         break;
       }
       startIdx++;
     }
     for (int i = startIdx; i < startIdx + numNewVolumes; i++) {
-      File volumeDir = new File(dataDir, "data" + String.valueOf(i));
+      File volumeDir = cluster.getInstanceStorageDir(0, i);
       newVolumeDirs.add(volumeDir);
       volumeDir.mkdirs();
       newDataDirBuf.append(",");
@@ -1037,7 +1036,7 @@ public class TestDataNodeHotSwapVolumes {
         InternalDataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());
 
     // Remove a data dir from datanode
-    File dataDirToKeep = new File(cluster.getDataDirectory(), "data1");
+    File dataDirToKeep = cluster.getInstanceStorageDir(0, 0);
     assertThat(
         "DN did not update its own config",
         dn.reconfigurePropertyImpl(

+ 10 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

@@ -168,7 +168,7 @@ public class TestDataNodeVolumeFailure {
    
     // fail the volume
     // delete/make non-writable one of the directories (failed volume)
-    data_fail = new File(dataDir, "data3");
+    data_fail = cluster.getInstanceStorageDir(1, 0);
     failedDir = MiniDFSCluster.getFinalizedDir(data_fail,
         cluster.getNamesystem().getBlockPoolId());
     if (failedDir.exists() &&
@@ -235,7 +235,7 @@ public class TestDataNodeVolumeFailure {
     DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
     DFSTestUtil.waitReplication(fs, file1, (short) 2);
 
-    File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
+    File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
     DataNode dn0 = cluster.getDataNodes().get(0);
     DataNodeTestUtils.waitForDiskError(dn0,
@@ -298,8 +298,8 @@ public class TestDataNodeVolumeFailure {
     assumeNotWindows();
 
     // make both data directories to fail on dn0
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1, dn0Vol2);
     DataNode dn0 = cluster.getDataNodes().get(0);
     DataNodeTestUtils.waitForDiskError(dn0,
@@ -322,8 +322,8 @@ public class TestDataNodeVolumeFailure {
     // volume failures which is currently not supported on Windows.
     assumeNotWindows();
 
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
         DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
@@ -366,8 +366,8 @@ public class TestDataNodeVolumeFailure {
     // volume failures which is currently not supported on Windows.
     assumeNotWindows();
 
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final File dn0VolNew = new File(dataDir, "data_new");
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
@@ -413,8 +413,8 @@ public class TestDataNodeVolumeFailure {
     DFSTestUtil.waitReplication(fs, file1, (short)3);
 
     // Fail the first volume on both datanodes
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
 
     Path file2 = new Path("/test2");

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java

@@ -102,8 +102,7 @@ public class TestDataNodeVolumeMetrics {
 
       ArrayList<DataNode> dns = cluster.getDataNodes();
       assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
-
-      final File dn1Vol2 = cluster.getInstanceStorageDir(0, 0);
+      final File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
 
       DataNodeTestUtils.injectDataDirFailure(dn1Vol2);
       verifyDataNodeVolumeMetrics(fs, cluster, fileName);

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java

@@ -615,15 +615,15 @@ public class TestDiskBalancerCommand {
     assertThat(
         outputs.get(3),
         is(allOf(containsString("DISK"),
-            containsString(cluster.getInstanceStorageDir(0, 0).getName()),
+            containsString(cluster.getInstanceStorageDir(0, 0)
+                .getAbsolutePath()),
             containsString("0.00"),
             containsString("1.00"))));
-
-
     assertThat(
         outputs.get(4),
         is(allOf(containsString("DISK"),
-            containsString(cluster.getInstanceStorageDir(0, 1).getName()),
+            containsString(cluster.getInstanceStorageDir(0, 1)
+                .getAbsolutePath()),
             containsString("0.00"),
             containsString("1.00"))));
   }

+ 147 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterHeartbeatService.java

@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.RetryNTimes;
+import org.apache.curator.test.TestingServer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.RouterStore;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.RouterState;
+import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.waitStateStore;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test cases for router heartbeat service.
+ */
+public class TestRouterHeartbeatService {
+  private Router router;
+  private final String routerId = "router1";
+  private TestingServer testingServer;
+  private CuratorFramework curatorFramework;
+
+  @Before
+  public void setup() throws Exception {
+    router = new Router();
+    router.setRouterId(routerId);
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE_MS, 1);
+    Configuration routerConfig =
+        new RouterConfigBuilder(conf).stateStore().build();
+    routerConfig.setLong(DFSConfigKeys.FEDERATION_STORE_CONNECTION_TEST_MS,
+        TimeUnit.HOURS.toMillis(1));
+    routerConfig.setClass(DFSConfigKeys.FEDERATION_STORE_DRIVER_CLASS,
+        StateStoreZooKeeperImpl.class, StateStoreDriver.class);
+
+    testingServer = new TestingServer();
+    String connectStr = testingServer.getConnectString();
+    curatorFramework = CuratorFrameworkFactory.builder()
+        .connectString(connectStr)
+        .retryPolicy(new RetryNTimes(100, 100))
+        .build();
+    curatorFramework.start();
+    routerConfig.set(CommonConfigurationKeys.ZK_ADDRESS, connectStr);
+    router.init(routerConfig);
+    router.start();
+
+
+    waitStateStore(router.getStateStore(), TimeUnit.SECONDS.toMicros(10));
+  }
+
+  @Test
+  public void testStateStoreUnavailable() throws IOException {
+    curatorFramework.close();
+    testingServer.stop();
+    router.getStateStore().stop();
+    // The driver is not ready
+    assertFalse(router.getStateStore().isDriverReady());
+
+    // Do a heartbeat, and no exception thrown out
+    RouterHeartbeatService heartbeatService =
+        new RouterHeartbeatService(router);
+    heartbeatService.updateStateStore();
+  }
+
+  @Test
+  public void testStateStoreAvailable() throws Exception {
+    // The driver is ready
+    StateStoreService stateStore = router.getStateStore();
+    assertTrue(router.getStateStore().isDriverReady());
+    RouterStore routerStore = router.getRouterStateManager();
+
+    // No record about this router
+    stateStore.refreshCaches(true);
+    GetRouterRegistrationRequest request =
+        GetRouterRegistrationRequest.newInstance(routerId);
+    GetRouterRegistrationResponse response =
+        router.getRouterStateManager().getRouterRegistration(request);
+    RouterState routerState = response.getRouter();
+    String id = routerState.getRouterId();
+    StateStoreVersion version = routerState.getStateStoreVersion();
+    assertNull(id);
+    assertNull(version);
+
+    // Do a heartbeat
+    RouterHeartbeatService heartbeatService =
+        new RouterHeartbeatService(router);
+    heartbeatService.updateStateStore();
+
+    // We should have a record
+    stateStore.refreshCaches(true);
+    request = GetRouterRegistrationRequest.newInstance(routerId);
+    response = routerStore.getRouterRegistration(request);
+    routerState = response.getRouter();
+    id = routerState.getRouterId();
+    version = routerState.getStateStoreVersion();
+    assertNotNull(id);
+    assertNotNull(version);
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    if (curatorFramework != null) {
+      curatorFramework.close();
+    }
+    if (testingServer != null) {
+      testingServer.stop();
+    }
+    if (router != null) {
+      router.shutDown();
+    }
+  }
+}

+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java

@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.Query;
 import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;
 import org.apache.hadoop.hdfs.server.federation.store.records.RouterState;
 import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -74,6 +75,14 @@ public class TestStateStoreDriverBase {
     return stateStore.getDriver();
   }
 
+  @After
+  public void cleanMetrics() {
+    if (stateStore != null) {
+      StateStoreMetrics metrics = stateStore.getMetrics();
+      metrics.reset();
+    }
+  }
+
   @AfterClass
   public static void tearDownCluster() {
     if (stateStore != null) {

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFile.java

@@ -61,4 +61,16 @@ public class TestStateStoreFile extends TestStateStoreDriverBase {
       throws IllegalArgumentException, IllegalAccessException, IOException {
     testRemove(getStateStoreDriver());
   }
+
+  @Test
+  public void testFetchErrors()
+      throws IllegalArgumentException, IllegalAccessException, IOException {
+    testFetchErrors(getStateStoreDriver());
+  }
+
+  @Test
+  public void testMetrics()
+      throws IllegalArgumentException, IllegalAccessException, IOException {
+    testMetrics(getStateStoreDriver());
+  }
 }

+ 47 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFileBase.java

@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.driver;
+
+import static org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileBaseImpl.isOldTempRecord;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.util.Time;
+import org.junit.Test;
+
+/**
+ * Tests for the State Store file based implementation.
+ */
+public class TestStateStoreFileBase {
+
+  @Test
+  public void testTempOld() {
+    assertFalse(isOldTempRecord("test.txt"));
+    assertFalse(isOldTempRecord("testfolder/test.txt"));
+
+    long tnow = Time.now();
+    String tmpFile1 = "test." + tnow + ".tmp";
+    assertFalse(isOldTempRecord(tmpFile1));
+
+    long told = Time.now() - TimeUnit.MINUTES.toMillis(1);
+    String tmpFile2 = "test." + told + ".tmp";
+    assertTrue(isOldTempRecord(tmpFile2));
+  }
+}
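
This new test pins down the contract of StateStoreFileBaseImpl.isOldTempRecord: plain record files are never treated as stale temporary records, a freshly stamped "name.<millis>.tmp" is not yet reclaimable, and one stamped a minute in the past is. The sketch below shows the kind of check the test implies; the name layout and the 30-second threshold are assumptions inferred from the test, not the committed implementation:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.util.Time;

    final class OldTempRecordSketch {
      // Assumed threshold, chosen only so that the one-minute-old file in the
      // test above qualifies; the real constant may differ.
      private static final long OLD_TMP_MS = TimeUnit.SECONDS.toMillis(30);

      /** Assumed layout: "<record>.<creationMillis>.tmp". */
      static boolean looksLikeOldTempRecord(String fileName) {
        if (!fileName.endsWith(".tmp")) {
          return false;
        }
        String[] parts = fileName.split("\\.");
        if (parts.length < 3) {
          return false;
        }
        try {
          long created = Long.parseLong(parts[parts.length - 2]);
          return Time.now() - created > OLD_TMP_MS;
        } catch (NumberFormatException e) {
          return false;
        }
      }
    }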

+ 10 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFileSystem.java

@@ -69,15 +69,15 @@ public class TestStateStoreFileSystem extends TestStateStoreDriverBase {
   }
 
   @Test
-  public void testUpdate()
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testInsert(getStateStoreDriver());
+  public void testUpdate() throws IllegalArgumentException, IOException,
+      SecurityException, ReflectiveOperationException {
+    testPut(getStateStoreDriver());
   }
 
   @Test
   public void testDelete()
       throws IllegalArgumentException, IllegalAccessException, IOException {
-    testInsert(getStateStoreDriver());
+    testRemove(getStateStoreDriver());
   }
 
   @Test
@@ -85,4 +85,10 @@ public class TestStateStoreFileSystem extends TestStateStoreDriverBase {
       throws IllegalArgumentException, IllegalAccessException, IOException {
     testFetchErrors(getStateStoreDriver());
   }
+
+  @Test
+  public void testMetrics()
+      throws IllegalArgumentException, IllegalAccessException, IOException {
+    testMetrics(getStateStoreDriver());
+  }
 }

+ 88 - 62
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java

@@ -187,77 +187,103 @@ public class TestTaskAttempt{
 
   @Test
   public void testMRAppHistoryForMap() throws Exception {
-    MRApp app = new FailingAttemptsMRApp(1, 0);
-    testMRAppHistory(app);
+    MRApp app = null;
+    try {
+      app = new FailingAttemptsMRApp(1, 0);
+      testMRAppHistory(app);
+    } finally {
+      app.close();
+    }
   }
 
   @Test
   public void testMRAppHistoryForReduce() throws Exception {
-    MRApp app = new FailingAttemptsMRApp(0, 1);
-    testMRAppHistory(app);
+    MRApp app = null;
+    try {
+      app = new FailingAttemptsMRApp(0, 1);
+      testMRAppHistory(app);
+    } finally {
+      app.close();
+    }
   }
 
   @Test
   public void testMRAppHistoryForTAFailedInAssigned() throws Exception {
     // test TA_CONTAINER_LAUNCH_FAILED for map
-    FailingAttemptsDuringAssignedMRApp app =
-        new FailingAttemptsDuringAssignedMRApp(1, 0,
-            TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED);
-    testTaskAttemptAssignedFailHistory(app);
-
-    // test TA_CONTAINER_LAUNCH_FAILED for reduce
-    app =
-        new FailingAttemptsDuringAssignedMRApp(0, 1,
-            TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED);
-    testTaskAttemptAssignedFailHistory(app);
-
-    // test TA_CONTAINER_COMPLETED for map
-    app =
-        new FailingAttemptsDuringAssignedMRApp(1, 0,
-            TaskAttemptEventType.TA_CONTAINER_COMPLETED);
-    testTaskAttemptAssignedFailHistory(app);
-
-    // test TA_CONTAINER_COMPLETED for reduce
-    app =
-        new FailingAttemptsDuringAssignedMRApp(0, 1,
-            TaskAttemptEventType.TA_CONTAINER_COMPLETED);
-    testTaskAttemptAssignedFailHistory(app);
-
-    // test TA_FAILMSG for map
-    app =
-        new FailingAttemptsDuringAssignedMRApp(1, 0,
-            TaskAttemptEventType.TA_FAILMSG);
-    testTaskAttemptAssignedFailHistory(app);
-
-    // test TA_FAILMSG for reduce
-    app =
-        new FailingAttemptsDuringAssignedMRApp(0, 1,
-            TaskAttemptEventType.TA_FAILMSG);
-    testTaskAttemptAssignedFailHistory(app);
-
-    // test TA_FAILMSG_BY_CLIENT for map
-    app =
-        new FailingAttemptsDuringAssignedMRApp(1, 0,
-            TaskAttemptEventType.TA_FAILMSG_BY_CLIENT);
-    testTaskAttemptAssignedFailHistory(app);
-
-    // test TA_FAILMSG_BY_CLIENT for reduce
-    app =
-        new FailingAttemptsDuringAssignedMRApp(0, 1,
-            TaskAttemptEventType.TA_FAILMSG_BY_CLIENT);
-    testTaskAttemptAssignedFailHistory(app);
-
-    // test TA_KILL for map
-    app =
-        new FailingAttemptsDuringAssignedMRApp(1, 0,
-            TaskAttemptEventType.TA_KILL);
-    testTaskAttemptAssignedKilledHistory(app);
-
-    // test TA_KILL for reduce
-    app =
-        new FailingAttemptsDuringAssignedMRApp(0, 1,
-            TaskAttemptEventType.TA_KILL);
-    testTaskAttemptAssignedKilledHistory(app);
+    FailingAttemptsDuringAssignedMRApp app = null;
+
+    try {
+      app =
+          new FailingAttemptsDuringAssignedMRApp(1, 0,
+              TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED);
+      testTaskAttemptAssignedFailHistory(app);
+      app.close();
+
+      // test TA_CONTAINER_LAUNCH_FAILED for reduce
+      app =
+          new FailingAttemptsDuringAssignedMRApp(0, 1,
+              TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED);
+      testTaskAttemptAssignedFailHistory(app);
+      app.close();
+
+      // test TA_CONTAINER_COMPLETED for map
+      app =
+          new FailingAttemptsDuringAssignedMRApp(1, 0,
+              TaskAttemptEventType.TA_CONTAINER_COMPLETED);
+      testTaskAttemptAssignedFailHistory(app);
+      app.close();
+
+      // test TA_CONTAINER_COMPLETED for reduce
+      app =
+          new FailingAttemptsDuringAssignedMRApp(0, 1,
+              TaskAttemptEventType.TA_CONTAINER_COMPLETED);
+      testTaskAttemptAssignedFailHistory(app);
+      app.close();
+
+      // test TA_FAILMSG for map
+      app =
+          new FailingAttemptsDuringAssignedMRApp(1, 0,
+              TaskAttemptEventType.TA_FAILMSG);
+      testTaskAttemptAssignedFailHistory(app);
+      app.close();
+
+      // test TA_FAILMSG for reduce
+      app =
+          new FailingAttemptsDuringAssignedMRApp(0, 1,
+              TaskAttemptEventType.TA_FAILMSG);
+      testTaskAttemptAssignedFailHistory(app);
+      app.close();
+
+      // test TA_FAILMSG_BY_CLIENT for map
+      app =
+          new FailingAttemptsDuringAssignedMRApp(1, 0,
+              TaskAttemptEventType.TA_FAILMSG_BY_CLIENT);
+      testTaskAttemptAssignedFailHistory(app);
+      app.close();
+
+      // test TA_FAILMSG_BY_CLIENT for reduce
+      app =
+          new FailingAttemptsDuringAssignedMRApp(0, 1,
+              TaskAttemptEventType.TA_FAILMSG_BY_CLIENT);
+      testTaskAttemptAssignedFailHistory(app);
+      app.close();
+
+      // test TA_KILL for map
+      app =
+          new FailingAttemptsDuringAssignedMRApp(1, 0,
+              TaskAttemptEventType.TA_KILL);
+      testTaskAttemptAssignedKilledHistory(app);
+      app.close();
+
+      // test TA_KILL for reduce
+      app =
+          new FailingAttemptsDuringAssignedMRApp(0, 1,
+              TaskAttemptEventType.TA_KILL);
+      testTaskAttemptAssignedKilledHistory(app);
+      app.close();
+    } finally {
+      app.close();
+    }
   }
 
   @Test
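
These hunks wrap every MRApp the history tests create in try/finally so the app is closed even when an assertion fails partway through. One caveat: if the constructor itself throws, app is still null when the finally block runs, and app.close() would raise a NullPointerException that masks the original failure. A null-guarded variant, purely as a sketch of that alternative (the commit keeps the unguarded close):

    @Test
    public void testMRAppHistoryForMap() throws Exception {
      MRApp app = null;
      try {
        app = new FailingAttemptsMRApp(1, 0);
        testMRAppHistory(app);
      } finally {
        if (app != null) {   // guard against a failure in the constructor
          app.close();
        }
      }
    }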

+ 3 - 1
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.fs.azure;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
 
 import java.io.File;
+import java.nio.charset.StandardCharsets;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -65,7 +66,8 @@ public class TestShellDecryptionKeyProvider
     // Create a simple script which echoes the given key plus the given
     // expected result (so that we validate both script input and output)
     File scriptFile = new File(TEST_ROOT_DIR, "testScript.cmd");
-    FileUtils.writeStringToFile(scriptFile, "@echo %1 " + expectedResult);
+    FileUtils.writeStringToFile(scriptFile, "@echo %1 " + expectedResult,
+            StandardCharsets.UTF_8);
 
     ShellDecryptionKeyProvider provider = new ShellDecryptionKeyProvider();
     Configuration conf = new Configuration();
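
This hunk, and the related ones below, converge on passing an explicit java.nio.charset.StandardCharsets.UTF_8 to FileUtils.writeStringToFile; the single-String overload used before is deprecated in commons-io and silently falls back to the platform default encoding. The pattern in isolation, with a throwaway file name:

    import java.io.File;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.commons.io.FileUtils;

    final class WriteStringDemo {
      public static void main(String[] args) throws IOException {
        File scriptFile = new File("testScript.cmd");  // demo path only
        // Explicit charset: output no longer depends on the platform default.
        FileUtils.writeStringToFile(scriptFile, "@echo %1 key", StandardCharsets.UTF_8);
      }
    }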

+ 2 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java

@@ -32,6 +32,7 @@ import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.StringWriter;
+import java.nio.charset.StandardCharsets;
 import java.util.Properties;
 
 /**
@@ -61,7 +62,7 @@ public abstract class PublishedConfigurationOutputter {
   }
 */
   public void save(File dest) throws IOException {
-    FileUtils.writeStringToFile(dest, asString(), Charsets.UTF_8);
+    FileUtils.writeStringToFile(dest, asString(), StandardCharsets.UTF_8);
   }
 
   /**

+ 2 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java

@@ -39,6 +39,7 @@ import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.util.Iterator;
 
 /**
@@ -178,6 +179,6 @@ public final class DockerClientConfigHandler {
     rootNode.put(CONFIG_AUTHS_KEY, registryUrlNode);
     String json =
         mapper.writerWithDefaultPrettyPrinter().writeValueAsString(rootNode);
-    FileUtils.writeStringToFile(outConfigFile, json, Charset.defaultCharset());
+    FileUtils.writeStringToFile(outConfigFile, json, StandardCharsets.UTF_8);
   }
 }
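
Unlike the test-only charset fixes, this one changes runtime behavior: the generated Docker client config used to be written with Charset.defaultCharset(), so its encoding depended on the node's locale, and is now always UTF-8, the encoding JSON tooling generally assumes. Readers of the file should use the same charset; a minimal round trip under that assumption (path and content are made up):

    import java.io.File;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.commons.io.FileUtils;

    final class ConfigRoundTrip {
      public static void main(String[] args) throws IOException {
        File outConfigFile = new File("config.json");  // illustrative path
        String json = "{\"auths\":{}}";                // illustrative content
        FileUtils.writeStringToFile(outConfigFile, json, StandardCharsets.UTF_8);
        // Read back with the same charset so non-ASCII registry data survives.
        String read = FileUtils.readFileToString(outConfigFile, StandardCharsets.UTF_8);
        System.out.println(read.equals(json));
      }
    }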

+ 3 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java

@@ -30,6 +30,7 @@ import java.io.FileNotFoundException;
 import java.io.FileReader;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.List;
 import java.util.Random;
 import java.util.Vector;
@@ -154,7 +155,8 @@ public class TestProcfsBasedProcessTree {
         + " $(($1-1))\n" + "else\n" + " echo $$ > " + lowestDescendant + "\n"
         + "(sleep 300&\n"
         + "echo $! > " + lostDescendant + ")\n"
-        + " while true\n do\n" + "  sleep 5\n" + " done\n" + "fi");
+        + " while true\n do\n" + "  sleep 5\n" + " done\n" + "fi",
+            StandardCharsets.UTF_8);
 
     Thread t = new RogueTaskThread();
     t.start();

+ 56 - 64
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java

@@ -109,8 +109,7 @@ public class AppBlock extends HtmlBlock {
       final GetApplicationReportRequest request =
           GetApplicationReportRequest.newInstance(appID);
       if (callerUGI == null) {
-        appReport =
-            appBaseProt.getApplicationReport(request).getApplicationReport();
+        appReport = getApplicationReport(request);
       } else {
         appReport = callerUGI.doAs(
             new PrivilegedExceptionAction<ApplicationReport> () {
@@ -143,14 +142,19 @@ public class AppBlock extends HtmlBlock {
     try {
       final GetApplicationAttemptsRequest request =
           GetApplicationAttemptsRequest.newInstance(appID);
-      attempts = callerUGI.doAs(
+      if (callerUGI == null) {
+        attempts = getApplicationAttemptsReport(request);
+      } else {
+        attempts = callerUGI.doAs(
           new PrivilegedExceptionAction<Collection<
               ApplicationAttemptReport>>() {
             @Override
-            public Collection<ApplicationAttemptReport> run() throws Exception {
+            public Collection<ApplicationAttemptReport> run()
+                throws Exception {
               return getApplicationAttemptsReport(request);
             }
           });
+      }
     } catch (Exception e) {
       String message =
           "Failed to read the attempts of the application " + appID + ".";
@@ -204,36 +208,55 @@ public class AppBlock extends HtmlBlock {
     String schedulerPath = WebAppUtils.getResolvedRMWebAppURLWithScheme(conf) +
         "/cluster/scheduler?openQueues=" + app.getQueue();
 
+    generateOverviewTable(app, schedulerPath, webUiType, appReport);
+
+    createApplicationMetricsTable(html);
+
+    html.__(InfoBlock.class);
+
+    generateApplicationTable(html, callerUGI, attempts);
+
+  }
+
+  /**
+   * Generate overview table for app web page.
+   * @param app app info.
+   * @param schedulerPath schedule path.
+   * @param webUiType web ui type.
+   * @param appReport app report.
+   */
+  private void generateOverviewTable(AppInfo app, String schedulerPath,
+      String webUiType, ApplicationReport appReport) {
     ResponseInfo overviewTable = info("Application Overview")
-      .__("User:", schedulerPath, app.getUser())
-      .__("Name:", app.getName())
-      .__("Application Type:", app.getType())
-      .__("Application Tags:",
-        app.getApplicationTags() == null ? "" : app.getApplicationTags())
-      .__("Application Priority:", clarifyAppPriority(app.getPriority()))
-      .__(
-        "YarnApplicationState:",
-        app.getAppState() == null ? UNAVAILABLE : clarifyAppState(app
-          .getAppState()))
-      .__("Queue:", schedulerPath, app.getQueue())
-      .__("FinalStatus Reported by AM:",
-        clairfyAppFinalStatus(app.getFinalAppStatus()))
-      .__("Started:", Times.format(app.getStartedTime()))
-      .__(
-        "Elapsed:",
-        StringUtils.formatTime(Times.elapsed(app.getStartedTime(),
-          app.getFinishedTime())))
-      .__(
-        "Tracking URL:",
-        app.getTrackingUrl() == null
-            || app.getTrackingUrl().equals(UNAVAILABLE) ? null : root_url(app
-          .getTrackingUrl()),
-        app.getTrackingUrl() == null
-            || app.getTrackingUrl().equals(UNAVAILABLE) ? "Unassigned" : app
-          .getAppState() == YarnApplicationState.FINISHED
-            || app.getAppState() == YarnApplicationState.FAILED
-            || app.getAppState() == YarnApplicationState.KILLED ? "History"
-            : "ApplicationMaster");
+        .__("User:", schedulerPath, app.getUser())
+        .__("Name:", app.getName())
+        .__("Application Type:", app.getType())
+        .__("Application Tags:",
+            app.getApplicationTags() == null ? "" : app.getApplicationTags())
+        .__("Application Priority:", clarifyAppPriority(app.getPriority()))
+        .__(
+            "YarnApplicationState:",
+            app.getAppState() == null ? UNAVAILABLE : clarifyAppState(app
+                .getAppState()))
+        .__("Queue:", schedulerPath, app.getQueue())
+        .__("FinalStatus Reported by AM:",
+            clairfyAppFinalStatus(app.getFinalAppStatus()))
+        .__("Started:", Times.format(app.getStartedTime()))
+        .__(
+            "Elapsed:",
+            StringUtils.formatTime(Times.elapsed(app.getStartedTime(),
+                app.getFinishedTime())))
+        .__(
+            "Tracking URL:",
+            app.getTrackingUrl() == null
+                || app.getTrackingUrl().equals(UNAVAILABLE) ? null : root_url(app
+                .getTrackingUrl()),
+            app.getTrackingUrl() == null
+                || app.getTrackingUrl().equals(UNAVAILABLE) ? "Unassigned" : app
+                .getAppState() == YarnApplicationState.FINISHED
+                || app.getAppState() == YarnApplicationState.FAILED
+                || app.getAppState() == YarnApplicationState.KILLED ? "History"
+                : "ApplicationMaster");
     if (webUiType != null
         && webUiType.equals(YarnWebParams.RM_WEB_UI)) {
       LogAggregationStatus status = getLogAggregationStatus();
@@ -265,37 +288,6 @@ public class AppBlock extends HtmlBlock {
     overviewTable.__("AM container Node Label expression:",
         app.getAmNodeLabelExpression() == null ? "<Not set>"
             : app.getAmNodeLabelExpression());
-
-    try {
-      final GetApplicationAttemptsRequest request =
-          GetApplicationAttemptsRequest.newInstance(appID);
-      if (callerUGI == null) {
-        attempts = appBaseProt.getApplicationAttempts(request)
-            .getApplicationAttemptList();
-      } else {
-        attempts = callerUGI.doAs(
-            new PrivilegedExceptionAction<Collection<ApplicationAttemptReport>> () {
-          @Override
-          public Collection<ApplicationAttemptReport> run() throws Exception {
-            return appBaseProt.getApplicationAttempts(request)
-                .getApplicationAttemptList();
-          }
-        });
-      }
-    } catch (Exception e) {
-      String message =
-          "Failed to read the attempts of the application " + appID + ".";
-      LOG.error(message, e);
-      html.p().__(message).__();
-      return;
-    }
-
-    createApplicationMetricsTable(html);
-
-    html.__(InfoBlock.class);
-
-    generateApplicationTable(html, callerUGI, attempts);
-
   }
 
   protected void generateApplicationTable(Block html,
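
The AppBlock rewrite does two things: the attempts lookup now handles a null callerUGI by calling the protocol directly instead of unconditionally going through doAs, and the long "Application Overview" construction moves into generateOverviewTable so the render path only sequences the overview, the metrics table, and the attempts table. The null-vs-doAs branch boiled down to a self-contained sketch (fetchAttempts() and the String element type stand in for getApplicationAttemptsReport(request) and ApplicationAttemptReport):

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import java.util.Collection;
    import java.util.Collections;
    import org.apache.hadoop.security.UserGroupInformation;

    final class AttemptsFetchSketch {
      // Stand-in for getApplicationAttemptsReport(request) in the diff.
      static Collection<String> fetchAttempts() {
        return Collections.emptyList();
      }

      static Collection<String> attemptsFor(UserGroupInformation callerUGI)
          throws IOException, InterruptedException {
        if (callerUGI == null) {
          // Unauthenticated web request: call directly rather than doAs(null).
          return fetchAttempts();
        }
        // Authenticated request: run the fetch as the remote user.
        return callerUGI.doAs(
            (PrivilegedExceptionAction<Collection<String>>) () -> fetchAttempts());
      }
    }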

+ 42 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c

@@ -534,6 +534,18 @@ char *get_user_directory(const char *nm_root, const char *user) {
   return concatenate(USER_DIR_PATTERN, "user_dir_path", 2, nm_root, user);
 }
 
+/**
+ * Get the user private filecache directory of a particular user
+ */
+char *get_user_filecache_directory(const char *nm_root, const char *user) {
+  int result = check_nm_local_dir(nm_uid, nm_root);
+  if (result != 0) {
+    return NULL;
+  }
+  return concatenate(USER_FILECACHE_DIR_PATTERN, "user_filecache_dir_path", 2,
+      nm_root, user);
+}
+
 /**
  * Check node manager local dir permission.
  */
@@ -1422,6 +1434,29 @@ int create_local_dirs(const char * user, const char *app_id,
   return exit_code;
 }
 
+// create the user file directory on all disks
+int create_user_filecache_dirs(const char * user, char* const* local_dirs) {
+  int rc = 0;
+  const mode_t permissions = S_IRWXU | S_IXGRP;
+  for(char* const* ldir_p = local_dirs; *ldir_p != 0; ++ldir_p) {
+    char* filecache_dir = get_user_filecache_directory(*ldir_p, user);
+    if (filecache_dir == NULL) {
+      fprintf(LOGFILE, "Couldn't get user filecache directory for %s.\n", user);
+      rc = INITIALIZE_USER_FAILED;
+      break;
+    }
+    if (0 != mkdir(filecache_dir, permissions) && EEXIST != errno) {
+      fprintf(LOGFILE, "Failed to create directory %s - %s\n", filecache_dir,
+              strerror(errno));
+      free(filecache_dir);
+      rc = INITIALIZE_USER_FAILED;
+      break;
+    }
+    free(filecache_dir);
+  }
+  return rc;
+}
+
 int launch_docker_container_as_user(const char * user, const char *app_id,
                               const char *container_id, const char *work_dir,
                               const char *script_name, const char *cred_file,
@@ -1476,6 +1511,13 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
     goto cleanup;
   }
 
+  exit_code = create_user_filecache_dirs(user, local_dirs);
+  if (exit_code != 0) {
+    fprintf(ERRORFILE, "Could not create user filecache directory");
+    fflush(ERRORFILE);
+    goto cleanup;
+  }
+
   docker_command = construct_docker_command(command_file);
   docker_binary = get_docker_binary(&CFG);
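
create_user_filecache_dirs pre-creates <local-dir>/usercache/<user>/filecache on every NodeManager local dir before a Docker container launch, using mode S_IRWXU | S_IXGRP (octal 0710: full access for the owner, execute-only for the group, nothing for others) and treating an already-existing directory as success. Since the rest of this page is Java, here is the same permission intent expressed with java.nio.file, purely as an illustration of the mode bits (the path is made up):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.attribute.PosixFilePermission;
    import java.nio.file.attribute.PosixFilePermissions;
    import java.util.Set;

    final class FilecachePerms {
      public static void main(String[] args) throws IOException {
        // S_IRWXU | S_IXGRP == 0710 == "rwx--x---"
        Set<PosixFilePermission> mode0710 = PosixFilePermissions.fromString("rwx--x---");
        Path filecache = Paths.get("/tmp/demo/usercache/alice/filecache");  // demo path
        Files.createDirectories(filecache);   // like mkdir that tolerates EEXIST
        Files.setPosixFilePermissions(filecache, mode0710);
      }
    }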
 

+ 1 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h

@@ -52,6 +52,7 @@ enum operations {
 
 #define NM_GROUP_KEY "yarn.nodemanager.linux-container-executor.group"
 #define USER_DIR_PATTERN "%s/usercache/%s"
+#define USER_FILECACHE_DIR_PATTERN "%s/usercache/%s/filecache"
 #define NM_APP_DIR_PATTERN USER_DIR_PATTERN "/appcache/%s"
 #define CONTAINER_DIR_PATTERN NM_APP_DIR_PATTERN "/%s"
 #define CONTAINER_SCRIPT "launch_container.sh"

+ 11 - 10
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsResourceCalculator.java

@@ -26,6 +26,7 @@ import org.junit.Assert;
 import org.junit.Test;
 
 import java.io.File;
+import java.nio.charset.StandardCharsets;
 
 import static org.mockito.Mockito.*;
 
@@ -62,7 +63,7 @@ public class TestCGroupsResourceCalculator {
           new File(procfs, CGroupsResourceCalculator.CGROUP),
           "7:devices:/yarn/container_1\n" +
               "6:cpuacct,cpu:/yarn/container_1\n" +
-              "5:pids:/yarn/container_1\n");
+              "5:pids:/yarn/container_1\n", StandardCharsets.UTF_8);
       CGroupsResourceCalculator calculator =
           new CGroupsResourceCalculator(
               "1234", basePath,
@@ -84,7 +85,7 @@ public class TestCGroupsResourceCalculator {
           "7:devices:/yarn/container_1\n" +
               "6:cpuacct,cpu:/yarn/container_1\n" +
               "5:pids:/yarn/container_1\n" +
-              "4:memory:/yarn/container_1\n");
+              "4:memory:/yarn/container_1\n", StandardCharsets.UTF_8);
 
       CGroupsResourceCalculator calculator =
           new CGroupsResourceCalculator(
@@ -118,12 +119,12 @@ public class TestCGroupsResourceCalculator {
           "7:devices:/yarn/container_1\n" +
               "6:cpuacct,cpu:/yarn/container_1\n" +
               "5:pids:/yarn/container_1\n" +
-              "4:memory:/yarn/container_1\n");
+              "4:memory:/yarn/container_1\n", StandardCharsets.UTF_8);
       FileUtils.writeStringToFile(
           new File(cgcpuacctContainerDir, CGroupsResourceCalculator.CPU_STAT),
           "Can you handle this?\n" +
               "user 5415\n" +
-              "system 3632");
+              "system 3632", StandardCharsets.UTF_8);
       CGroupsResourceCalculator calculator =
           new CGroupsResourceCalculator(
               "1234", basePath,
@@ -159,10 +160,10 @@ public class TestCGroupsResourceCalculator {
       FileUtils.writeStringToFile(
           new File(procfs, CGroupsResourceCalculator.CGROUP),
               "6:cpuacct,cpu:/yarn/container_1\n" +
-              "4:memory:/yarn/container_1\n");
+              "4:memory:/yarn/container_1\n", StandardCharsets.UTF_8);
       FileUtils.writeStringToFile(
           new File(cgMemoryContainerDir, CGroupsResourceCalculator.MEM_STAT),
-          "418496512\n");
+          "418496512\n", StandardCharsets.UTF_8);
 
       CGroupsResourceCalculator calculator =
           new CGroupsResourceCalculator(
@@ -182,7 +183,7 @@ public class TestCGroupsResourceCalculator {
       // Test the case where memsw is available
       FileUtils.writeStringToFile(
           new File(cgMemoryContainerDir, CGroupsResourceCalculator.MEMSW_STAT),
-          "418496513\n");
+          "418496513\n", StandardCharsets.UTF_8);
       calculator.updateProcessTree();
       Assert.assertEquals("Incorrect swap usage",
           418496513,
@@ -206,7 +207,7 @@ public class TestCGroupsResourceCalculator {
       FileUtils.writeStringToFile(
           new File(cgcpuacctRootDir, CGroupsResourceCalculator.CPU_STAT),
               "user 5415\n" +
-              "system 3632");
+              "system 3632", StandardCharsets.UTF_8);
       CGroupsResourceCalculator calculator =
           new CGroupsResourceCalculator(
               null, basePath,
@@ -241,7 +242,7 @@ public class TestCGroupsResourceCalculator {
     try {
       FileUtils.writeStringToFile(
           new File(cgMemoryRootDir, CGroupsResourceCalculator.MEM_STAT),
-          "418496512\n");
+              "418496512\n", StandardCharsets.UTF_8);
 
       CGroupsResourceCalculator calculator =
           new CGroupsResourceCalculator(
@@ -262,7 +263,7 @@ public class TestCGroupsResourceCalculator {
       // Test the case where memsw is available
       FileUtils.writeStringToFile(
           new File(cgMemoryRootDir, CGroupsResourceCalculator.MEMSW_STAT),
-          "418496513\n");
+          "418496513\n", StandardCharsets.UTF_8);
       calculator.updateProcessTree();
       Assert.assertEquals("Incorrect swap usage",
           418496513,