
Merge trunk into the HADOOP-10388 branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HADOOP-10388@1588509 13f79535-47bb-0310-9956-ffa450edef68
Colin McCabe 11 years ago
parent
commit
ded9acb72d
100 changed files with 3935 additions and 8099 deletions
  1. 15 0
      BUILDING.txt
  2. 12 4
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
  3. 2 1
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java
  4. 95 16
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
  5. 114 2
      hadoop-common-project/hadoop-common/CHANGES.txt
  6. 0 18
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  7. 2 1
      hadoop-common-project/hadoop-common/pom.xml
  8. 1 1
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
  9. 2363 0
      hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
  10. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  11. 223 132
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
  12. 126 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
  13. 23 34
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
  14. 9 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java
  15. 14 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
  16. 38 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFs.java
  17. 20 16
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
  18. 22 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java
  19. 6 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  20. 5 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSink.java
  21. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
  22. 5 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
  23. 8 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java
  24. 0 223
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/CsvRecordInput.java
  25. 0 271
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordInput.java
  26. 0 270
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordOutput.java
  27. 0 72
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CGenerator.java
  28. 0 105
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CodeBuffer.java
  29. 0 53
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CodeGenerator.java
  30. 0 48
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/Consts.java
  31. 0 75
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CppGenerator.java
  32. 0 106
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBoolean.java
  33. 0 120
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBuffer.java
  34. 0 93
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JByte.java
  35. 0 80
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JCompType.java
  36. 0 102
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JDouble.java
  37. 0 52
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JField.java
  38. 0 78
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JFile.java
  39. 0 99
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JFloat.java
  40. 0 93
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JInt.java
  41. 0 98
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JLong.java
  42. 0 246
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JMap.java
  43. 0 822
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JRecord.java
  44. 0 96
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JString.java
  45. 0 230
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JType.java
  46. 0 214
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JVector.java
  47. 0 51
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JavaGenerator.java
  48. 0 145
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/ant/RccTask.java
  49. 0 219
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/ParseException.java
  50. 0 542
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Rcc.java
  51. 0 97
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/RccConstants.java
  52. 0 833
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/RccTokenManager.java
  53. 0 446
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/SimpleCharStream.java
  54. 0 107
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Token.java
  55. 0 161
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/TokenMgrError.java
  56. 0 35
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/package.html
  57. 0 384
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/rcc.jj
  58. 0 37
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/package.html
  59. 0 107
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/FieldTypeInfo.java
  60. 0 90
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/MapTypeID.java
  61. 0 161
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/RecordTypeInfo.java
  62. 0 166
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/StructTypeID.java
  63. 0 117
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/TypeID.java
  64. 0 104
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/Utils.java
  65. 0 74
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/VectorTypeID.java
  66. 0 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
  67. 0 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
  68. 0 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java
  69. 0 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
  70. 18 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  71. 25 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java
  72. 6 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
  73. 12 14
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java
  74. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
  75. 6 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
  76. 6 0
      hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
  77. 11 1
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
  78. 5 0
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_user_info.c
  79. 5 0
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  80. 111 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProvider.java
  81. 94 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
  82. 33 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
  83. 4 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
  84. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
  85. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
  86. 7 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java
  87. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
  88. 103 71
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java
  89. 184 147
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
  90. 11 6
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java
  91. 7 6
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
  92. 17 16
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
  93. 12 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java
  94. 6 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java
  95. 48 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java
  96. 11 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
  97. 79 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
  98. 1 0
      hadoop-common-project/hadoop-common/src/test/resources/javakeystoreprovider.password
  99. 1 1
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
  100. 2 2
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java

+ 15 - 0
BUILDING.txt

@@ -9,6 +9,8 @@ Requirements:
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
 * CMake 2.6 or newer (if compiling native code)
+* Zlib devel (if compiling native code)
+* openssl devel ( if compiling native hadoop-pipes )
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
 
 ----------------------------------------------------------------------------------
@@ -189,6 +191,7 @@ Requirements:
 * ProtocolBuffer 2.5.0
 * Windows SDK or Visual Studio 2010 Professional
 * Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip
+* zlib headers (if building native code bindings for zlib)
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
 
 If using Visual Studio, it must be Visual Studio 2010 Professional (not 2012).
@@ -228,6 +231,18 @@ native code is built by enabling the 'native-win' Maven profile. -Pnative-win
 is enabled by default when building on Windows since the native components 
 are required (not optional) on Windows.
 
+If native code bindings for zlib are required, then the zlib headers must be
+deployed on the build machine.  Set the ZLIB_HOME environment variable to the
+directory containing the headers.
+
+set ZLIB_HOME=C:\zlib-1.2.7
+
+At runtime, zlib1.dll must be accessible on the PATH.  Hadoop has been tested
+with zlib 1.2.7, built using Visual Studio 2010 out of contrib\vstudio\vc10 in
+the zlib 1.2.7 source tree.
+
+http://www.zlib.net/
+
 ----------------------------------------------------------------------------------
 Building distributions:
 
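For illustration only, a minimal Windows build invocation with the zlib bindings enabled might look like the following. The ZLIB_HOME value is the example path from the text above; the mvn distribution command is an assumption based on the rest of BUILDING.txt, and -Pnative-win is already active by default on Windows as noted above, so it is not passed explicitly:

    set ZLIB_HOME=C:\zlib-1.2.7
    mvn package -Pdist -DskipTests -Dtar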

+ 12 - 4
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java

@@ -332,7 +332,8 @@ public class AuthenticationFilter implements Filter {
   public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain)
       throws IOException, ServletException {
     boolean unauthorizedResponse = true;
-    String unauthorizedMsg = "";
+    int errCode = HttpServletResponse.SC_UNAUTHORIZED;
+    AuthenticationException authenticationEx = null;
     HttpServletRequest httpRequest = (HttpServletRequest) request;
     HttpServletResponse httpResponse = (HttpServletResponse) response;
     boolean isHttps = "https".equals(httpRequest.getScheme());
@@ -344,6 +345,8 @@ public class AuthenticationFilter implements Filter {
       }
       catch (AuthenticationException ex) {
         LOG.warn("AuthenticationToken ignored: " + ex.getMessage());
+        // will be sent back in a 401 unless filter authenticates
+        authenticationEx = ex;
         token = null;
       }
       if (authHandler.managementOperation(token, httpRequest, httpResponse)) {
@@ -392,15 +395,20 @@ public class AuthenticationFilter implements Filter {
         unauthorizedResponse = false;
       }
     } catch (AuthenticationException ex) {
-      unauthorizedMsg = ex.toString();
+      // exception from the filter itself is fatal
+      errCode = HttpServletResponse.SC_FORBIDDEN;
+      authenticationEx = ex;
       LOG.warn("Authentication exception: " + ex.getMessage(), ex);
     }
     if (unauthorizedResponse) {
       if (!httpResponse.isCommitted()) {
         createAuthCookie(httpResponse, "", getCookieDomain(),
                 getCookiePath(), 0, isHttps);
-        httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED,
-                unauthorizedMsg);
+        if (authenticationEx == null) {
+          httpResponse.sendError(errCode, "Authentication required");
+        } else {
+          httpResponse.sendError(errCode, authenticationEx.getMessage());
+        }
       }
     }
   }
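For context, a minimal client-side sketch of the behavior this change produces: the filter now answers 401 "Authentication required" when no valid token was presented, and 403 with the exception message when the authentication handler itself fails. This is hypothetical illustration code, not part of the commit; the class name and URL are placeholders.

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class AuthResponseCheck {
      public static void main(String[] args) throws Exception {
        // Placeholder endpoint protected by AuthenticationFilter
        URL url = new URL("http://localhost:8080/protected");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.connect();
        int code = conn.getResponseCode();
        if (code == HttpURLConnection.HTTP_UNAUTHORIZED) {
          // 401: no valid token was presented; authentication is simply required
          System.out.println("Authentication required: " + conn.getResponseMessage());
        } else if (code == HttpURLConnection.HTTP_FORBIDDEN) {
          // 403: the authentication attempt itself failed (AuthenticationException in the filter)
          System.out.println("Authentication failed: " + conn.getResponseMessage());
        }
        conn.disconnect();
      }
    }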

+ 2 - 1
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java

@@ -63,7 +63,8 @@ public class TestPseudoAuthenticator {
       URL url = new URL(auth.getBaseURL());
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
       conn.connect();
-      Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
+      Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN, conn.getResponseCode());
+      Assert.assertEquals("Anonymous requests are disallowed", conn.getResponseMessage());
     } finally {
       auth.stop();
     }

+ 95 - 16
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java

@@ -14,8 +14,10 @@
 package org.apache.hadoop.security.authentication.server;
 
 import java.io.IOException;
+import java.net.HttpCookie;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Properties;
 import java.util.Vector;
 
@@ -130,7 +132,11 @@ public class TestAuthenticationFilter {
         token = new AuthenticationToken("u", "p", "t");
         token.setExpires((expired) ? 0 : System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
       } else {
-        response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+        if (request.getHeader("WWW-Authenticate") == null) {
+          response.setHeader("WWW-Authenticate", "dummyauth");
+        } else {
+          throw new AuthenticationException("AUTH FAILED");
+        }
       }
       return token;
     }
@@ -303,7 +309,8 @@ public class TestAuthenticationFilter {
                         "management.operation.return")).elements());
       filter.init(config);
 
-      AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
+      AuthenticationToken token =
+          new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
       Signer signer = new Signer("secret".getBytes());
       String tokenSigned = signer.sign(token.toString());
@@ -312,13 +319,14 @@ public class TestAuthenticationFilter {
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
       Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
 
+      boolean failed = false;
       try {
         filter.getToken(request);
-        Assert.fail();
       } catch (AuthenticationException ex) {
-        // Expected
-      } catch (Exception ex) {
-        Assert.fail();
+        Assert.assertEquals("AuthenticationToken expired", ex.getMessage());
+        failed = true;
+      } finally {
+        Assert.assertTrue("token not expired", failed);
       }
     } finally {
       filter.destroy();
@@ -351,13 +359,14 @@ public class TestAuthenticationFilter {
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
       Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
 
+      boolean failed = false;
       try {
         filter.getToken(request);
-        Assert.fail();
       } catch (AuthenticationException ex) {
-        // Expected
-      } catch (Exception ex) {
-        Assert.fail();
+        Assert.assertEquals("Invalid AuthenticationToken type", ex.getMessage());
+        failed = true;
+      } finally {
+        Assert.assertTrue("token not invalid type", failed);
       }
     } finally {
       filter.destroy();
@@ -398,7 +407,9 @@ public class TestAuthenticationFilter {
 
       filter.doFilter(request, response, chain);
 
-      Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+      Mockito.verify(response).sendError(
+          HttpServletResponse.SC_UNAUTHORIZED, "Authentication required");
+      Mockito.verify(response).setHeader("WWW-Authenticate", "dummyauth");
     } finally {
       filter.destroy();
     }
@@ -468,10 +479,10 @@ public class TestAuthenticationFilter {
 
       if (expired) {
         Mockito.verify(response, Mockito.never()).
-          addCookie(Mockito.any(Cookie.class));
+          addHeader(Mockito.eq("Set-Cookie"), Mockito.anyString());
       } else {
         String v = cookieMap.get(AuthenticatedURL.AUTH_COOKIE);
-        Assert.assertNotNull(v);
+        Assert.assertNotNull("cookie missing", v);
         Assert.assertTrue(v.contains("u=") && v.contains("p=") && v.contains
                 ("t=") && v.contains("e=") && v.contains("s="));
         Mockito.verify(chain).doFilter(Mockito.any(ServletRequest.class),
@@ -585,8 +596,68 @@ public class TestAuthenticationFilter {
     }
   }
 
+  @Test
+  public void testDoFilterAuthenticationFailure() throws Exception {
+    AuthenticationFilter filter = new AuthenticationFilter();
+    try {
+      FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
+      Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
+        DummyAuthenticationHandler.class.getName());
+      Mockito.when(config.getInitParameterNames()).thenReturn(
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
+      filter.init(config);
+
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar"));
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[]{});
+      Mockito.when(request.getHeader("WWW-Authenticate")).thenReturn("dummyauth");
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+
+      FilterChain chain = Mockito.mock(FilterChain.class);
+
+      final HashMap<String, String> cookieMap = new HashMap<String, String>();
+      Mockito.doAnswer(
+        new Answer<Object>() {
+          @Override
+          public Object answer(InvocationOnMock invocation) throws Throwable {
+            Object[] args = invocation.getArguments();
+            parseCookieMap((String) args[1], cookieMap);
+            return null;
+          }
+        }
+      ).when(response).addHeader(Mockito.eq("Set-Cookie"), Mockito.anyString());
+
+      Mockito.doAnswer(
+        new Answer<Object>() {
+          @Override
+          public Object answer(InvocationOnMock invocation) throws Throwable {
+            Assert.fail("shouldn't get here");
+            return null;
+          }
+        }
+      ).when(chain).doFilter(Mockito.<ServletRequest>anyObject(), Mockito.<ServletResponse>anyObject());
+
+      filter.doFilter(request, response, chain);
+
+      Mockito.verify(response).sendError(
+          HttpServletResponse.SC_FORBIDDEN, "AUTH FAILED");
+      Mockito.verify(response, Mockito.never()).setHeader(Mockito.eq("WWW-Authenticate"), Mockito.anyString());
+
+      String value = cookieMap.get(AuthenticatedURL.AUTH_COOKIE);
+      Assert.assertNotNull("cookie missing", value);
+      Assert.assertEquals("", value);
+    } finally {
+      filter.destroy();
+    }
+  }
+
   @Test
   public void testDoFilterAuthenticatedExpired() throws Exception {
+    String secret = "secret";
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
@@ -594,9 +665,12 @@ public class TestAuthenticationFilter {
         thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
+      Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn(
+        secret);
       Mockito.when(config.getInitParameterNames()).thenReturn(
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
       filter.init(config);
 
@@ -605,7 +679,7 @@ public class TestAuthenticationFilter {
 
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer("secret".getBytes());
+      Signer signer = new Signer(secret.getBytes());
       String tokenSigned = signer.sign(token.toString());
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -643,12 +717,14 @@ public class TestAuthenticationFilter {
     Mockito.verify(chain, Mockito.never()).doFilter(Mockito.any
             (ServletRequest.class), Mockito.any(ServletResponse.class));
 
-    Assert.assertTrue(cookieMap.containsKey(AuthenticatedURL.AUTH_COOKIE));
+    Assert.assertTrue("cookie is missing",
+        cookieMap.containsKey(AuthenticatedURL.AUTH_COOKIE));
     Assert.assertEquals("", cookieMap.get(AuthenticatedURL.AUTH_COOKIE));
   }
 
   @Test
   public void testDoFilterAuthenticatedInvalidType() throws Exception {
+    String secret = "secret";
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
@@ -656,9 +732,12 @@ public class TestAuthenticationFilter {
         thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
+      Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn(
+        secret);
       Mockito.when(config.getInitParameterNames()).thenReturn(
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
       filter.init(config);
 
@@ -667,7 +746,7 @@ public class TestAuthenticationFilter {
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer("secret".getBytes());
+      Signer signer = new Signer(secret.getBytes());
       String tokenSigned = signer.sign(token.toString());
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);

+ 114 - 2
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -121,7 +121,25 @@ Trunk (Unreleased)
     HADOOP-10342. Add a new method to UGI to use a Kerberos login subject to
     build a new UGI. (Larry McCay via omalley)
 
-    HADOOP-9968. Makes ProxyUsers to work with NetGroups (Benoy Antony via ddas)
+    HADOOP-9968. Makes ProxyUsers to work with NetGroups (Benoy Antony via 
+    ddas)
+
+    HADOOP-10237. JavaKeyStoreProvider needs to set keystore permissions 
+    correctly. (Larry McCay via omalley)
+
+    HADOOP-10432. Refactor SSLFactory to expose static method to determine
+    HostnameVerifier. (tucu)
+
+    HADOOP-10427. KeyProvider implementations should be thread safe. (tucu)
+
+    HADOOP-10429. KeyStores should have methods to generate the materials 
+    themselves, KeyShell should use them. (tucu)
+
+    HADOOP-10428. JavaKeyStoreProvider should accept keystore password via 
+    configuration falling back to ENV VAR. (tucu)
+
+    HADOOP-10430. KeyProvider Metadata should have an optional description, 
+    there should be a method to retrieve the metadata from all keys. (tucu)
 
   BUG FIXES
 
@@ -297,6 +315,10 @@ Trunk (Unreleased)
 
     HADOOP-10044 Improve the javadoc of rpc code (sanjay Radia)
 
+    HADOOP-10488. TestKeyProviderFactory fails randomly. (tucu)
+
+    HADOOP-10431. Change visibility of KeyStore.Options getter methods to public. (tucu)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -307,10 +329,27 @@ Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
+    HADOOP-10474 Move o.a.h.record to hadoop-streaming. (wheat9)
+
   NEW FEATURES
 
+    HADOOP-10498. Add support for proxy server. (daryn)
+
   IMPROVEMENTS
 
+    HADOOP-10451. Remove unused field and imports from SaslRpcServer.
+    (Benoy Antony via jing9)
+
+    HADOOP-10345. Sanitize the the inputs (groups and hosts) for the proxyuser
+    configuration. (Benoy Antony via jing9)
+
+    HADOOP-10454. Provide FileContext version of har file system. (Kihwal Lee
+    via jeagles)
+
+    HADOOP-10104. Update jackson to 1.9.13 (Akira Ajisaka via stevel)
+
+    HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
+
   OPTIMIZATIONS
 
   BUG FIXES 
@@ -325,7 +364,63 @@ Release 2.5.0 - UNRELEASED
     removes unused FileContext.getFileStatus(..) and fixes various javac
     warnings.  (szetszwo)
 
-Release 2.4.0 - UNRELEASED
+    HADOOP-10414. Incorrect property name for RefreshUserMappingProtocol in
+    hadoop-policy.xml. (Joey Echeverria via atm)
+
+    HADOOP-10459. distcp V2 doesn't preserve root dir's attributes when -p is
+    specified. (Yongjun Zhang via atm)
+
+    HADOOP-10462. DF#getFilesystem is not parsing the command output. 
+    (Akira AJISAKA via umamahesh)
+
+    HADOOP-10468. TestMetricsSystemImpl.testMultiThreadedPublish fails
+    intermediately. (wheat9)
+
+    HADOOP-10475. ConcurrentModificationException in
+    AbstractDelegationTokenSelector.selectToken(). (jing9)
+
+    HADOOP-10350. BUILDING.txt should mention openssl dependency required
+    for hadoop-pipes (Vinayakumar B)
+
+    HADOOP-10495. TestFileUtil fails on Windows due to bad permission
+    assertions. (cnauroth)
+
+    HADOOP-10496. Metrics system FileSink can leak file descriptor. (cnauroth)
+
+    HADOOP-10500. TestDoAsEffectiveUser fails on JDK7 due to failure to reset
+    proxy user configuration. (cnauroth)
+
+    HADOOP-10499. Remove unused parameter from ProxyUsers.authorize().
+    (Benoy Antony via cnauroth)
+
+Release 2.4.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HADOOP-10466. Lower the log level in UserGroupInformation.  (Nicolas
+    Liochon via szetszwo)
+
+  OPTIMIZATIONS
+
+  BUG FIXES 
+
+    HADOOP-10455. When there is an exception, ipc.Server should first check
+    whether it is an terse exception.  (szetszwo)
+
+    HADOOP-10456. Bug in Configuration.java exposed by Spark
+    (ConcurrentModificationException).  (Nishkam Ravi via cnauroth)
+
+    HADOOP-10473. TestCallQueueManager should interrupt before counting calls.
+    (szetszwo)
+
+    HADOOP-10490. TestMapFile and TestBloomMapFile leak file descriptors.
+    (cnauroth)
+
+Release 2.4.0 - 2014-04-07 
 
   INCOMPATIBLE CHANGES
 
@@ -463,6 +558,17 @@ Release 2.4.0 - UNRELEASED
     HADOOP-10441. Namenode metric "rpc.RetryCache/NameNodeRetryCache.CacheHit"
     can't be correctly processed by Ganglia. (jing9)
 
+    HADOOP-10449. Fix the javac warnings in the security package.  (szetszwo)
+
+    HADOOP-10450. Build zlib native code bindings in hadoop.dll for Windows.
+    (cnauroth)
+
+    HADOOP-10301. AuthenticationFilter should return Forbidden for failed
+    authentication. (Daryn Sharp via jing9)
+
+    HADOOP-9525. Add tests that validate winutils chmod behavior on folders
+    (ivanmi)
+
   BREAKDOWN OF HADOOP-10184 SUBTASKS AND RELATED JIRAS
 
     HADOOP-10185. FileSystem API for ACLs. (cnauroth)
@@ -500,6 +606,9 @@ Release 2.4.0 - UNRELEASED
 
     HADOOP-10399. FileContext API for ACLs. (Vinayakumar B via cnauroth)
 
+    HADOOP-10442. Group look-up can cause segmentation fault when certain
+    JNI-based mapping module is used. (Kihwal Lee via jeagles)
+
 Release 2.3.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -2720,6 +2829,9 @@ Release 0.23.11 - UNRELEASED
 
     HADOOP-10332. HttpServer's jetty audit log always logs 200 OK (jeagles)
 
+    HADOOP-8826. Docs still refer to 0.20.205 as stable line (Mit Desai via
+    jeagles)
+
 Release 0.23.10 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 0 - 18
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -15,9 +15,6 @@
    limitations under the License.
 -->
 <FindBugsFilter>
-     <Match>
-       <Package name="org.apache.hadoop.record.compiler.generated" />
-     </Match>
      <Match>
        <Package name="org.apache.hadoop.security.proto" />
      </Match>
@@ -196,21 +193,6 @@
        <Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS" />
      </Match>
 
-     <Match>
-       <Class name="org.apache.hadoop.record.meta.Utils" />
-       <Method name="skip" />
-       <Bug pattern="BC_UNCONFIRMED_CAST" />
-     </Match>
-
-     <!--
-        The compareTo method is actually a dummy method that just
-        throws excpetions. So, no need to override equals. Ignore
-     -->
-     <Match>
-       <Class name="org.apache.hadoop.record.meta.RecordTypeInfo" />
-       <Bug pattern="EQ_COMPARETO_USE_OBJECT_EQUALS" />
-     </Match>
-
      <Match>
        <Class name="org.apache.hadoop.util.ProcfsBasedProcessTree" />
        <Bug pattern="DMI_HARDCODED_ABSOLUTE_FILENAME" />

+ 2 - 1
hadoop-common-project/hadoop-common/pom.xml

@@ -202,7 +202,7 @@
     <dependency>
       <groupId>org.apache.ant</groupId>
       <artifactId>ant</artifactId>
-      <scope>provided</scope>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>com.google.protobuf</groupId>
@@ -483,6 +483,7 @@
             <exclude>src/test/resources/test.har/_index</exclude>
             <exclude>src/test/resources/test.har/_masterindex</exclude>
             <exclude>src/test/resources/test.har/part-0</exclude>
+            <exclude>src/test/resources/javakeystoreprovider.password</exclude>
           </excludes>
         </configuration>
       </plugin>

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml

@@ -85,7 +85,7 @@
   </property>
 
   <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <name>security.refresh.user.mappings.protocol.acl</name>
     <value>*</value>
     <description>ACL for RefreshUserMappingsProtocol. Used to refresh
     users mappings. The ACL is a comma-separated list of user and

+ 2363 - 0
hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html

@@ -1,4 +1,2367 @@
 <META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop  2.4.0 Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop  2.4.0 Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 2.3.0</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1893">YARN-1893</a>.
+     Blocker sub-task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+     <b>Make ApplicationMasterProtocol#allocate AtMostOnce</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1891">YARN-1891</a>.
+     Minor task reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>Document NodeManager health-monitoring</b><br>
+     <blockquote>Start documenting node manager starting with the health monitoring.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1873">YARN-1873</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestDistributedShell#testDSShell fails when the test cases are out of order</b><br>
+     <blockquote>testDSShell fails when the tests are run in random order. I see a cleanup issue here.
+
+{noformat}
+Tests run: 1, Failures: 1, Errors: 0, Skipped: 0, Time elapsed: 72.222 sec &lt;&lt;&lt; FAILURE! - in org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell
+testOrder(org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell)  Time elapsed: 44.127 sec  &lt;&lt;&lt; FAILURE!
+java.lang.AssertionError: expected:&lt;1&gt; but was:&lt;6&gt;
+	at org.junit.Assert.fail(Assert.java:93)
+	at org.junit.Assert.failNotEquals(Assert.java:647)
+	at org.junit.Assert.assertEquals(Assert.java:128)
+	at org.junit.Assert.assertEquals(Assert.java:472)
+	at org.junit.Assert.assertEquals(Assert.java:456)
+	at org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell.testDSShell(TestDistributedShell.java:204)
+	at org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell.testOrder(TestDistributedShell.java:134)
+
+
+Results :
+
+Failed tests: 
+  TestDistributedShell.testOrder:134-&gt;testDSShell:204 expected:&lt;1&gt; but was:&lt;6&gt;
+{noformat}
+
+The Line numbers will be little deviated because I was trying to reproduce the error by running the tests in specific order. But the Line that causes the assert fail is {{Assert.assertEquals(1, entitiesAttempts.getEntities().size());}}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1867">YARN-1867</a>.
+     Blocker bug reported by Karthik Kambatla and fixed by Vinod Kumar Vavilapalli (resourcemanager)<br>
+     <b>NPE while fetching apps via the REST API</b><br>
+     <blockquote>We ran into the following NPE when fetching applications using the REST API:
+
+{noformat}
+INTERNAL_SERVER_ERROR
+java.lang.NullPointerException
+at org.apache.hadoop.yarn.server.security.ApplicationACLsManager.checkAccess(ApplicationACLsManager.java:104)
+at org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices.hasAccess(RMWebServices.java:123)
+at org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices.getApps(RMWebServices.java:418)
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1866">YARN-1866</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Jian He <br>
+     <b>YARN RM fails to load state store with delegation token parsing error</b><br>
+     <blockquote>In our secure Nightlies we saw exceptions in the RM log where it failed to parse the deletegation token.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1863">YARN-1863</a>.
+     Blocker test reported by Ted Yu and fixed by Xuan Gong <br>
+     <b>TestRMFailover fails with 'AssertionError: null' </b><br>
+     <blockquote>This happened in Hadoop-Yarn-trunk - Build # 516 and can be reproduced:
+{code}
+testWebAppProxyInStandAloneMode(org.apache.hadoop.yarn.client.TestRMFailover)  Time elapsed: 5.834 sec  &lt;&lt;&lt; FAILURE!
+java.lang.AssertionError: null
+	at org.junit.Assert.fail(Assert.java:92)
+	at org.junit.Assert.assertTrue(Assert.java:43)
+	at org.junit.Assert.assertTrue(Assert.java:54)
+	at org.apache.hadoop.yarn.client.TestRMFailover.verifyExpectedException(TestRMFailover.java:250)
+	at org.apache.hadoop.yarn.client.TestRMFailover.testWebAppProxyInStandAloneMode(TestRMFailover.java:216)
+
+testEmbeddedWebAppProxy(org.apache.hadoop.yarn.client.TestRMFailover)  Time elapsed: 5.341 sec  &lt;&lt;&lt; FAILURE!
+java.lang.AssertionError: null
+	at org.junit.Assert.fail(Assert.java:92)
+	at org.junit.Assert.assertTrue(Assert.java:43)
+	at org.junit.Assert.assertTrue(Assert.java:54)
+	at org.apache.hadoop.yarn.client.TestRMFailover.verifyExpectedException(TestRMFailover.java:250)
+	at org.apache.hadoop.yarn.client.TestRMFailover.testEmbeddedWebAppProxy(TestRMFailover.java:241)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1859">YARN-1859</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>WebAppProxyServlet will throw ApplicationNotFoundException if the app is no longer cached in RM</b><br>
+     <blockquote>WebAppProxyServlet checks null to determine whether the application is not found or not.
+{code}
+ ApplicationReport applicationReport = getApplicationReport(id);
+      if(applicationReport == null) {
+        LOG.warn(req.getRemoteUser()+" Attempting to access "+id+
+            " that was not found");
+{code}
+However, WebAppProxyServlet calls AppReportFetcher, which consequently calls ClientRMService. When application is not found, ClientRMService throws ApplicationNotFoundException. Therefore, in WebAppProxyServlet, the following logic to create the tracking url for a non-cached app will no longer be in use.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1855">YARN-1855</a>.
+     Critical test reported by Ted Yu and fixed by Zhijie Shen <br>
+     <b>TestRMFailover#testRMWebAppRedirect fails in trunk</b><br>
+     <blockquote>From https://builds.apache.org/job/Hadoop-Yarn-trunk/514/console :
+{code}
+testRMWebAppRedirect(org.apache.hadoop.yarn.client.TestRMFailover)  Time elapsed: 5.39 sec  &lt;&lt;&lt; ERROR!
+java.lang.NullPointerException: null
+	at org.apache.hadoop.yarn.client.TestRMFailover.testRMWebAppRedirect(TestRMFailover.java:269)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1854">YARN-1854</a>.
+     Blocker test reported by Mit Desai and fixed by Rohith <br>
+     <b>Race condition in TestRMHA#testStartAndTransitions</b><br>
+     <blockquote>There is race in test.
+TestRMHA#testStartAndTransitions calls verifyClusterMetrics() immediately after application is submitted, but QueueMetrics are updated after app attempt is sheduled. Calling verifyClusterMetrics() without verifying app attempt is in Scheduled state cause random test failures.
+ MockRM.submitApp() return when application is in ACCEPTED, but QueueMetrics updated at APP_ATTEMPT_ADDED event. There is high chance of getting queue metrics before app attempt is Scheduled.
+
+
+
+
+{noformat}
+testStartAndTransitions(org.apache.hadoop.yarn.server.resourcemanager.TestRMHA)  Time elapsed: 5.883 sec  &lt;&lt;&lt; FAILURE!
+java.lang.AssertionError: Incorrect value for metric availableMB expected:&lt;2048&gt; but was:&lt;4096&gt;
+	at org.junit.Assert.fail(Assert.java:93)
+	at org.junit.Assert.failNotEquals(Assert.java:647)
+	at org.junit.Assert.assertEquals(Assert.java:128)
+	at org.junit.Assert.assertEquals(Assert.java:472)
+	at org.apache.hadoop.yarn.server.resourcemanager.TestRMHA.assertMetric(TestRMHA.java:396)
+	at org.apache.hadoop.yarn.server.resourcemanager.TestRMHA.verifyClusterMetrics(TestRMHA.java:387)
+	at org.apache.hadoop.yarn.server.resourcemanager.TestRMHA.testStartAndTransitions(TestRMHA.java:160)
+
+
+Results :
+
+Failed tests: 
+  TestRMHA.testStartAndTransitions:160-&gt;verifyClusterMetrics:387-&gt;assertMetric:396 Incorrect value for metric availableMB expected:&lt;2048&gt; but was:&lt;4096&gt;
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1852">YARN-1852</a>.
+     Major bug reported by Rohith and fixed by Rohith (resourcemanager)<br>
+     <b>Application recovery throws InvalidStateTransitonException for FAILED and KILLED jobs</b><br>
+     <blockquote>Recovering for failed/killed application throw InvalidStateTransitonException.
+
+These are logged during recovery of applications.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1850">YARN-1850</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Make enabling timeline service configurable </b><br>
+     <blockquote>Like generic history service, we'd better to make enabling timeline service configurable, in case the timeline server is not up</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1849">YARN-1849</a>.
+     Blocker bug reported by Karthik Kambatla and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>NPE in ResourceTrackerService#registerNodeManager for UAM</b><br>
+     <blockquote>While running an UnmanagedAM on secure cluster, ran into an NPE on failover/restart. This is similar to YARN-1821. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1846">YARN-1846</a>.
+     Major bug reported by Robert Kanter and fixed by Robert Kanter <br>
+     <b>TestRM#testNMTokenSentForNormalContainer assumes CapacityScheduler</b><br>
+     <blockquote>TestRM.testNMTokenSentForNormalContainer assumes the CapacityScheduler is being used and tries to do:
+{code:java}
+CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+{code}
+
+This throws a {{ClassCastException}} if you're not using the CapacityScheduler.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1839">YARN-1839</a>.
+     Critical bug reported by Tassapol Athiapinya and fixed by Jian He (applications , capacityscheduler)<br>
+     <b>Capacity scheduler preempts an AM out. AM attempt 2 fails to launch task container with SecretManager$InvalidToken: No NMToken sent</b><br>
+     <blockquote>Use single-node cluster. Turn on capacity scheduler preemption. Run MR sleep job as app 1. Take entire cluster. Run MR sleep job as app 2. Preempt app1 out. Wait till app 2 finishes. App 1 AM attempt 2 will start. It won't be able to launch a task container with this error stack trace in AM logs:
+
+{code}
+2014-03-13 20:13:50,254 INFO [AsyncDispatcher event handler] org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl: Diagnostics report from attempt_1394741557066_0001_m_000000_1009: Container launch failed for container_1394741557066_0001_02_000021 : org.apache.hadoop.security.token.SecretManager$InvalidToken: No NMToken sent for &lt;host&gt;:45454
+	at org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy$ContainerManagementProtocolProxyData.newProxy(ContainerManagementProtocolProxy.java:206)
+	at org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy$ContainerManagementProtocolProxyData.&lt;init&gt;(ContainerManagementProtocolProxy.java:196)
+	at org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.getProxy(ContainerManagementProtocolProxy.java:117)
+	at org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl.getCMProxy(ContainerLauncherImpl.java:403)
+	at org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl$Container.launch(ContainerLauncherImpl.java:138)
+	at org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl$EventProcessor.run(ContainerLauncherImpl.java:369)
+	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
+	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
+	at java.lang.Thread.run(Thread.java:722)
+{code}
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1838">YARN-1838</a>.
+     Major sub-task reported by Srimanth Gunturi and fixed by Billie Rinaldi <br>
+     <b>Timeline service getEntities API should provide ability to get entities from given id</b><br>
+     <blockquote>To support pagination, we need ability to get entities from a certain ID by providing a new param called {{fromid}}.
+
+For example on a page of 10 jobs, our first call will be like
+[http://server:8188/ws/v1/timeline/HIVE_QUERY_ID?fields=events,primaryfilters,otherinfo&amp;limit=11]
+When user hits next, we would like to call
+[http://server:8188/ws/v1/timeline/HIVE_QUERY_ID?fields=events,primaryfilters,otherinfo&amp;fromid=JID11&amp;limit=11]
+and continue on for further _Next_ clicks
+
+On hitting back, we will make similar calls for previous items
+[http://server:8188/ws/v1/timeline/HIVE_QUERY_ID?fields=events,primaryfilters,otherinfo&amp;fromid=JID1&amp;limit=11]
+
+{{fromid}} should be inclusive of the id given.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1833">YARN-1833</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestRMAdminService Fails in trunk and branch-2 : Assert Fails due to different count of UserGroups for currentUser()</b><br>
+     <blockquote>In the test testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider, the following assert is not needed.
+
+{code}
+Assert.assertTrue(groupWithInit.size() != groupBefore.size());
+{code}
+
+As the assert takes the default groups for groupWithInit (which in my case are users, sshusers and wheel), it fails as the size of both groupWithInit and groupBefore are same.
+
+I do not think we need to have this assert here. Moreover we are also checking that the groupInit does not have the userGroups that are in the groupBefore so removing the assert may not be harmful.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1830">YARN-1830</a>.
+     Major bug reported by Karthik Kambatla and fixed by Zhijie Shen (resourcemanager)<br>
+     <b>TestRMRestart.testQueueMetricsOnRMRestart failure</b><br>
+     <blockquote>TestRMRestart.testQueueMetricsOnRMRestart fails intermittently as follows (reported on YARN-1815):
+
+{noformat}
+java.lang.AssertionError: expected:&lt;37&gt; but was:&lt;38&gt;
+...
+	at org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart.assertQueueMetrics(TestRMRestart.java:1728)
+	at org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart.testQueueMetricsOnRMRestart(TestRMRestart.java:1682)
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1824">YARN-1824</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>Make Windows client work with Linux/Unix cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1821">YARN-1821</a>.
+     Blocker sub-task reported by Karthik Kambatla and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>NPE on registerNodeManager if the request has containers for UnmanagedAMs</b><br>
+     <blockquote>On RM restart (or failover), NM re-registers with the RM. If it was running containers for Unmanaged AMs, it runs into the following NPE:
+
+{noformat}
+Caused by: org.apache.hadoop.ipc.RemoteException(java.lang.NullPointerException): java.lang.NullPointerException
+        at org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService.registerNodeManager(ResourceTrackerService.java:213)
+        at org.apache.hadoop.yarn.server.api.impl.pb.service.ResourceTrackerPBServiceImpl.registerNodeManager(ResourceTrackerPBServiceImpl.java:54)
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1816">YARN-1816</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Jian He <br>
+     <b>Succeeded application remains in accepted after RM restart</b><br>
+     <blockquote>{code}
+2014-03-10 18:07:31,944|beaver.machine|INFO|Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State	       Final-State	       Progress	                       Tracking-URL
+2014-03-10 18:07:31,945|beaver.machine|INFO|application_1394449508064_0008	test_mapred_ha_multiple_job_nn-rm-1-min-5-jobs_1394449960-4	           MAPREDUCE	    hrt_qa	   default	          ACCEPTED	         SUCCEEDED	           100%	http://hostname:19888/jobhistory/job/job_1394449508064_0008
+2014-03-10 18:08:02,125|beaver.machine|INFO|RUNNING: /usr/bin/yarn application -list -appStates NEW,NEW_SAVING,SUBMITTED,ACCEPTED,RUNNING
+2014-03-10 18:08:03,198|beaver.machine|INFO|14/03/10 18:08:03 INFO client.ConfiguredRMFailoverProxyProvider: Failing over to rm2
+2014-03-10 18:08:03,238|beaver.machine|INFO|Total number of applications (application-types: [] and states: [NEW, NEW_SAVING, SUBMITTED, ACCEPTED, RUNNING]):1
+2014-03-10 18:08:03,239|beaver.machine|INFO|Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State	       Final-State	       Progress	                       Tracking-URL
+2014-03-10 18:08:03,239|beaver.machine|INFO|application_1394449508064_0008	test_mapred_ha_multiple_job_nn-rm-1-min-5-jobs_1394449960-4	           MAPREDUCE	    hrt_qa	   default	          ACCEPTED	         SUCCEEDED	           100%	http://hostname:19888/jobhistory/job/job_1394449508064_0008
+2014-03-10 18:08:33,390|beaver.machine|INFO|RUNNING: /usr/bin/yarn application -list -appStates NEW,NEW_SAVING,SUBMITTED,ACCEPTED,RUNNING
+2014-03-10 18:08:34,437|beaver.machine|INFO|14/03/10 18:08:34 INFO client.ConfiguredRMFailoverProxyProvider: Failing over to rm2
+2014-03-10 18:08:34,477|beaver.machine|INFO|Total number of applications (application-types: [] and states: [NEW, NEW_SAVING, SUBMITTED, ACCEPTED, RUNNING]):1
+2014-03-10 18:08:34,477|beaver.machine|INFO|Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State	       Final-State	       Progress	                       Tracking-URL
+2014-03-10 18:08:34,478|beaver.machine|INFO|application_1394449508064_0008	test_mapred_ha_multiple_job_nn-rm-1-min-5-jobs_1394449960-4	           MAPREDUCE	    hrt_qa	   default	          ACCEPTED	         SUCCEEDED	           100%	http://hostname:19888/jobhistory/job/job_1394449508064_0008
+2014-03-10 18:09:04,628|beaver.machine|INFO|RUNNING: /usr/bin/yarn application -list -appStates NEW,NEW_SAVING,SUBMITTED,ACCEPTED,RUNNING
+2014-03-10 18:09:05,688|beaver.machine|INFO|14/03/10 18:09:05 INFO client.ConfiguredRMFailoverProxyProvider: Failing over to rm2
+2014-03-10 18:09:05,728|beaver.machine|INFO|Total number of applications (application-types: [] and states: [NEW, NEW_SAVING, SUBMITTED, ACCEPTED, RUNNING]):1
+2014-03-10 18:09:05,728|beaver.machine|INFO|Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State	       Final-State	       Progress	                       Tracking-URL
+2014-03-10 18:09:05,729|beaver.machine|INFO|application_1394449508064_0008	test_mapred_ha_multiple_job_nn-rm-1-min-5-jobs_1394449960-4	           MAPREDUCE	    hrt_qa	   default	          ACCEPTED	         SUCCEEDED	           100%	http://hostname:19888/jobhistory/job/job_1394449508064_0008
+2014-03-10 18:09:35,879|beaver.machine|INFO|RUNNING: /usr/bin/yarn application -list -appStates NEW,NEW_SAVING,SUBMITTED,ACCEPTED,RUNNING
+2014-03-10 18:09:36,951|beaver.machine|INFO|14/03/10 18:09:36 INFO client.ConfiguredRMFailoverProxyProvider: Failing over to rm2
+2014-03-10 18:09:36,992|beaver.machine|INFO|Total number of applications (application-types: [] and states: [NEW, NEW_SAVING, SUBMITTED, ACCEPTED, RUNNING]):1
+2014-03-10 18:09:36,993|beaver.machine|INFO|Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State	       Final-State	       Progress	                       Tracking-URL
+2014-03-10 18:09:36,993|beaver.machine|INFO|application_1394449508064_0008	test_mapred_ha_multiple_job_nn-rm-1-min-5-jobs_1394449960-4	           MAPREDUCE	    hrt_qa	   default	          ACCEPTED	         SUCCEEDED	           100%	http://hostname:19888/jobhistory/job/job_1394449508064_0008
+2014-03-10 18:10:07,142|beaver.machine|INFO|RUNNING: /usr/bin/yarn application -list -appStates NEW,NEW_SAVING,SUBMITTED,ACCEPTED,RUNNING
+2014-03-10 18:10:08,201|beaver.machine|INFO|14/03/10 18:10:08 INFO client.ConfiguredRMFailoverProxyProvider: Failing over to rm2
+2014-03-10 18:10:08,242|beaver.machine|INFO|Total number of applications (application-types: [] and states: [NEW, NEW_SAVING, SUBMITTED, ACCEPTED, RUNNING]):1
+2014-03-10 18:10:08,242|beaver.machine|INFO|Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State	       Final-State	       Progress	                       Tracking-URL
+2014-03-10 18:10:08,242|beaver.machine|INFO|application_1394449508064_0008	test_mapred_ha_multiple_job_nn-rm-1-min-5-jobs_1394449960-4	           MAPREDUCE	    hrt_qa	   default	          ACCEPTED	         SUCCEEDED	           100%	http://hostname:19888/jobhistory/job/job_1394449508064_0008
+2014-03-10 18:10:38,392|beaver.machine|INFO|RUNNING: /usr/bin/yarn application -list -appStates NEW,NEW_SAVING,SUBMITTED,ACCEPTED,RUNNING
+2014-03-10 18:10:39,443|beaver.machine|INFO|14/03/10 18:10:39 INFO client.ConfiguredRMFailoverProxyProvider: Failing over to rm2
+2014-03-10 18:10:39,484|beaver.machine|INFO|Total number of applications (application-types: [] and states: [NEW, NEW_SAVING, SUBMITTED, ACCEPTED, RUNNING]):1
+2014-03-10 18:10:39,484|beaver.machine|INFO|Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State	       Final-State	       Progress	                       Tracking-URL
+2014-03-10 18:10:39,485|beaver.machine|INFO|application_1394449508064_0008	test_mapred_ha_multiple_job_nn-rm-1-min-5-jobs_1394449960-4	           MAPREDUCE	    hrt_qa	   default	          ACCEPTED	         SUCCEEDED	           100%	http://hostname:19888/jobhistory/job/job_1394449508064_0008
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1812">YARN-1812</a>.
+     Major sub-task reported by Yesha Vora and fixed by Jian He <br>
+     <b>Job stays in PREP state for long time after RM Restarts</b><br>
+     <blockquote>Steps followed:
+
+1) start a sort job with 80 maps and 5 reducers
+2) restart Resource manager when 60 maps and 0 reducers are finished
+3) Wait for job to come out of PREP state.
+
+The job does not come out of PREP state after 7-8 mins.
+After waiting for 7-8 mins, test kills the job.
+
+However, Sort job should not take this long time to come out of PREP state</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1811">YARN-1811</a>.
+     Major sub-task reported by Robert Kanter and fixed by Robert Kanter (resourcemanager)<br>
+     <b>RM HA: AM link broken if the AM is on nodes other than RM</b><br>
+     <blockquote>When using RM HA, if you click on the "Application Master" link in the RM web UI while the job is running, you get an Error 500:
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1800">YARN-1800</a>.
+     Critical sub-task reported by Paul Isaychuk and fixed by Varun Vasudev (nodemanager)<br>
+     <b>YARN NodeManager with java.util.concurrent.RejectedExecutionException</b><br>
+     <blockquote>Noticed this on tests running on an Apache Hadoop 2.2 cluster
+
+{code}
+2014-01-23 01:30:28,575 INFO  localizer.LocalizedResource (LocalizedResource.java:handle(196)) - Resource hdfs://colo-2:8020/user/fertrist/oozie-oozi/0000605-140114233013619-oozie-oozi-W/aggregator--map-reduce/map-reduce-launcher.jar transitioned from INIT to DOWNLOADING
+2014-01-23 01:30:28,575 INFO  localizer.LocalizedResource (LocalizedResource.java:handle(196)) - Resource hdfs://colo-2:8020/user/fertrist/.staging/job_1389742077466_0396/job.splitmetainfo transitioned from INIT to DOWNLOADING
+2014-01-23 01:30:28,575 INFO  localizer.LocalizedResource (LocalizedResource.java:handle(196)) - Resource hdfs://colo-2:8020/user/fertrist/.staging/job_1389742077466_0396/job.split transitioned from INIT to DOWNLOADING
+2014-01-23 01:30:28,575 INFO  localizer.LocalizedResource (LocalizedResource.java:handle(196)) - Resource hdfs://colo-2:8020/user/fertrist/.staging/job_1389742077466_0396/job.xml transitioned from INIT to DOWNLOADING
+2014-01-23 01:30:28,576 INFO  localizer.ResourceLocalizationService (ResourceLocalizationService.java:addResource(651)) - Downloading public rsrc:{ hdfs://colo-2:8020/user/fertrist/oozie-oozi/0000605-140114233013619-oozie-oozi-W/aggregator--map-reduce/map-reduce-launcher.jar, 1390440627435, FILE, null }
+2014-01-23 01:30:28,576 FATAL event.AsyncDispatcher (AsyncDispatcher.java:dispatch(141)) - Error in dispatcher thread
+java.util.concurrent.RejectedExecutionException
+        at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:1768)
+        at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:767)
+        at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:658)
+        at java.util.concurrent.ExecutorCompletionService.submit(ExecutorCompletionService.java:152)
+        at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService$PublicLocalizer.addResource(ResourceLocalizationService.java:678)
+        at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService$LocalizerTracker.handle(ResourceLocalizationService.java:583)
+        at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService$LocalizerTracker.handle(ResourceLocalizationService.java:525)
+        at org.apache.hadoop.yarn.event.AsyncDispatcher.dispatch(AsyncDispatcher.java:134)
+        at org.apache.hadoop.yarn.event.AsyncDispatcher$1.run(AsyncDispatcher.java:81)
+        at java.lang.Thread.run(Thread.java:662)
+2014-01-23 01:30:28,577 INFO  event.AsyncDispatcher (AsyncDispatcher.java:dispatch(144)) - Exiting, bbye..
+2014-01-23 01:30:28,596 INFO  mortbay.log (Slf4jLog.java:info(67)) - Stopped SelectChannelConnector@0.0.0.0:50060
+2014-01-23 01:30:28,597 INFO  containermanager.ContainerManagerImpl (ContainerManagerImpl.java:cleanUpApplicationsOnNMShutDown(328)) - Applications still running : [application_1389742077466_0396]
+2014-01-23 01:30:28,597 INFO  containermanager.ContainerManagerImpl (ContainerManagerImpl.java:cleanUpApplicationsOnNMShutDown(336)) - Wa
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1793">YARN-1793</a>.
+     Critical bug reported by Karthik Kambatla and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>yarn application -kill doesn't kill UnmanagedAMs</b><br>
+     <blockquote>Trying to kill an Unmanaged AM through the CLI (yarn application -kill &lt;id&gt;) logs a success, but doesn't actually kill the AM or reclaim the containers allocated to it.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1789">YARN-1789</a>.
+     Minor improvement reported by Akira AJISAKA and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+     <b>ApplicationSummary does not escape newlines in the app name</b><br>
+     <blockquote>YARN-side of MAPREDUCE-5778.
+ApplicationSummary is not escaping newlines in the app name. This can result in an application summary log entry that spans multiple lines when users expect one-app-per-line output.
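+
+A minimal sketch of the kind of escaping that avoids this (the helper below is illustrative, not the actual ApplicationSummary code):
+{code}
+// Hypothetical helper: collapse newlines so each app summary stays on one log line.
+public static String escapeForSummary(String appName) {
+  if (appName == null) {
+    return "";
+  }
+  return appName.replace("\r", "\\r").replace("\n", "\\n");
+}
+{code}
+</blockquote></li>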
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1788">YARN-1788</a>.
+     Critical bug reported by Tassapol Athiapinya and fixed by Varun Vasudev (resourcemanager)<br>
+     <b>AppsCompleted/AppsKilled metric is incorrect when MR job is killed with yarn application -kill</b><br>
+     <blockquote>Run MR sleep job. Kill the application in RUNNING state. Observe RM metrics.
+Expecting AppsCompleted = 0/AppsKilled = 1
+Actual is AppsCompleted = 1/AppsKilled = 0</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1787">YARN-1787</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>yarn applicationattempt/container print wrong usage information</b><br>
+     <blockquote>yarn applicationattempt prints:
+{code}
+Invalid Command Usage : 
+usage: application
+ -appStates &lt;States&gt;             Works with -list to filter applications
+                                 based on input comma-separated list of
+                                 application states. The valid application
+                                 state can be one of the following:
+                                 ALL,NEW,NEW_SAVING,SUBMITTED,ACCEPTED,RUN
+                                 NING,FINISHED,FAILED,KILLED
+ -appTypes &lt;Types&gt;               Works with -list to filter applications
+                                 based on input comma-separated list of
+                                 application types.
+ -help                           Displays help for all commands.
+ -kill &lt;Application ID&gt;          Kills the application.
+ -list &lt;arg&gt;                     List application attempts for aplication
+                                 from AHS.
+ -movetoqueue &lt;Application ID&gt;   Moves the application to a different
+                                 queue.
+ -queue &lt;Queue Name&gt;             Works with the movetoqueue command to
+                                 specify which queue to move an
+                                 application to.
+ -status &lt;Application ID&gt;        Prints the status of the application.
+{code}
+
+yarn container prints:
+{code}
+Invalid Command Usage : 
+usage: application
+ -appStates &lt;States&gt;             Works with -list to filter applications
+                                 based on input comma-separated list of
+                                 application states. The valid application
+                                 state can be one of the following:
+                                 ALL,NEW,NEW_SAVING,SUBMITTED,ACCEPTED,RUN
+                                 NING,FINISHED,FAILED,KILLED
+ -appTypes &lt;Types&gt;               Works with -list to filter applications
+                                 based on input comma-separated list of
+                                 application types.
+ -help                           Displays help for all commands.
+ -kill &lt;Application ID&gt;          Kills the application.
+ -list &lt;arg&gt;                     List application attempts for aplication
+                                 from AHS.
+ -movetoqueue &lt;Application ID&gt;   Moves the application to a different
+                                 queue.
+ -queue &lt;Queue Name&gt;             Works with the movetoqueue command to
+                                 specify which queue to move an
+                                 application to.
+ -status &lt;Application ID&gt;        Prints the status of the application.
+{code}
+
+Both commands print irrelevant yarn application usage information.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1785">YARN-1785</a>.
+     Major bug reported by bc Wong and fixed by bc Wong <br>
+     <b>FairScheduler treats app lookup failures as ERRORs</b><br>
+     <blockquote>When invoking the /ws/v1/cluster/apps endpoint, the RM will eventually get to RMAppImpl#createAndGetApplicationReport, which calls RMAppAttemptImpl#getApplicationResourceUsageReport, which looks up the app in the scheduler, where it may or may not still exist. So FairScheduler shouldn't log an error for every lookup failure:
+
+{noformat}
+2014-02-17 08:23:21,240 ERROR org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler: Request for appInfo of unknown attemptappattempt_1392419715319_0135_000001
+
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1783">YARN-1783</a>.
+     Critical bug reported by Arpit Gupta and fixed by Jian He <br>
+     <b>yarn application does not make any progress even when no other application is running when RM is being restarted in the background</b><br>
+     <blockquote>Noticed that during HA tests some tests took over 3 hours to run when the test failed.
+Looking at the logs, I see the application made no progress for a very long time. However, if I look at the application log from YARN, it actually ran in 5 minutes.
+I am seeing the same behavior when the RM was being restarted in the background and when both the RM and AM were being restarted. This does not happen for all applications, but a few will hit this in the nightly run.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1781">YARN-1781</a>.
+     Major sub-task reported by Varun Vasudev and fixed by Varun Vasudev (nodemanager)<br>
+     <b>NM should allow users to specify max disk utilization for local disks</b><br>
+     <blockquote>This is related to YARN-257 (it's probably a sub-task?). Currently, the NM does not detect full disks and allows full disks to be used by containers, leading to repeated failures. YARN-257 deals with graceful handling of full disks. This ticket is only about detection of full disks by the disk health checkers.
+
+The NM should allow users to set a maximum disk utilization for local disks and mark disks as bad once they exceed that utilization. At the very least, the NM should detect full disks.
+
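+A minimal sketch of such a utilization check (class, method, and threshold names here are illustrative, not the actual NM implementation):
+{code}
+import java.io.File;
+
+public class DiskUtilizationCheckSketch {
+  // Assumed config value, e.g. 90.0f for 90% utilization.
+  private final float maxUtilizationPercent;
+
+  public DiskUtilizationCheckSketch(float maxUtilizationPercent) {
+    this.maxUtilizationPercent = maxUtilizationPercent;
+  }
+
+  /** Returns true if the local dir's disk is over the configured utilization. */
+  public boolean isDiskFull(File dir) {
+    long total = dir.getTotalSpace();
+    if (total == 0) {
+      return true; // unreadable or missing disk: treat as bad
+    }
+    long used = total - dir.getUsableSpace();
+    float utilization = 100f * used / total;
+    return utilization &gt; maxUtilizationPercent;
+  }
+}
+{code}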
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1780">YARN-1780</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Improve logging in timeline service</b><br>
+     <blockquote>It's difficult to trace whether the client has successfully posted the entity to the timeline service or not.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1776">YARN-1776</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>renewDelegationToken should survive RM failover</b><br>
+     <blockquote>When a delegation token is renewed, two RMStateStore operations happen: 1) removing the old DT, and 2) storing the new DT. If the RM fails in between, there would be a problem.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1775">YARN-1775</a>.
+     Major sub-task reported by Rajesh Balamohan and fixed by Rajesh Balamohan (nodemanager)<br>
+     <b>Create SMAPBasedProcessTree to get PSS information</b><br>
+     <blockquote>Create SMAPBasedProcessTree (by extending ProcfsBasedProcessTree), which will make use of PSS for computing the memory usage. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1774">YARN-1774</a>.
+     Blocker bug reported by Anubhav Dhoot and fixed by Anubhav Dhoot (resourcemanager)<br>
+     <b>FS: Submitting to non-leaf queue throws NPE</b><br>
+     <blockquote>If you create a hierarchy of queues and assign a job to a parent queue, FairScheduler quits with an NPE.
+
+
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1771">YARN-1771</a>.
+     Critical improvement reported by Sangjin Lee and fixed by Sangjin Lee (nodemanager)<br>
+     <b>many getFileStatus calls made from node manager for localizing a public distributed cache resource</b><br>
+     <blockquote>We're observing that the getFileStatus calls are putting a fair amount of load on the name node as part of checking the public-ness for localizing a resource that belongs in the public cache.
+
+We see 7 getFileStatus calls made for each of these resources. We should look into reducing the number of calls to the name node. One example:
+
+{noformat}
+2014-02-27 18:07:27,351 INFO audit: ... cmd=getfileinfo	src=/tmp/temp-887708724/tmp883330348/foo-0.0.44.jar ...
+2014-02-27 18:07:27,352 INFO audit: ... cmd=getfileinfo	src=/tmp/temp-887708724/tmp883330348/foo-0.0.44.jar ...
+2014-02-27 18:07:27,352 INFO audit: ... cmd=getfileinfo	src=/tmp/temp-887708724/tmp883330348 ...
+2014-02-27 18:07:27,353 INFO audit: ... cmd=getfileinfo	src=/tmp/temp-887708724 ...
+2014-02-27 18:07:27,353 INFO audit: ... cmd=getfileinfo	src=/tmp ...
+2014-02-27 18:07:27,354 INFO audit: ... cmd=getfileinfo	src=/	 ...
+2014-02-27 18:07:27,354 INFO audit: ... cmd=getfileinfo	src=/tmp/temp-887708724/tmp883330348/foo-0.0.44.jar ...
+2014-02-27 18:07:27,355 INFO audit: ... cmd=open	src=/tmp/temp-887708724/tmp883330348/foo-0.0.44.jar ...
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1768">YARN-1768</a>.
+     Minor bug reported by Hitesh Shah and fixed by Tsuyoshi OZAWA (client)<br>
+     <b>yarn kill non-existent application is too verbose</b><br>
+     <blockquote>Instead of catching ApplicationNotFound and logging a simple app not found message, the whole stack trace is logged.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1766">YARN-1766</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>When RM does the initiation, it should use loaded Configuration instead of bootstrap configuration.</b><br>
+     <blockquote>Right now, we have FileSystemBasedConfigurationProvider to let users upload the configurations to a remote file system and let different RMs share the same configurations. During initialization, the RM will load the configurations from the remote file system, so when the RM initializes its services, it should use the loaded configurations instead of the bootstrap configurations.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1765">YARN-1765</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>Write test cases to verify that killApplication API works in RM HA</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1764">YARN-1764</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>Handle RM fail overs after the submitApplication call.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1761">YARN-1761</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>RMAdminCLI should check whether HA is enabled before executes transitionToActive/transitionToStandby</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1760">YARN-1760</a>.
+     Trivial bug reported by Karthik Kambatla and fixed by Karthik Kambatla <br>
+     <b>TestRMAdminService assumes CapacityScheduler</b><br>
+     <blockquote>YARN-1611 adds TestRMAdminService which assumes the use of CapacityScheduler. 
+
+{noformat}
+java.lang.ClassCastException: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler cannot be cast to org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+	at org.apache.hadoop.yarn.server.resourcemanager.TestRMAdminService.testAdminRefreshQueuesWithFileSystemBasedConfigurationProvider(TestRMAdminService.java:115)
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1758">YARN-1758</a>.
+     Blocker bug reported by Hitesh Shah and fixed by Xuan Gong <br>
+     <b>MiniYARNCluster broken post YARN-1666</b><br>
+     <blockquote>NPE seen when trying to use MiniYARNCluster</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1752">YARN-1752</a>.
+     Major bug reported by Jian He and fixed by Rohith <br>
+     <b>Unexpected Unregistered event at Attempt Launched state</b><br>
+     <blockquote>{code}
+2014-02-21 14:56:03,453 ERROR org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl: Can't handle this event at current state
+org.apache.hadoop.yarn.state.InvalidStateTransitonException: Invalid event: UNREGISTERED at LAUNCHED
+  at org.apache.hadoop.yarn.state.StateMachineFactory.doTransition(StateMachineFactory.java:305)
+  at org.apache.hadoop.yarn.state.StateMachineFactory.access$300(StateMachineFactory.java:46)
+  at org.apache.hadoop.yarn.state.StateMachineFactory$InternalStateMachine.doTransition(StateMachineFactory.java:448)
+  at org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl.handle(RMAppAttemptImpl.java:647)
+  at org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl.handle(RMAppAttemptImpl.java:103)
+  at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$ApplicationAttemptEventDispatcher.handle(ResourceManager.java:733)
+  at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$ApplicationAttemptEventDispatcher.handle(ResourceManager.java:714)
+  at org.apache.hadoop.yarn.event.AsyncDispatcher.dispatch(AsyncDispatcher.java:173)
+  at org.apache.hadoop.yarn.event.AsyncDispatcher$1.run(AsyncDispatcher.java:106)
+  at java.lang.Thread.run(Thread.java:695)
+{code}
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1749">YARN-1749</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Review AHS configs and sync them up with the timeline-service configs</b><br>
+     <blockquote>We need to:
+1. Review the configuration names and default values
+2. Combine the two store class configurations
+
+Some other thoughts:
+1. Maybe we don't need null implementation of ApplicationHistoryStore any more
+2. Maybe if yarn.ahs.enabled = false, we should stop the AHS web server from returning historic information.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1748">YARN-1748</a>.
+     Blocker bug reported by Sravya Tirukkovalur and fixed by Sravya Tirukkovalur <br>
+     <b>hadoop-yarn-server-tests packages core-site.xml breaking downstream tests</b><br>
+     <blockquote>Jars should not package config files, as these can end up on clients' classpaths and break them.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1742">YARN-1742</a>.
+     Trivial bug reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Fix javadoc of parameter DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION</b><br>
+     <blockquote>In YarnConfiguration.java, 
+{code}
+  /**
+   * By default, at least 5% of disks are to be healthy to say that the node
+   * is healthy in terms of disks.
+   */
+  public static final float DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION
+    = 0.25F;
+{code}
+25% is correct.
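+
+For example, the corrected javadoc could simply state the actual default:
+{code}
+  /**
+   * By default, at least 25% of disks are to be healthy to say that the node
+   * is healthy in terms of disks.
+   */
+  public static final float DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION
+    = 0.25F;
+{code}
+</blockquote></li>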
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1734">YARN-1734</a>.
+     Critical sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>RM should get the updated Configurations when it transits from Standby to Active</b><br>
+     <blockquote>Currently, we have ConfigurationProvider, which can support LocalConfiguration and FileSystemBasedConfiguration. When HA is enabled and FileSystemBasedConfiguration is enabled, the RM cannot get the updated configurations when it transitions from Standby to Active.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1732">YARN-1732</a>.
+     Major sub-task reported by Billie Rinaldi and fixed by Billie Rinaldi <br>
+     <b>Change types of related entities and primary filters in ATSEntity</b><br>
+     <blockquote>The current types Map&lt;String, List&lt;String&gt;&gt; relatedEntities and Map&lt;String, Object&gt; primaryFilters have issues.  The List&lt;String&gt; value of the related entities map could have multiple identical strings in it, which doesn't make sense. A more major issue is that we cannot allow primary filter values to be overwritten, because otherwise we will be unable to find those primary filter entries when we want to delete an entity (without doing a nearly full scan).
+
+I propose changing related entities to Map&lt;String, Set&lt;String&gt;&gt; and primary filters to Map&lt;String, Set&lt;Object&gt;&gt;. The basic methods to add primary filters and related entities are of the form add(key, value) and will not need to change.
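+
+For illustration only, a minimal sketch of the proposed shape (the class and method names below are assumptions, not the actual ATSEntity code):
+{code}
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+public class EntitySketch {
+  private final Map&lt;String, Set&lt;String&gt;&gt; relatedEntities =
+      new HashMap&lt;String, Set&lt;String&gt;&gt;();
+  private final Map&lt;String, Set&lt;Object&gt;&gt; primaryFilters =
+      new HashMap&lt;String, Set&lt;Object&gt;&gt;();
+
+  // add(key, value) keeps its shape; duplicate values collapse into the set.
+  public void addRelatedEntity(String entityType, String entityId) {
+    Set&lt;String&gt; ids = relatedEntities.get(entityType);
+    if (ids == null) {
+      ids = new HashSet&lt;String&gt;();
+      relatedEntities.put(entityType, ids);
+    }
+    ids.add(entityId);
+  }
+
+  public void addPrimaryFilter(String key, Object value) {
+    Set&lt;Object&gt; values = primaryFilters.get(key);
+    if (values == null) {
+      values = new HashSet&lt;Object&gt;();
+      primaryFilters.put(key, values);
+    }
+    values.add(value);
+  }
+}
+{code}
+</blockquote></li>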
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1730">YARN-1730</a>.
+     Major sub-task reported by Billie Rinaldi and fixed by Billie Rinaldi <br>
+     <b>Leveldb timeline store needs simple write locking</b><br>
+     <blockquote>Although the leveldb writes are performed atomically in a batch, a start time for the entity needs to be identified before each write. Thus a per-entity write lock should be acquired.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1729">YARN-1729</a>.
+     Major sub-task reported by Billie Rinaldi and fixed by Billie Rinaldi <br>
+     <b>TimelineWebServices always passes primary and secondary filters as strings</b><br>
+     <blockquote>Primary filters and secondary filter values can be arbitrary JSON-compatible Objects. The web services should determine whether the filters specified as query parameters are objects or strings before passing them to the store.
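+
+One possible way to make that distinction, sketched here with Jackson (the helper class below is an assumption, not the actual TimelineWebServices code):
+{code}
+import org.codehaus.jackson.map.ObjectMapper;
+
+public class FilterValueSketch {
+  private static final ObjectMapper MAPPER = new ObjectMapper();
+
+  /** Parses a query-parameter value as JSON if possible; otherwise keeps it as a plain string. */
+  public static Object parseFilterValue(String raw) {
+    try {
+      return MAPPER.readValue(raw, Object.class); // numbers, booleans, maps, lists...
+    } catch (Exception e) {
+      return raw; // not valid JSON: treat it as a plain string
+    }
+  }
+}
+{code}
+</blockquote></li>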
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1724">YARN-1724</a>.
+     Critical bug reported by Sandy Ryza and fixed by Sandy Ryza (scheduler)<br>
+     <b>Race condition in Fair Scheduler when continuous scheduling is turned on </b><br>
+     <blockquote>If nodes' resource allocations change during
+        Collections.sort(nodeIdList, nodeAvailableResourceComparator);
+we'll hit:
+java.lang.IllegalArgumentException: Comparison method violates its general contract!
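+
+A self-contained sketch of one mitigation, sorting against an immutable snapshot so the comparator stays consistent even if live allocations change (this illustrates the technique only, not the FairScheduler patch itself):
+{code}
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class SnapshotSortSketch {
+  public static void main(String[] args) {
+    List&lt;String&gt; nodeIds = new ArrayList&lt;String&gt;(Arrays.asList("node1", "node2", "node3"));
+    // Live available-resource values; in the scheduler these can change concurrently.
+    Map&lt;String, Long&gt; liveAvailable = new HashMap&lt;String, Long&gt;();
+    liveAvailable.put("node1", 4096L);
+    liveAvailable.put("node2", 8192L);
+    liveAvailable.put("node3", 2048L);
+
+    // Take a snapshot first, then sort only against the snapshot.
+    final Map&lt;String, Long&gt; snapshot = new HashMap&lt;String, Long&gt;(liveAvailable);
+    Collections.sort(nodeIds, new Comparator&lt;String&gt;() {
+      public int compare(String a, String b) {
+        return snapshot.get(b).compareTo(snapshot.get(a)); // most available first
+      }
+    });
+    System.out.println(nodeIds); // [node2, node1, node3]
+  }
+}
+{code}
+</blockquote></li>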
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1721">YARN-1721</a>.
+     Critical bug reported by Sandy Ryza and fixed by Sandy Ryza (scheduler)<br>
+     <b>When moving app between queues in Fair Scheduler, grab lock on FSSchedulerApp</b><br>
+     <blockquote>FairScheduler.moveApplication should grab lock on FSSchedulerApp, so that allocate() can't be modifying it at the same time.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1719">YARN-1719</a>.
+     Major sub-task reported by Billie Rinaldi and fixed by Billie Rinaldi <br>
+     <b>ATSWebServices produces jersey warnings</b><br>
+     <blockquote>These don't appear to affect how the web services work, but the following warnings are logged:
+{noformat}
+WARNING: The following warnings have been detected with resource and/or provider classes:
+  WARNING: A sub-resource method, public org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.ATSWebServices$AboutInfo org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.ATSWebServices.about(javax.servlet.http.HttpServletRequest,javax.servlet.http.HttpServletResponse), with URI template, "/", is treated as a resource method
+  WARNING: A sub-resource method, public org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.ATSWebServices.postEntities(javax.servlet.http.HttpServletRequest,javax.servlet.http.HttpServletResponse,org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities), with URI template, "/", is treated as a resource method
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1717">YARN-1717</a>.
+     Major sub-task reported by Billie Rinaldi and fixed by Billie Rinaldi <br>
+     <b>Enable offline deletion of entries in leveldb timeline store</b><br>
+     <blockquote>The leveldb timeline store implementation needs the following:
+* better documentation of its internal structures
+* internal changes to enable deleting entities
+** never overwrite existing primary filter entries
+** add hidden reverse pointers to related entities</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1706">YARN-1706</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Create an utility function to dump timeline records to json </b><br>
+     <blockquote>For verification and logging purposes.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1704">YARN-1704</a>.
+     Blocker sub-task reported by Billie Rinaldi and fixed by Billie Rinaldi <br>
+     <b>Review LICENSE and NOTICE to reflect new levelDB releated libraries being used</b><br>
+     <blockquote>Make any changes necessary in LICENSE and NOTICE related to dependencies introduced by the application timeline store.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1698">YARN-1698</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Replace MemoryApplicationTimelineStore with LeveldbApplicationTimelineStore as default</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1697">YARN-1697</a>.
+     Major bug reported by Sandy Ryza and fixed by Sandy Ryza (nodemanager)<br>
+     <b>NodeManager reports negative running containers</b><br>
+     <blockquote>We're seeing the NodeManager metrics report a negative number of running containers.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1692">YARN-1692</a>.
+     Major bug reported by Sangjin Lee and fixed by Sangjin Lee (scheduler)<br>
+     <b>ConcurrentModificationException in fair scheduler AppSchedulable</b><br>
+     <blockquote>We saw a ConcurrentModificationException thrown in the fair scheduler:
+
+{noformat}
+2014-02-07 01:40:01,978 ERROR org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler: Exception in fair scheduler UpdateThread
+java.util.ConcurrentModificationException
+        at java.util.HashMap$HashIterator.nextEntry(HashMap.java:926)
+        at java.util.HashMap$ValueIterator.next(HashMap.java:954)
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AppSchedulable.updateDemand(AppSchedulable.java:85)
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue.updateDemand(FSLeafQueue.java:125)
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSParentQueue.updateDemand(FSParentQueue.java:82)
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.update(FairScheduler.java:217)
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler$UpdateThread.run(FairScheduler.java:195)
+        at java.lang.Thread.run(Thread.java:724)
+{noformat}
+
+The map that gets returned by FSSchedulerApp.getResourceRequests() is iterated on without proper synchronization.
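+
+A small, self-contained sketch of the copy-under-lock pattern that avoids this kind of ConcurrentModificationException (the class below is illustrative, not the actual AppSchedulable code):
+{code}
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class SafeIterationSketch {
+  private final Map&lt;String, Integer&gt; requests = new HashMap&lt;String, Integer&gt;();
+
+  // Writers mutate the map while holding the same lock readers use to copy it.
+  public synchronized void put(String key, int numContainers) {
+    requests.put(key, numContainers);
+  }
+
+  public int totalContainers() {
+    List&lt;Integer&gt; copy;
+    synchronized (this) {
+      copy = new ArrayList&lt;Integer&gt;(requests.values());
+    }
+    int total = 0;
+    for (int n : copy) { // iterate the copy, never the live map
+      total += n;
+    }
+    return total;
+  }
+}
+{code}
+</blockquote></li>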
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1690">YARN-1690</a>.
+     Major sub-task reported by Mayank Bansal and fixed by Mayank Bansal <br>
+     <b>Sending timeline entities+events from Distributed shell </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1689">YARN-1689</a>.
+     Critical bug reported by Deepesh Khandelwal and fixed by Vinod Kumar Vavilapalli (resourcemanager)<br>
+     <b>RMAppAttempt is not killed when RMApp is at ACCEPTED</b><br>
+     <blockquote>When running some Hive on Tez jobs, the RM after a while gets into an unusable state where no jobs run. In the RM log I see the following exception:
+{code}
+2014-02-04 20:28:08,553 WARN  ipc.Server (Server.java:run(1978)) - IPC Server handler 0 on 8030, call org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB.registerApplicationMaster from 172.18.145.156:40474 Call#0 Retry#0: error: java.lang.NullPointerException
+java.lang.NullPointerException
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler.getTransferredContainers(AbstractYarnScheduler.java:48)
+        at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.registerApplicationMaster(ApplicationMasterService.java:278)
+        at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationMasterProtocolPBServiceImpl.registerApplicationMaster(ApplicationMasterProtocolPBServiceImpl.java:90)
+        at org.apache.hadoop.yarn.proto.ApplicationMasterProtocol$ApplicationMasterProtocolService$2.callBlockingMethod(ApplicationMasterProtocol.java:95)
+        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:585)
+        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:928)
+        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1962)
+        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1958)
+        at java.security.AccessController.doPrivileged(Native Method)
+        at javax.security.auth.Subject.doAs(Subject.java:396)
+        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1548)
+        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1956)
+......
+2014-02-04 20:28:08,544 ERROR rmapp.RMAppImpl (RMAppImpl.java:handle(626)) - Can't handle this event at current state
+org.apache.hadoop.yarn.state.InvalidStateTransitonException: Invalid event: ATTEMPT_REGISTERED at KILLED
+        at org.apache.hadoop.yarn.state.StateMachineFactory.doTransition(StateMachineFactory.java:305)
+        at org.apache.hadoop.yarn.state.StateMachineFactory.access$300(StateMachineFactory.java:46)
+        at org.apache.hadoop.yarn.state.StateMachineFactory$InternalStateMachine.doTransition(StateMachineFactory.java:448)
+        at org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl.handle(RMAppImpl.java:624)
+        at org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl.handle(RMAppImpl.java:81)
+        at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$ApplicationEventDispatcher.handle(ResourceManager.java:656)
+        at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$ApplicationEventDispatcher.handle(ResourceManager.java:640)
+        at org.apache.hadoop.yarn.event.AsyncDispatcher.dispatch(AsyncDispatcher.java:173)
+        at org.apache.hadoop.yarn.event.AsyncDispatcher$1.run(AsyncDispatcher.java:106)
+        at java.lang.Thread.run(Thread.java:662)
+2014-02-04 20:28:08,549 INFO  resourcemanager.RMAuditLogger (RMAuditLogger.java:logSuccess(140)) - USER=hrt_qa  IP=172.18.145.156       OPERATION=Kill Application Request      TARGET=ClientRMService  RESULT=SUCCESS  APPID=application_1391543307203_0001
+2014-02-04 20:28:08,553 WARN  ipc.Server (Server.java:run(1978)) - IPC Server handler 0 on 8030, call org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB.registerApplicationMaster from 172.18.145.156:40474 Call#0 Retry#0: error: java.lang.NullPointerException
+java.lang.NullPointerException
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler.getTransferredContainers(AbstractYarnScheduler.java:48)
+        at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.registerApplicationMaster(ApplicationMasterService.java:278)
+        at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationMasterProtocolPBServiceImpl.registerApplicationMaster(ApplicationMasterProtocolPBServiceImpl.java:90)
+        at org.apache.hadoop.yarn.proto.ApplicationMasterProtocol$ApplicationMasterProtocolService$2.callBlockingMethod(ApplicationMasterProtocol.java:95)
+        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:585)
+        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:928)
+        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1962)
+        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1958)
+        at java.security.AccessController.doPrivileged(Native Method)
+        at javax.security.auth.Subject.doAs(Subject.java:396)
+        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1548)
+        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1956)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1687">YARN-1687</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Refactoring timeline classes to remove "app" related words</b><br>
+     <blockquote>Remove ATS prefix, change package name, fix javadoc and so on</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1686">YARN-1686</a>.
+     Major bug reported by Rohith and fixed by Rohith (nodemanager)<br>
+     <b>NodeManager.resyncWithRM() does not handle exception which cause NodeManger to Hang.</b><br>
+     <blockquote>During NodeManager startup, if registration with the ResourceManager throws an exception, then the NodeManager shuts down.
+
+Consider the case where NM-1 is registered with the RM and the RM issues a resync to the NM. If any exception is thrown in "resyncWithRM" (which starts a new thread that does not handle exceptions) during the RESYNC event, then this thread is lost and the NodeManager enters a hung state.
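+
+A minimal sketch of the defensive pattern, with hypothetical method names standing in for the real resync work:
+{code}
+public class ResyncSketch {
+  // Stand-ins for the real NM calls; the names here are assumptions.
+  void reRegisterWithRM() throws Exception { }
+  void shutDownNodeManager(Exception cause) { }
+
+  void startResyncThread() {
+    Thread t = new Thread(new Runnable() {
+      public void run() {
+        try {
+          reRegisterWithRM();
+        } catch (Exception e) {
+          // Never let the resync thread die silently; fail fast instead of hanging.
+          shutDownNodeManager(e);
+        }
+      }
+    }, "nm-resync");
+    t.start();
+  }
+}
+{code}
+</blockquote></li>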
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1685">YARN-1685</a>.
+     Major sub-task reported by Mayank Bansal and fixed by Zhijie Shen <br>
+     <b>Bugs around log URL</b><br>
+     <blockquote>1. Log URL should be different when the container is running and finished
+
+2. Null case needs to be handled
+
+3. The way of constructing log URL should be corrected</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1684">YARN-1684</a>.
+     Major sub-task reported by Billie Rinaldi and fixed by Billie Rinaldi <br>
+     <b>Fix history server heap size in yarn script</b><br>
+     <blockquote>The yarn script currently has the following:
+{noformat}
+  if [ "$YARN_RESOURCEMANAGER_HEAPSIZE" != "" ]; then
+    JAVA_HEAP_MAX="-Xmx""$YARN_HISTORYSERVER_HEAPSIZE""m"
+  fi
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1676">YARN-1676</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>Make admin refreshUserToGroupsMappings of configuration work across RM failover</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1673">YARN-1673</a>.
+     Blocker bug reported by Tassapol Athiapinya and fixed by Mayank Bansal (client)<br>
+     <b>Valid yarn kill application prints out help message.</b><br>
+     <blockquote>yarn application -kill &lt;application ID&gt; 
+used to work previously. In 2.4.0 it prints out the help message and does not kill the application.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1672">YARN-1672</a>.
+     Trivial bug reported by Karthik Kambatla and fixed by Naren Koneru (nodemanager)<br>
+     <b>YarnConfiguration is missing a default for yarn.nodemanager.log.retain-seconds</b><br>
+     <blockquote>YarnConfiguration is missing a default for yarn.nodemanager.log.retain-seconds</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1670">YARN-1670</a>.
+     Critical bug reported by Thomas Graves and fixed by Mit Desai <br>
+     <b>aggregated log writer can write more log data then it says is the log length</b><br>
+     <blockquote>We have seen exceptions when using 'yarn logs' to read log files. 
+at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)
+       at java.lang.Long.parseLong(Long.java:441)
+       at java.lang.Long.parseLong(Long.java:483)
+       at org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat$LogReader.readAContainerLogsForALogType(AggregatedLogFormat.java:518)
+       at org.apache.hadoop.yarn.logaggregation.LogDumper.dumpAContainerLogs(LogDumper.java:178)
+       at org.apache.hadoop.yarn.logaggregation.LogDumper.run(LogDumper.java:130)
+       at org.apache.hadoop.yarn.logaggregation.LogDumper.main(LogDumper.java:246)
+
+
+We traced it down to the reader trying to read the file type of the next file, but what it reads is still log data from the previous file. What happened was that the Log Length was written as a certain size, but the log data was actually longer than that.
+
+Inside of the write() routine in LogValue, it first writes what the logfile length is, but then when it goes to write the log itself it just goes to the end of the file. There is a race condition here: if someone is still writing to the file when it goes to be aggregated, the length written could be too small.
+
+We should have the write() routine stop once it has written whatever it said was the length. It would be nice if we could somehow tell the user it might be truncated, but I'm not sure of a good way to do this.
+
+We also noticed a bug in readAContainerLogsForALogType where it is using an int for curRead whereas it should be using a long.
+
+      while (len != -1 &amp;&amp; curRead &lt; fileLength) {
+
+This isn't actually a problem right now, as the underlying decoder appears to do the right thing and the len condition exits.
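+
+For illustration, a self-contained sketch of a copy loop that both stops at the declared length and tracks progress with a long (this shows the general pattern only, not the AggregatedLogFormat patch):
+{code}
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+public class BoundedCopySketch {
+  /** Copies at most declaredLength bytes, even if the source file has grown past it. */
+  public static void copyBounded(InputStream in, OutputStream out, long declaredLength)
+      throws IOException {
+    byte[] buf = new byte[65536];
+    long remaining = declaredLength; // long, not int, so large logs are handled
+    while (remaining &gt; 0) {
+      int toRead = (int) Math.min(buf.length, remaining);
+      int n = in.read(buf, 0, toRead);
+      if (n == -1) {
+        break; // source shorter than declared; stop cleanly
+      }
+      out.write(buf, 0, n);
+      remaining -= n;
+    }
+  }
+}
+{code}
+</blockquote></li>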
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1669">YARN-1669</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>Make admin refreshServiceAcls work across RM failover</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1668">YARN-1668</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>Make admin refreshAdminAcls work across RM failover</b><br>
+     <blockquote>Change the handling of admin-acls to be available across RM failover by making use of a remote configuration provider.
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1667">YARN-1667</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>Make admin refreshSuperUserGroupsConfiguration work across RM failover</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1666">YARN-1666</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>Make admin refreshNodes work across RM failover</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1665">YARN-1665</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Xuan Gong (resourcemanager)<br>
+     <b>Set better defaults for HA configs for automatic failover</b><br>
+     <blockquote>In order to enable HA (automatic failover) I had to set the following configs:
+
+
+{code}
+&lt;property&gt;
+    &lt;name&gt;yarn.resourcemanager.ha.enabled&lt;/name&gt;
+    &lt;value&gt;true&lt;/value&gt;
+  &lt;/property&gt;
+  
+  &lt;property&gt;
+    &lt;name&gt;yarn.resourcemanager.ha.automatic-failover.enabled&lt;/name&gt;
+    &lt;value&gt;true&lt;/value&gt;
+  &lt;/property&gt;
+
+  &lt;property&gt;
+    &lt;name&gt;yarn.resourcemanager.ha.automatic-failover.embedded&lt;/name&gt;
+    &lt;value&gt;true&lt;/value&gt;
+  &lt;/property&gt;
+
+{code}
+
+
+I believe the user should just have to set yarn.resourcemanager.ha.enabled=true and the rest should be set as defaults. Basically automatic failover should be the default.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1661">YARN-1661</a>.
+     Major bug reported by Tassapol Athiapinya and fixed by Vinod Kumar Vavilapalli (applications/distributed-shell)<br>
+     <b>AppMaster logs says failing even if an application does succeed.</b><br>
+     <blockquote>Run:
+/usr/bin/yarn  org.apache.hadoop.yarn.applications.distributedshell.Client -jar &lt;distributed shell jar&gt; -shell_command ls
+
+Open the AM logs. The last line indicates an AM failure even though the container logs show the correct ls output.
+
+{code}
+2014-01-24 21:45:29,592 INFO  [main] distributedshell.ApplicationMaster (ApplicationMaster.java:finish(599)) - Application completed. Signalling finish to RM
+2014-01-24 21:45:29,612 INFO  [main] impl.AMRMClientImpl (AMRMClientImpl.java:unregisterApplicationMaster(315)) - Waiting for application to be successfully unregistered.
+2014-01-24 21:45:29,816 INFO  [main] distributedshell.ApplicationMaster (ApplicationMaster.java:main(267)) - Application Master failed. exiting
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1660">YARN-1660</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Xuan Gong (resourcemanager)<br>
+     <b>add the ability to set yarn.resourcemanager.hostname.rm-id instead of setting all the various host:port properties for RM</b><br>
+     <blockquote>Currently the user has to specify all the various host:port properties for RM. We should follow the pattern that we do for non HA setup where we can specify yarn.resourcemanager.hostname.rm-id and the defaults are used for all other affected properties.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1659">YARN-1659</a>.
+     Major sub-task reported by Billie Rinaldi and fixed by Billie Rinaldi <br>
+     <b>Define the ApplicationTimelineStore store as an abstraction for implementing different storage impls for storing timeline information</b><br>
+     <blockquote>These will be used by the ApplicationTimelineStore interface. The web services will convert the store-facing objects to the user-facing objects.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1658">YARN-1658</a>.
+     Major sub-task reported by Cindy Li and fixed by Cindy Li <br>
+     <b>Webservice should redirect to active RM when HA is enabled.</b><br>
+     <blockquote>When HA is enabled, web service to standby RM should be redirected to the active RM. This is a related Jira to YARN-1525.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1641">YARN-1641</a>.
+     Major sub-task reported by Karthik Kambatla and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>ZK store should attempt a write periodically to ensure it is still Active</b><br>
+     <blockquote>Fencing in the ZK store kicks in when the RM tries to write something to the store. If the RM doesn't write anything to the store, it doesn't get fenced and can continue to assume it is the Active.
+
+By periodically writing a file (say, every RM_ZK_TIMEOUT_MS seconds), we can ensure it gets fenced.
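+
+A rough sketch of the idea, using a hypothetical store interface (the real ZK-based RMStateStore has its own write path and failure handling):
+{code}
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+public class PeriodicFencingWriteSketch {
+  // Assumed interface; names here are illustrative only.
+  interface Store {
+    void writeFencingCheck() throws Exception;
+  }
+
+  static void start(final Store store, long periodMs) {
+    ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
+    ses.scheduleAtFixedRate(new Runnable() {
+      public void run() {
+        try {
+          // If another RM has fenced this one out, the write fails and we can step down.
+          store.writeFencingCheck();
+        } catch (Exception e) {
+          // In the real RM this would trigger a transition to standby.
+        }
+      }
+    }, periodMs, periodMs, TimeUnit.MILLISECONDS);
+  }
+}
+{code}
+</blockquote></li>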
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1640">YARN-1640</a>.
+     Blocker sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>Manual Failover does not work in secure clusters</b><br>
+     <blockquote>NodeManager gets rejected after manually making one RM as active.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1639">YARN-1639</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Xuan Gong (resourcemanager)<br>
+     <b>YARM RM HA requires different configs on different RM hosts</b><br>
+     <blockquote>We need to set yarn.resourcemanager.ha.id to rm1 or rm2 based on which RM you want to be first or second.
+This means we have different configs on different RM nodes. This is unlike HDFS HA, where the same configs are pushed to both NNs; it would be better to have the same setup for the RM, as this would make installation and management easier.
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1637">YARN-1637</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Zhijie Shen <br>
+     <b>Implement a client library for java users to post entities+events</b><br>
+     <blockquote>This is a wrapper around the web-service to facilitate easy posting of entity+event data to the time-line server.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1636">YARN-1636</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Zhijie Shen <br>
+     <b>Implement timeline related web-services inside AHS for storing and retrieving entities+events</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1635">YARN-1635</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Billie Rinaldi <br>
+     <b>Implement a Leveldb based ApplicationTimelineStore</b><br>
+     <blockquote>As per the design doc, we need a levelDB + local-filesystem based implementation to start with and for small deployments.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1634">YARN-1634</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Zhijie Shen <br>
+     <b>Define an in-memory implementation of ApplicationTimelineStore</b><br>
+     <blockquote>As per the design doc, the store needs to pluggable. We need a base interface, and an in-memory implementation for testing.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1633">YARN-1633</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Zhijie Shen <br>
+     <b>Define user-faced entity, entity-info and event objects</b><br>
+     <blockquote>Define the core objects of the application-timeline effort.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1632">YARN-1632</a>.
+     Minor bug reported by Chen He and fixed by Chen He <br>
+     <b>TestApplicationMasterServices should be under org.apache.hadoop.yarn.server.resourcemanager package</b><br>
+     <blockquote>ApplicationMasterService is under org.apache.hadoop.yarn.server.resourcemanager package. However, its unit test file TestApplicationMasterService is placed under org.apache.hadoop.yarn.server.resourcemanager.applicationmasterservice package which only contains one file (TestApplicationMasterService). </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1625">YARN-1625</a>.
+     Trivial sub-task reported by Shinichi Yamashita and fixed by Shinichi Yamashita <br>
+     <b>mvn apache-rat:check outputs warning message in YARN-321 branch</b><br>
+     <blockquote>When I ran dev-support/test-patch.sh, the following message was output.
+
+{code}
+mvn apache-rat:check -DHadoopPatchProcess &gt; /tmp/patchReleaseAuditOutput.txt 2&gt;&amp;1
+There appear to be 1 release audit warnings after applying the patch.
+{code}
+
+{code}
+ !????? /home/sinchii/git/YARN-321-test/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/applicationhistory/.keep
+Lines that start with ????? in the release audit report indicate files that do not have an Apache license header.
+{code}
+
+To avoid the release audit warning, pom.xml should be fixed.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1617">YARN-1617</a>.
+     Major bug reported by Sandy Ryza and fixed by Sandy Ryza (scheduler)<br>
+     <b>Remove ancient comment and surround LOG.debug in AppSchedulingInfo.allocate</b><br>
+     <blockquote>{code}
+  synchronized private void allocate(Container container) {
+    // Update consumption and track allocations
+    //TODO: fixme sharad
+    /* try {
+        store.storeContainer(container);
+      } catch (IOException ie) {
+        // TODO fix this. we shouldnt ignore
+      }*/
+    
+    LOG.debug("allocate: applicationId=" + applicationId + " container="
+        + container.getId() + " host="
+        + container.getNodeId().toString());
+  }
+{code}
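+
+One possible shape of the cleaned-up method (a sketch only), dropping the stale TODO block and guarding the debug message:
+{code}
+  synchronized private void allocate(Container container) {
+    // Update consumption and track allocations
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("allocate: applicationId=" + applicationId + " container="
+          + container.getId() + " host="
+          + container.getNodeId().toString());
+    }
+  }
+{code}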
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1613">YARN-1613</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Akira AJISAKA <br>
+     <b>Fix config name YARN_HISTORY_SERVICE_ENABLED</b><br>
+     <blockquote>YARN_HISTORY_SERVICE_ENABLED property name is "yarn.ahs..enabled", which is wrong.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1611">YARN-1611</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>Make admin refresh of capacity scheduler configuration work across RM failover</b><br>
+     <blockquote>Currently, if we do refresh* against a standby RM, it will fail over to the current active RM and do the refresh* based on the local configuration file of the active RM.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1605">YARN-1605</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>Fix formatting issues with new module in YARN-321 branch</b><br>
+     <blockquote>There are a bunch of formatting issues. I'm restricting myself for a sweep of all the files in the new module.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1597">YARN-1597</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>FindBugs warnings on YARN-321 branch</b><br>
+     <blockquote>There are a bunch of findBugs warnings on YARN-321 branch.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1596">YARN-1596</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>Javadoc failures on YARN-321 branch</b><br>
+     <blockquote>There are some javadoc issues on YARN-321 branch.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1595">YARN-1595</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>Test failures on YARN-321 branch</b><br>
+     <blockquote>mvn test doesn't pass on YARN-321 branch anymore.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1594">YARN-1594</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>YARN-321 branch needs to be updated after YARN-888 pom changes</b><br>
+     <blockquote>YARN-888 changed the pom structure. And so latest merge to trunk breaks YARN-321 branch.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1591">YARN-1591</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Tsuyoshi OZAWA <br>
+     <b>TestResourceTrackerService fails randomly on trunk</b><br>
+     <blockquote>As evidenced by Jenkins at https://issues.apache.org/jira/browse/YARN-1041?focusedCommentId=13868621&amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13868621.
+
+It's failing randomly on trunk on my local box too </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1590">YARN-1590</a>.
+     Major bug reported by Mohammad Kamrul Islam and fixed by Mohammad Kamrul Islam (resourcemanager)<br>
+     <b>_HOST doesn't expand properly for RM, NM, ProxyServer and JHS</b><br>
+     <blockquote>_HOST is not properly substituted when we use a VIP address. Currently it always uses the host name of the machine and disregards the VIP address. This is true mainly for the RM, NM, WebProxy, and JHS RPC services. It looks like it is working fine for web service authentication.
+
+On the other hand, the same thing works fine for the NN and SNN in RPC as well as web services.
+ </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1588">YARN-1588</a>.
+     Major sub-task reported by Jian He and fixed by Jian He <br>
+     <b>Rebind NM tokens for previous attempt's running containers to the new attempt</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1587">YARN-1587</a>.
+     Major sub-task reported by Mayank Bansal and fixed by Vinod Kumar Vavilapalli <br>
+     <b>[YARN-321] Merge Patch for YARN-321</b><br>
+     <blockquote>Merge Patch</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1578">YARN-1578</a>.
+     Major sub-task reported by Shinichi Yamashita and fixed by Shinichi Yamashita <br>
+     <b>Fix how to read history file in FileSystemApplicationHistoryStore</b><br>
+     <blockquote>I ran the PiEstimator job on a Hadoop cluster with YARN-321 applied.
+After the job ended, when I accessed the HistoryServer web UI, it displayed a "500" error, and the HistoryServer daemon log showed the following.
+
+{code}
+2014-01-09 13:31:12,227 ERROR org.apache.hadoop.yarn.webapp.Dispatcher: error handling URI: /applicationhistory/appattempt/appattempt_1389146249925_0008_000001
+java.lang.reflect.InvocationTargetException
+        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
+        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
+        at java.lang.reflect.Method.invoke(Method.java:597)
+        at org.apache.hadoop.yarn.webapp.Dispatcher.service(Dispatcher.java:153)
+        at javax.servlet.http.HttpServlet.service(HttpServlet.java:820)
+(snip...)
+Caused by: java.lang.NullPointerException
+        at org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore.mergeContainerHistoryData(FileSystemApplicationHistoryStore.java:696)
+        at org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore.getContainers(FileSystemApplicationHistoryStore.java:429)
+        at org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManagerImpl.getContainers(ApplicationHistoryManagerImpl.java:201)
+        at org.apache.hadoop.yarn.server.webapp.AppAttemptBlock.render(AppAttemptBlock.java:110)
+(snip...)
+{code}
+
+I confirmed from the ApplicationHistory file that there was a container which had not finished.
+In the ResourceManager daemon log, the ResourceManager reserved this container but did not allocate it.
+
+When FileSystemApplicationHistoryStore reads container information without finish data in the history file, this problem occurs.
+To handle the case where there is no finish data, we should fix how FileSystemApplicationHistoryStore reads the history file.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1577">YARN-1577</a>.
+     Blocker sub-task reported by Jian He and fixed by Jian He <br>
+     <b>Unmanaged AM is broken because of YARN-1493</b><br>
+     <blockquote>Today the unmanaged AM client waits for the app state to be Accepted before launching the AM. This is broken since YARN-1493 changed the RM to start the attempt after the application is Accepted. We may need to introduce an attempt state report that the client can rely on to query the attempt state and choose when to launch the unmanaged AM.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1570">YARN-1570</a>.
+     Minor improvement reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Formatting the lines within 80 chars in YarnCommands.apt.vm</b><br>
+     <blockquote>In YarnCommands.apt.vm, there are some lines longer than 80 characters.
+For example:
+{code}
+  Yarn commands are invoked by the bin/yarn script. Running the yarn script without any arguments prints the description for all commands.
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1566">YARN-1566</a>.
+     Major sub-task reported by Jian He and fixed by Jian He <br>
+     <b>Change distributed-shell to retain containers from previous AppAttempt</b><br>
+     <blockquote>Change distributed-shell to reuse previous AM's running containers when AM is restarting.  It can also be made configurable whether to enable this feature or not.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1555">YARN-1555</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>[YARN-321] Failing tests in org.apache.hadoop.yarn.server.applicationhistoryservice.*</b><br>
+     <blockquote>Several tests are failing on the latest YARN-321 branch.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1553">YARN-1553</a>.
+     Major bug reported by Haohui Mai and fixed by Haohui Mai <br>
+     <b>Do not use HttpConfig.isSecure() in YARN</b><br>
+     <blockquote>HDFS-5305 and related JIRAs decided that each individual project will have its own configuration for HTTP policy. {{HttpConfig.isSecure}} is a global static method which no longer fits that design. The same functionality should be moved into the YARN code base.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1536">YARN-1536</a>.
+     Minor improvement reported by Karthik Kambatla and fixed by Anubhav Dhoot (resourcemanager)<br>
+     <b>Cleanup: Get rid of ResourceManager#get*SecretManager() methods and use the RMContext methods instead</b><br>
+     <blockquote>Both ResourceManager and RMContext have methods to access the secret managers, and it should be safe (cleaner) to get rid of the ResourceManager methods.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1534">YARN-1534</a>.
+     Major sub-task reported by Shinichi Yamashita and fixed by Shinichi Yamashita <br>
+     <b>TestAHSWebApp failed in YARN-321 branch</b><br>
+     <blockquote>I ran the following commands and confirmed the failure of TestAHSWebApp.
+
+{code}
+[sinchii@hdX YARN-321-test]$ mvn clean test -Dtest=org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.*
+{code}
+
+{code}
+Running org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.TestAHSWebServices
+Tests run: 9, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.492 sec - in org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.TestAHSWebServices
+Running org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.TestAHSWebApp
+Tests run: 1, Failures: 0, Errors: 1, Skipped: 0, Time elapsed: 0.193 sec &lt;&lt;&lt; FAILURE! - in org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.TestAHSWebApp
+initializationError(org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.TestAHSWebApp)  Time elapsed: 0.016 sec  &lt;&lt;&lt; ERROR!
+java.lang.Exception: Test class should have exactly one public zero-argument constructor
+        at org.junit.runners.BlockJUnit4ClassRunner.validateZeroArgConstructor(BlockJUnit4ClassRunner.java:144)
+        at org.junit.runners.BlockJUnit4ClassRunner.validateConstructor(BlockJUnit4ClassRunner.java:121)
+        at org.junit.runners.BlockJUnit4ClassRunner.collectInitializationErrors(BlockJUnit4ClassRunner.java:101)
+        at org.junit.runners.ParentRunner.validate(ParentRunner.java:344)
+(*snip*)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1531">YARN-1531</a>.
+     Major bug reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>True up yarn command documentation</b><br>
+     <blockquote>There are some options which are not documented in the YARN commands document.
+For example, the "yarn rmadmin" command options are as follows:
+{code}
+ Usage: yarn rmadmin
+   -refreshQueues 
+   -refreshNodes 
+   -refreshSuperUserGroupsConfiguration 
+   -refreshUserToGroupsMappings 
+   -refreshAdminAcls 
+   -refreshServiceAcl 
+   -getGroups [username]
+   -help [cmd]
+   -transitionToActive &lt;serviceId&gt;
+   -transitionToStandby &lt;serviceId&gt;
+   -failover [--forcefence] [--forceactive] &lt;serviceId&gt; &lt;serviceId&gt;
+   -getServiceState &lt;serviceId&gt;
+   -checkHealth &lt;serviceId&gt;
+{code}
+But some of the new options such as "-getGroups", "-transitionToActive", and "-transitionToStandby" are not documented.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1528">YARN-1528</a>.
+     Blocker bug reported by Karthik Kambatla and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>Allow setting auth for ZK connections</b><br>
+     <blockquote>The ZK store and embedded election allow setting ZK ACLs but not auth information.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1525">YARN-1525</a>.
+     Major sub-task reported by Xuan Gong and fixed by Cindy Li <br>
+     <b>Web UI should redirect to active RM when HA is enabled.</b><br>
+     <blockquote>When failover happens, the web UI should redirect to the currently active RM.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1521">YARN-1521</a>.
+     Blocker sub-task reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>Mark appropriate protocol methods with the idempotent annotation or AtMostOnce annotation</b><br>
+     <blockquote>After YARN-1028, we added automatic failover to RMProxy. This JIRA is to identify whether we need to add the idempotent annotation and which methods can be marked as idempotent.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1512">YARN-1512</a>.
+     Major improvement reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>Enhance CS to decouple scheduling from node heartbeats</b><br>
+     <blockquote>Enhance CS to decouple scheduling from node heartbeats; a prototype has improved latency significantly.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1493">YARN-1493</a>.
+     Major sub-task reported by Jian He and fixed by Jian He <br>
+     <b>Schedulers don't recognize apps separately from app-attempts</b><br>
+     <blockquote>Today, the scheduler is tied to the attempt only.
+
+We need to separate app-level handling logic in the scheduler. We can add new app-level events to the scheduler and separate the app-level logic out. This is good for work-preserving AM restart and RM restart, and is also needed for differentiating app-level metrics from attempt-level metrics.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1490">YARN-1490</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Jian He <br>
+     <b>RM should optionally not kill all containers when an ApplicationMaster exits</b><br>
+     <blockquote>This is needed to enable work-preserving AM restart. Some apps may choose to reconnect to their old running containers; some may not want to. This should be an option.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1470">YARN-1470</a>.
+     Major bug reported by Sandy Ryza and fixed by Anubhav Dhoot <br>
+     <b>Add audience annotation to MiniYARNCluster</b><br>
+     <blockquote>We should make it clear whether this is a public interface.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1461">YARN-1461</a>.
+     Major sub-task reported by Karthik Kambatla and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>RM API and RM changes to handle tags for running jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1459">YARN-1459</a>.
+     Major sub-task reported by Karthik Kambatla and fixed by Xuan Gong (resourcemanager)<br>
+     <b>RM services should depend on ConfigurationProvider during startup too</b><br>
+     <blockquote>YARN-1667, YARN-1668, and YARN-1669 already changed the RM to depend on a configuration provider so that many configuration files can be refreshed across RM failover. The RM's dependency on the configuration provider should also apply at boot time.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1452">YARN-1452</a>.
+     Major task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Document the usage of the generic application history and the timeline data service</b><br>
+     <blockquote>We need to write a set of documents to guide users, covering command-line tools, configurations, and REST APIs.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1444">YARN-1444</a>.
+     Blocker bug reported by Robert Grandl and fixed by Wangda Tan (client , resourcemanager)<br>
+     <b>RM crashes when node resource request sent without corresponding off-switch request</b><br>
+     <blockquote>I have tried to force reducers to execute on certain nodes. What I did was change, for reduce tasks, RMContainerRequestor#addResourceRequest(req.priority, ResourceRequest.ANY, req.capability) to RMContainerRequestor#addResourceRequest(req.priority, HOST_NAME, req.capability).
+
+However, this change led to RM crashes with the following exception when reducers needed to be assigned:
+FATAL org.apache.hadoop.yarn.server.resourcemanager.ResourceManager: Error in handling event type NODE_UPDATE to the scheduler
+java.lang.NullPointerException
+    at org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue.assignContainers(LeafQueue.java:841)
+    at org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue.assignContainersToChildQueues(ParentQueue.java:640)
+    at org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue.assignContainers(ParentQueue.java:554)
+    at org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler.nodeUpdate(CapacityScheduler.java:695)
+    at org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler.handle(CapacityScheduler.java:739)
+    at org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler.handle(CapacityScheduler.java:86)
+    at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessor.run(ResourceManager.java:549)
+    at java.lang.Thread.run(Thread.java:722)
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1428">YARN-1428</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>RM cannot write the final state of RMApp/RMAppAttempt to the application history store in the transition to the final state</b><br>
+     <blockquote>ApplicationFinishData and ApplicationAttemptFinishData are written in the final transitions of RMApp/RMAppAttempt respectively. However, in those transitions, getState() does not return the state that RMApp/RMAppAttempt is going to enter, but the prior one.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1417">YARN-1417</a>.
+     Blocker bug reported by Omkar Vinit Joshi and fixed by Jian He <br>
+     <b>RM may issue expired container tokens to AM while issuing new containers.</b><br>
+     <blockquote>Today we create a new container token when we create a container in the RM as part of the schedule cycle. However, that container may get reserved or assigned. If the container gets reserved and remains in the reserved state for longer than the container token expiry interval, then the RM will end up issuing a container with an expired token.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1410">YARN-1410</a>.
+     Major sub-task reported by Bikas Saha and fixed by Xuan Gong <br>
+     <b>Handle RM fails over after getApplicationID() and before submitApplication().</b><br>
+     <blockquote>App submission involves
+1) creating an appId
+2) using that appId to submit an ApplicationSubmissionContext to the RM.
+The client may have obtained an appId from an RM, the RM may have failed over, and the client may then submit the app to the new RM.
+Since the new RM has a different notion of the cluster timestamp (used to create the app id), the new RM may reject the app submission, resulting in an unexpected failure on the client side.
+
+The same may happen for other two-step client API operations. A sketch of the two-step flow is shown below.</blockquote></li>
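+A minimal, hypothetical sketch of the two-step flow described above, assuming the standard
+YarnClient client API; the class name TwoStepSubmission is illustrative only, and a real
+client would also populate the AM container spec before submitting.
+{code}
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.client.api.YarnClientApplication;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+public class TwoStepSubmission {
+  public static void main(String[] args) throws Exception {
+    YarnClient yarnClient = YarnClient.createYarnClient();
+    yarnClient.init(new YarnConfiguration());
+    yarnClient.start();
+
+    // Step 1: ask the RM for a new application; the returned id embeds the
+    // RM's cluster timestamp.
+    YarnClientApplication app = yarnClient.createApplication();
+    ApplicationSubmissionContext ctx = app.getApplicationSubmissionContext();
+    ApplicationId appId = ctx.getApplicationId();
+
+    // (A real client would also set the application name, queue and the
+    //  AM ContainerLaunchContext on ctx before submitting.)
+
+    // Step 2: submit. If the RM failed over between step 1 and step 2, the
+    // new RM may not recognize appId and can reject the submission.
+    yarnClient.submitApplication(ctx);
+    System.out.println("Submitted " + appId);
+
+    yarnClient.stop();
+  }
+}
+{code}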
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1398">YARN-1398</a>.
+     Blocker bug reported by Sunil G and fixed by Vinod Kumar Vavilapalli (resourcemanager)<br>
+     <b>Deadlock in capacity scheduler leaf queue and parent queue for getQueueInfo and completedContainer call</b><br>
+     <blockquote>getQueueInfo in ParentQueue will call child.getQueueInfo().
+This will try to acquire the leaf queue lock while holding the parent queue lock.
+
+If, at the same time, a completedContainer call comes in and acquires the LeafQueue lock, it will then wait for ParentQueue's completedContainer call.
+
+This lock ordering is inconsistent and can lead to deadlock.
+
+JCarder shows this as a potential deadlock scenario.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1389">YARN-1389</a>.
+     Major sub-task reported by Mayank Bansal and fixed by Mayank Bansal <br>
+     <b>ApplicationClientProtocol and ApplicationHistoryProtocol should expose analogous APIs</b><br>
+     <blockquote>As we plan to have APIs in ApplicationHistoryProtocol to expose the reports of *finished* application attempts and containers, we should do the same for ApplicationClientProtocol, which will return the reports of *running* attempts and containers.
+
+Later on, we can improve YarnClient to direct queries for running instances to ApplicationClientProtocol and queries for finished instances to ApplicationHistoryProtocol, making it transparent to users.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1379">YARN-1379</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>[YARN-321] AHS protocols need to be in yarn proto package name after YARN-1170</b><br>
+     <blockquote>Found this while merging YARN-321 to the latest branch-2. Without this, compilation fails.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1345">YARN-1345</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Removing FINAL_SAVING from YarnApplicationAttemptState</b><br>
+     <blockquote>Whenever YARN-891 is done, we need to add the mapping of RMAppAttemptState.FINAL_SAVING -&gt; YarnApplicationAttemptState.FINAL_SAVING in RMServerUtils#createApplicationAttemptState</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1301">YARN-1301</a>.
+     Minor bug reported by Zhijie Shen and fixed by Tsuyoshi OZAWA <br>
+     <b>Need to log the blacklist additions/removals when YarnSchedule#allocate</b><br>
+     <blockquote>Without this log, it is hard to debug whether the blacklist is updated on the scheduler side or not.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1285">YARN-1285</a>.
+     Major bug reported by Zhijie Shen and fixed by Kenji Kikushima <br>
+     <b>Inconsistency of default "yarn.acl.enable" value</b><br>
+     <blockquote>In yarn-default.xml, "yarn.acl.enable" is true while in YarnConfiguration, DEFAULT_YARN_ACL_ENABLE is false.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1266">YARN-1266</a>.
+     Major sub-task reported by Mayank Bansal and fixed by Mayank Bansal <br>
+     <b>Implement PB service and client wrappers for ApplicationHistoryProtocol</b><br>
+     <blockquote>Adding ApplicationHistoryProtocolPBService to make the web apps work, and changing the yarn script to run the AHS as a separate process.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1242">YARN-1242</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Mayank Bansal <br>
+     <b>Script changes to start AHS as an individual process</b><br>
+     <blockquote>Add the command in yarn and yarn.cmd to start and stop AHS</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1206">YARN-1206</a>.
+     Blocker bug reported by Jian He and fixed by Rohith <br>
+     <b>AM container log link broken on NM web page even though local container logs are available</b><br>
+     <blockquote>With log aggregation disabled, a container's log link works properly while the container is running, but after the application finishes, the link shows 'Container does not exist.'</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1191">YARN-1191</a>.
+     Major sub-task reported by Mayank Bansal and fixed by Mayank Bansal <br>
+     <b>[YARN-321] Update artifact versions for application history service</b><br>
+     <blockquote>Compilation is failing for YARN-321 branch
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1171">YARN-1171</a>.
+     Major improvement reported by Sandy Ryza and fixed by Naren Koneru (documentation , scheduler)<br>
+     <b>Add default queue properties to Fair Scheduler documentation </b><br>
+     <blockquote>The Fair Scheduler doc is missing the following properties.
+- defaultMinSharePreemptionTimeout
+- queueMaxAppsDefault</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1166">YARN-1166</a>.
+     Blocker bug reported by Srimanth Gunturi and fixed by Zhijie Shen (resourcemanager)<br>
+     <b>YARN 'appsFailed' metric should be of type 'counter'</b><br>
+     <blockquote>Currently in YARN's queue metrics, the cumulative metric 'appsFailed' is of type 'gauge', which means the exact value will be reported.
+
+All other cumulative queue metrics (AppsSubmitted, AppsCompleted, AppsKilled) are of type 'counter', meaning Ganglia will use the slope to provide deltas between time points.
+
+To be consistent, the AppsFailed metric should also be of type 'counter'.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1123">YARN-1123</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Mayank Bansal <br>
+     <b>[YARN-321] Adding ContainerReport and Protobuf implementation</b><br>
+     <blockquote>Like YARN-978, we need some client-oriented class to expose the container history info. Neither Container nor RMContainer is the right one.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1071">YARN-1071</a>.
+     Major bug reported by Srimanth Gunturi and fixed by Jian He (resourcemanager)<br>
+     <b>ResourceManager's decommissioned and lost node count is 0 after restart</b><br>
+     <blockquote>I had 6 nodes in a cluster with 2 NMs stopped. Then I put a host into YARN's {{yarn.resourcemanager.nodes.exclude-path}}. After running {{yarn rmadmin -refreshNodes}}, RM's JMX correctly showed decommissioned node count:
+{noformat}
+"NumActiveNMs" : 3,
+"NumDecommissionedNMs" : 1,
+"NumLostNMs" : 2,
+"NumUnhealthyNMs" : 0,
+"NumRebootedNMs" : 0
+{noformat}
+
+After restarting RM, the counts were shown as below in JMX.
+{noformat}
+"NumActiveNMs" : 3,
+"NumDecommissionedNMs" : 0,
+"NumLostNMs" : 0,
+"NumUnhealthyNMs" : 0,
+"NumRebootedNMs" : 0
+{noformat}
+
+Notice that the lost and decommissioned NM counts are both 0.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1041">YARN-1041</a>.
+     Major sub-task reported by Steve Loughran and fixed by Jian He (resourcemanager)<br>
+     <b>Protocol changes for RM to bind and notify a restarted AM of existing containers</b><br>
+     <blockquote>For long-lived containers we don't want the AM to be a SPOF.
+
+When the RM restarts a (failed) AM, it should be given the list of containers it had already been allocated. The AM should then be able to contact the NMs to get details on them. NMs would also need to do any binding of the containers needed to handle a moved/restarted AM.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1023">YARN-1023</a>.
+     Major sub-task reported by Devaraj K and fixed by Zhijie Shen <br>
+     <b>[YARN-321] Webservices REST API's support for Application History</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1017">YARN-1017</a>.
+     Blocker sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+     <b>Document RM Restart feature</b><br>
+     <blockquote>This should give users a general idea about how RM Restart works and how to use RM Restart</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1007">YARN-1007</a>.
+     Major sub-task reported by Devaraj K and fixed by Mayank Bansal <br>
+     <b>[YARN-321] Enhance History Reader interface for Containers</b><br>
+     <blockquote>If we want to show the containers used by an application/app attempt, we need two more APIs which return collections of ContainerHistoryData for an application id and an application attempt id, something like below.
+
+{code:xml}
+  Collection&lt;ContainerHistoryData&gt; getContainers(
+      ApplicationAttemptId appAttemptId);
+
+  Collection&lt;ContainerHistoryData&gt; getContainers(ApplicationId appId);
+{code}
+
+
+{code:xml}
+  /**
+   * This method returns {@link Container} for specified {@link ContainerId}.
+   * 
+   * @param {@link ContainerId}
+   * @return {@link Container} for ContainerId
+   */
+  ContainerHistoryData getAMContainer(ContainerId containerId);
+{code}
+In the above API, we need to change the argument to an application attempt id, or we can remove this API entirely: every attempt's history data has a master container id field, and with that master container id the history data can be obtained from the API below if it takes a container id as its argument.
+
+
+{code:xml}
+  /**
+   * This method returns {@link ContainerHistoryData} for specified
+   * {@link ApplicationAttemptId}.
+   * 
+   * @param {@link ApplicationAttemptId}
+   * @return {@link ContainerHistoryData} for ApplicationAttemptId
+   */
+  ContainerHistoryData getContainer(ApplicationAttemptId appAttemptId);
+{code}
+Here an application attempt can use a number of containers, but we cannot choose which container's history data to return. This API's argument also needs to be changed to take a container id instead of an app attempt id.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-987">YARN-987</a>.
+     Major sub-task reported by Mayank Bansal and fixed by Mayank Bansal <br>
+     <b>Adding ApplicationHistoryManager responsible for exposing reports to all clients</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-986">YARN-986</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Karthik Kambatla <br>
+     <b>RM DT token service should have service addresses of both RMs</b><br>
+     <blockquote>Previously: YARN should use cluster-id as token service address
+
+This needs to be done to support non-IP-based failover of the RM. Once the server sets the token service address to be this generic ClusterId/ServiceId, clients can translate it to the appropriate final IP and then be able to select tokens via TokenSelectors.
+
+Some workarounds for other related issues were put in place at YARN-945.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-984">YARN-984</a>.
+     Major sub-task reported by Devaraj K and fixed by Devaraj K <br>
+     <b>[YARN-321] Move classes from applicationhistoryservice.records.pb.impl package to applicationhistoryservice.records.impl.pb</b><br>
+     <blockquote>While creating instances of applicationhistoryservice.records.* PB records, a ClassNotFoundException is thrown.
+
+{code:xml}
+Caused by: java.lang.ClassNotFoundException: Class org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationHistoryDataPBImpl not found
+	at org.apache.hadoop.conf.Configuration.getClassByName(Configuration.java:1619)
+	at org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl.newRecordInstance(RecordFactoryPBImpl.java:56)
+	... 49 more
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-979">YARN-979</a>.
+     Major sub-task reported by Mayank Bansal and fixed by Mayank Bansal <br>
+     <b>[YARN-321] Add more APIs related to ApplicationAttempt and Container in ApplicationHistoryProtocol</b><br>
+     <blockquote>ApplicationHistoryProtocol should have the following APIs as well:
+
+* getApplicationAttemptReport
+* getApplicationAttempts
+* getContainerReport
+* getContainers
+
+The corresponding request and response classes need to be added as well.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-978">YARN-978</a>.
+     Major sub-task reported by Mayank Bansal and fixed by Mayank Bansal <br>
+     <b>[YARN-321] Adding ApplicationAttemptReport and Protobuf implementation</b><br>
+     <blockquote>We don't have ApplicationAttemptReport and its Protobuf implementation.
+
+Adding that.
+
+Thanks,
+Mayank</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-975">YARN-975</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Add a file-system implementation for history-storage</b><br>
+     <blockquote>An HDFS-based implementation should be a standard persistence strategy for history storage.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-974">YARN-974</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>RMContainer should collect more useful information to be recorded in Application-History</b><br>
+     <blockquote>To record the history of a container, users may also be interested in the following information:
+
+1. Start Time
+2. Stop Time
+3. Diagnostic Information
+4. URL to the Log File
+5. Actually Allocated Resource
+6. Actually Assigned Node
+
+These should be remembered during the RMContainer's life cycle.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-967">YARN-967</a>.
+     Major sub-task reported by Devaraj K and fixed by Mayank Bansal <br>
+     <b>[YARN-321] Command Line Interface(CLI) for Reading Application History Storage Data</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-962">YARN-962</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Update application_history_service.proto</b><br>
+     <blockquote>1. Change its name to application_history_client.proto
+2. Fix the incorrect proto reference.
+3. Correct the dir in pom.xml</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-956">YARN-956</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Zhijie Shen <br>
+     <b>[YARN-321] Add a testable in-memory HistoryStorage </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-955">YARN-955</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Mayank Bansal <br>
+     <b>[YARN-321] Implementation of ApplicationHistoryProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-954">YARN-954</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Zhijie Shen <br>
+     <b>[YARN-321] History Service should create the webUI and wire it to HistoryStorage</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-953">YARN-953</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Zhijie Shen <br>
+     <b>[YARN-321] Enable ResourceManager to write history data</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-947">YARN-947</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Defining the history data classes for the implementation of the reading/writing interface</b><br>
+     <blockquote>We need to define history data classes that have the exact fields to be stored. Then the implementations do not need duplicate logic to extract the required information from RMApp, RMAppAttempt and RMContainer.
+
+We use protobuf to define these classes so that they can be serialized to and deserialized from bytes, which is easier for persistence.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-935">YARN-935</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>YARN-321 branch is broken due to applicationhistoryserver module's pom.xml</b><br>
+     <blockquote>The branch was created from branch-2, so hadoop-yarn-server-applicationhistoryserver/pom.xml should use 2.2.0-SNAPSHOT, not 3.0.0-SNAPSHOT. Otherwise, the sub-project cannot be built correctly because of the wrong dependency.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-934">YARN-934</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>HistoryStorage writer interface for Application History Server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-930">YARN-930</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>Bootstrap ApplicationHistoryService module</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-713">YARN-713</a>.
+     Critical bug reported by Jason Lowe and fixed by Jian He (resourcemanager)<br>
+     <b>ResourceManager can exit unexpectedly if DNS is unavailable</b><br>
+     <blockquote>As discussed in MAPREDUCE-5261, there's a possibility that a DNS outage could lead to an unhandled exception in the ResourceManager's AsyncDispatcher, and that ultimately would cause the RM to exit.  The RM should not exit during DNS hiccups.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5813">MAPREDUCE-5813</a>.
+     Blocker bug reported by Gera Shegalov and fixed by Gera Shegalov (mrv2 , task)<br>
+     <b>YarnChild does not load job.xml with mapreduce.job.classloader=true </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5810">MAPREDUCE-5810</a>.
+     Major bug reported by Mit Desai and fixed by Akira AJISAKA (contrib/streaming)<br>
+     <b>TestStreamingTaskLog#testStreamingTaskLogWithHadoopCmd is failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5806">MAPREDUCE-5806</a>.
+     Major bug reported by Eugene Koifman and fixed by Varun Vasudev <br>
+     <b>Log4j settings in container-log4j.properties cannot be overridden </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5805">MAPREDUCE-5805</a>.
+     Major bug reported by Fengdong Yu and fixed by Akira AJISAKA (jobhistoryserver)<br>
+     <b>Unable to parse launch time from job history file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5795">MAPREDUCE-5795</a>.
+     Major bug reported by Yesha Vora and fixed by Xuan Gong <br>
+     <b>Job should be marked as Failed if it is recovered from commit.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5794">MAPREDUCE-5794</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (test)<br>
+     <b>SliveMapper always uses default FileSystem.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5791">MAPREDUCE-5791</a>.
+     Major bug reported by Nikola Vujic and fixed by Nikola Vujic (client)<br>
+     <b>Shuffle phase is slow in Windows - FadviseFileRegion::transferTo does not read disks efficiently</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5789">MAPREDUCE-5789</a>.
+     Major bug reported by Rushabh S Shah and fixed by Rushabh S Shah (jobhistoryserver , webapps)<br>
+     <b>Average Reduce time is incorrect on Job Overview page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5787">MAPREDUCE-5787</a>.
+     Critical sub-task reported by Rajesh Balamohan and fixed by Rajesh Balamohan (nodemanager)<br>
+     <b>Modify ShuffleHandler to support Keep-Alive</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5780">MAPREDUCE-5780</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (test)<br>
+     <b>SliveTest always uses default FileSystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5778">MAPREDUCE-5778</a>.
+     Major bug reported by Jason Lowe and fixed by Akira AJISAKA (jobhistoryserver)<br>
+     <b>JobSummary does not escape newlines in the job name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5773">MAPREDUCE-5773</a>.
+     Blocker improvement reported by Gera Shegalov and fixed by Gera Shegalov (mr-am)<br>
+     <b>Provide dedicated MRAppMaster syslog length limit</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5770">MAPREDUCE-5770</a>.
+     Major bug reported by Yesha Vora and fixed by Jian He <br>
+     <b>Redirection from AM-URL is broken with HTTPS_ONLY policy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5769">MAPREDUCE-5769</a>.
+     Major bug reported by Rohith and fixed by Rohith <br>
+     <b>Unregistration to RM should not be called if AM is crashed before registering with RM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5768">MAPREDUCE-5768</a>.
+     Major bug reported by Zhijie Shen and fixed by Gera Shegalov <br>
+     <b>TestMRJobs.testContainerRollingLog fails on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5766">MAPREDUCE-5766</a>.
+     Minor bug reported by Ramya Sunil and fixed by Jian He (applicationmaster)<br>
+     <b>Ping messages from attempts should be moved to DEBUG</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5761">MAPREDUCE-5761</a>.
+     Trivial improvement reported by Yesha Vora and fixed by Jian He <br>
+     <b>Add a log message like "encrypted shuffle is ON" in nodemanager logs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5757">MAPREDUCE-5757</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (client)<br>
+     <b>ConcurrentModificationException in JobControl.toList</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5754">MAPREDUCE-5754</a>.
+     Major improvement reported by Gera Shegalov and fixed by Gera Shegalov (jobhistoryserver , mr-am)<br>
+     <b>Preserve Job diagnostics in history</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5751">MAPREDUCE-5751</a>.
+     Major bug reported by Sangjin Lee and fixed by Sangjin Lee <br>
+     <b>MR app master fails to start in some cases if mapreduce.job.classloader is true</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5746">MAPREDUCE-5746</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (jobhistoryserver)<br>
+     <b>Job diagnostics can implicate wrong task for a failed job</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5732">MAPREDUCE-5732</a>.
+     Major improvement reported by Sandy Ryza and fixed by Sandy Ryza <br>
+     <b>Report proper queue when job has been automatically placed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5699">MAPREDUCE-5699</a>.
+     Major bug reported by Karthik Kambatla and fixed by Karthik Kambatla (applicationmaster)<br>
+     <b>Allow setting tags on MR jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5688">MAPREDUCE-5688</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestStagingCleanup fails intermittently with JDK7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5670">MAPREDUCE-5670</a>.
+     Minor bug reported by Jason Lowe and fixed by Chen He (mrv2)<br>
+     <b>CombineFileRecordReader should report progress when moving to the next file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5570">MAPREDUCE-5570</a>.
+     Major bug reported by Jason Lowe and fixed by Rushabh S Shah (mr-am , mrv2)<br>
+     <b>Map task attempt with fetch failure has incorrect attempt finish time</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5553">MAPREDUCE-5553</a>.
+     Minor improvement reported by Paul Han and fixed by Paul Han (applicationmaster)<br>
+     <b>Add task state filters on Application/MRJob page for MR Application master </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5028">MAPREDUCE-5028</a>.
+     Critical bug reported by Karthik Kambatla and fixed by Karthik Kambatla <br>
+     <b>Maps fail when io.sort.mb is set to high value</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4052">MAPREDUCE-4052</a>.
+     Major bug reported by xieguiming and fixed by Jian He (job submission)<br>
+     <b>Windows eclipse cannot submit job from Windows client to Linux/Unix Hadoop cluster.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2349">MAPREDUCE-2349</a>.
+     Major improvement reported by Joydeep Sen Sarma and fixed by Siddharth Seth (task)<br>
+     <b>speed up list[located]status calls from input formats</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6166">HDFS-6166</a>.
+     Blocker bug reported by Nathan Roberts and fixed by Nathan Roberts (balancer)<br>
+     <b>revisit balancer so_timeout </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6163">HDFS-6163</a>.
+     Minor bug reported by Fengdong Yu and fixed by Fengdong Yu (documentation)<br>
+     <b>Fix a minor bug in the HA upgrade document</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6157">HDFS-6157</a>.
+     Major bug reported by Haohui Mai and fixed by Haohui Mai <br>
+     <b>Fix the entry point of OfflineImageViewer for hdfs.cmd</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6150">HDFS-6150</a>.
+     Minor improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (namenode)<br>
+     <b>Add inode id information in the logs to make debugging easier</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6140">HDFS-6140</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (webhdfs)<br>
+     <b>WebHDFS cannot create a file with spaces in the name after HA failover changes.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6138">HDFS-6138</a>.
+     Minor improvement reported by Sanjay Radia and fixed by Sanjay Radia (documentation)<br>
+     <b>User Guide for how to use viewfs with federation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6135">HDFS-6135</a>.
+     Blocker bug reported by Jing Zhao and fixed by Jing Zhao (journal-node)<br>
+     <b>In HDFS upgrade with HA setup, JournalNode cannot handle layout version bump when rolling back</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6131">HDFS-6131</a>.
+     Major bug reported by Jing Zhao and fixed by Jing Zhao (documentation)<br>
+     <b>Move HDFSHighAvailabilityWithNFS.apt.vm and HDFSHighAvailabilityWithQJM.apt.vm from Yarn to HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6130">HDFS-6130</a>.
+     Blocker bug reported by Fengdong Yu and fixed by Haohui Mai (namenode)<br>
+     <b>NPE when upgrading namenode from fsimages older than -32</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6129">HDFS-6129</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (datanode)<br>
+     <b>When a replica is not found for deletion, do not throw exception.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6127">HDFS-6127</a>.
+     Major bug reported by Arpit Gupta and fixed by Haohui Mai (ha)<br>
+     <b>WebHDFS tokens cannot be renewed in HA setup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6124">HDFS-6124</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Add final modifier to class members</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6123">HDFS-6123</a>.
+     Minor improvement reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (datanode)<br>
+     <b>Improve datanode error messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6120">HDFS-6120</a>.
+     Major improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>Fix and improve safe mode log messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6117">HDFS-6117</a>.
+     Minor bug reported by Suresh Srinivas and fixed by Suresh Srinivas (namenode)<br>
+     <b>Print file path information in FileNotFoundException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6115">HDFS-6115</a>.
+     Minor bug reported by Vinayakumar B and fixed by Vinayakumar B (datanode)<br>
+     <b>flush() should be called for every append on block scan verification log</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6107">HDFS-6107</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (datanode)<br>
+     <b>When a block can't be cached due to limited space on the DataNode, that block becomes uncacheable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6106">HDFS-6106</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>Reduce default for dfs.namenode.path.based.cache.refresh.interval.ms</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6105">HDFS-6105</a>.
+     Major bug reported by Kihwal Lee and fixed by Haohui Mai <br>
+     <b>NN web UI for DN list loads the same jmx page multiple times.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6102">HDFS-6102</a>.
+     Blocker bug reported by Andrew Wang and fixed by Andrew Wang (namenode)<br>
+     <b>Lower the default maximum items per directory to fix PB fsimage loading</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6100">HDFS-6100</a>.
+     Major bug reported by Arpit Gupta and fixed by Haohui Mai (ha)<br>
+     <b>DataNodeWebHdfsMethods does not failover in HA mode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6099">HDFS-6099</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
+     <b>HDFS file system limits not enforced on renames.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6097">HDFS-6097</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>zero-copy reads are incorrectly disabled on file offsets above 2GB</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6096">HDFS-6096</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (test)<br>
+     <b>TestWebHdfsTokens may timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6094">HDFS-6094</a>.
+     Major bug reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>The same block can be counted twice towards safe mode threshold</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6090">HDFS-6090</a>.
+     Minor improvement reported by Akira AJISAKA and fixed by Akira AJISAKA (test)<br>
+     <b>Use MiniDFSCluster.Builder instead of deprecated constructors</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6089">HDFS-6089</a>.
+     Major bug reported by Arpit Gupta and fixed by Jing Zhao (ha)<br>
+     <b>Standby NN while transitioning to active throws a connection refused error when the prior active NN process is suspended</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6086">HDFS-6086</a>.
+     Major sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (datanode)<br>
+     <b>Fix a case where zero-copy or no-checksum reads were not allowed even when the block was cached</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6085">HDFS-6085</a>.
+     Major improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (namenode)<br>
+     <b>Improve CacheReplicationMonitor log messages a bit</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6084">HDFS-6084</a>.
+     Minor improvement reported by Travis Thompson and fixed by Travis Thompson (namenode)<br>
+     <b>Namenode UI - "Hadoop" logo link shouldn't go to hadoop homepage</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6080">HDFS-6080</a>.
+     Major improvement reported by Abin Shahab and fixed by Abin Shahab (nfs , performance)<br>
+     <b>Improve NFS gateway performance by making rtmax and wtmax configurable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6079">HDFS-6079</a>.
+     Major bug reported by Andrew Wang and fixed by Andrew Wang (hdfs-client)<br>
+     <b>Timeout for getFileBlockStorageLocations does not work</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6078">HDFS-6078</a>.
+     Minor bug reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>TestIncrementalBlockReports is flaky</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6077">HDFS-6077</a>.
+     Major bug reported by Arpit Gupta and fixed by Jing Zhao <br>
+     <b>running slive with webhdfs on secure HA cluster fails with unkown host exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6076">HDFS-6076</a>.
+     Minor sub-task reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (datanode , test)<br>
+     <b>SimulatedDataSet should not create DatanodeRegistration with namenode layout version and type</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6072">HDFS-6072</a>.
+     Major improvement reported by Haohui Mai and fixed by Haohui Mai <br>
+     <b>Clean up dead code of FSImage</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6071">HDFS-6071</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>BlockReaderLocal doesn't return -1 on EOF when doing a zero-length read on a short file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6070">HDFS-6070</a>.
+     Trivial improvement reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Cleanup use of ReadStatistics in DFSInputStream</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6069">HDFS-6069</a>.
+     Trivial improvement reported by Andrew Wang and fixed by Chris Nauroth (namenode)<br>
+     <b>Quash stack traces when ACLs are disabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6068">HDFS-6068</a>.
+     Major bug reported by Andrew Wang and fixed by sathish (snapshots)<br>
+     <b>Disallow snapshot names that are also invalid directory names</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6067">HDFS-6067</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>TestPread.testMaxOutHedgedReadPool is flaky</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6065">HDFS-6065</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>HDFS zero-copy reads should return null on EOF when doing ZCR</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6064">HDFS-6064</a>.
+     Minor bug reported by Vinayakumar B and fixed by Vinayakumar B (datanode)<br>
+     <b>DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT is not updated with latest block report interval of 6 hrs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6063">HDFS-6063</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Chris Nauroth (test , tools)<br>
+     <b>TestAclCLI fails intermittently when running test 24: copyFromLocal</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6062">HDFS-6062</a>.
+     Minor bug reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>TestRetryCacheWithHA#testConcat is flaky</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6061">HDFS-6061</a>.
+     Major sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (datanode)<br>
+     <b>Allow dfs.datanode.shared.file.descriptor.path to contain multiple entries and fall back when needed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6060">HDFS-6060</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (namenode)<br>
+     <b>NameNode should not check DataNode layout version</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6059">HDFS-6059</a>.
+     Major bug reported by Akira AJISAKA and fixed by Akira AJISAKA <br>
+     <b>TestBlockReaderLocal fails if native library is not available</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6058">HDFS-6058</a>.
+     Major bug reported by Vinayakumar B and fixed by Haohui Mai <br>
+     <b>Fix TestHDFSCLI failures after HADOOP-8691 change</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6057">HDFS-6057</a>.
+     Blocker bug reported by Eric Sirianni and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>DomainSocketWatcher.watcherThread should be marked as daemon thread</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6055">HDFS-6055</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Chris Nauroth (namenode)<br>
+     <b>Change default configuration to limit file name length in HDFS</b><br>
+     <blockquote>The default configuration of HDFS now sets dfs.namenode.fs-limits.max-component-length to 255 for improved interoperability with other file system implementations.  This limits each component of a file system path to a maximum of 255 bytes in UTF-8 encoding.  Attempts to create new files that violate this rule will fail with an error.  Existing files that violate the rule are not affected.  Previously, dfs.namenode.fs-limits.max-component-length was set to 0 (ignored).  If necessary, it is possible to set the value back to 0 in the cluster's configuration to restore the old behavior, as sketched below.</blockquote></li>
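+A minimal, hypothetical sketch of restoring the previous behavior described above by setting
+dfs.namenode.fs-limits.max-component-length back to 0; in practice this property would normally
+be set in the cluster's hdfs-site.xml rather than in code, and the class name is illustrative only.
+{code}
+import org.apache.hadoop.conf.Configuration;
+
+public class RestoreComponentLengthLimit {
+  public static void main(String[] args) {
+    Configuration conf = new Configuration();
+    // 0 means the component-length limit is ignored, i.e. the pre-change behavior.
+    conf.setInt("dfs.namenode.fs-limits.max-component-length", 0);
+    System.out.println(conf.get("dfs.namenode.fs-limits.max-component-length"));
+  }
+}
+{code}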
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6053">HDFS-6053</a>.
+     Major bug reported by Jing Zhao and fixed by Jing Zhao (namenode)<br>
+     <b>Fix TestDecommissioningStatus and TestDecommission in branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6051">HDFS-6051</a>.
+     Blocker bug reported by Chris Nauroth and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>HDFS cannot run on Windows since short-circuit shared memory segment changes.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6047">HDFS-6047</a>.
+     Major bug reported by stack and fixed by stack <br>
+     <b>TestPread NPE inside in DFSInputStream hedgedFetchBlockByteRange</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6046">HDFS-6046</a>.
+     Major sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>add dfs.client.mmap.enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6044">HDFS-6044</a>.
+     Minor improvement reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Add property for setting the NFS look up time for users</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6043">HDFS-6043</a>.
+     Major improvement reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Give HDFS daemons NFS3 and Portmap their own OPTS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6040">HDFS-6040</a>.
+     Blocker sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>fix DFSClient issue without libhadoop.so and some other ShortCircuitShm cleanups</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6039">HDFS-6039</a>.
+     Major bug reported by Yesha Vora and fixed by Chris Nauroth (namenode)<br>
+     <b>Uploading a File under a Dir with default acls throws "Duplicated ACLFeature"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6038">HDFS-6038</a>.
+     Major sub-task reported by Haohui Mai and fixed by Jing Zhao (journal-node , namenode)<br>
+     <b>Allow JournalNode to handle editlog produced by new release with future layoutversion</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6033">HDFS-6033</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (caching)<br>
+     <b>PBImageXmlWriter incorrectly handles processing cache directives</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6030">HDFS-6030</a>.
+     Trivial task reported by Yongjun Zhang and fixed by Yongjun Zhang <br>
+     <b>Remove an unused constructor in INode.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6028">HDFS-6028</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
+     <b>Print clearer error message when user attempts to delete required mask entry from ACL.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6025">HDFS-6025</a>.
+     Minor task reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (build)<br>
+     <b>Update findbugsExcludeFile.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6018">HDFS-6018</a>.
+     Trivial improvement reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>Exception recorded in LOG when IPCLoggerChannel#close is called</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6008">HDFS-6008</a>.
+     Minor bug reported by Benoy Antony and fixed by Benoy Antony (namenode)<br>
+     <b>Namenode dead node link is giving HTTP error 500</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6006">HDFS-6006</a>.
+     Trivial improvement reported by Akira AJISAKA and fixed by Akira AJISAKA (namenode)<br>
+     <b>Remove duplicate code in FSNameSystem#getFileInfo</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5988">HDFS-5988</a>.
+     Blocker bug reported by Andrew Wang and fixed by Andrew Wang (namenode)<br>
+     <b>Bad fsimage always generated after upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5986">HDFS-5986</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Chris Nauroth (namenode)<br>
+     <b>Capture the number of blocks pending deletion on namenode webUI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5982">HDFS-5982</a>.
+     Critical bug reported by Tassapol Athiapinya and fixed by Jing Zhao (namenode)<br>
+     <b>Need to update snapshot manager when applying editlog for deleting a snapshottable directory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5981">HDFS-5981</a>.
+     Minor bug reported by Haohui Mai and fixed by Haohui Mai (tools)<br>
+     <b>PBImageXmlWriter generates malformed XML</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5979">HDFS-5979</a>.
+     Minor improvement reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Typo and logger fix for fsimage PB code</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5973">HDFS-5973</a>.
+     Major sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>add DomainSocket#shutdown method</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5962">HDFS-5962</a>.
+     Critical bug reported by Kihwal Lee and fixed by Akira AJISAKA <br>
+     <b>Mtime and atime are not persisted for symbolic links</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5961">HDFS-5961</a>.
+     Critical bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>OIV cannot load fsimages containing a symbolic link</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5959">HDFS-5959</a>.
+     Minor bug reported by Akira AJISAKA and fixed by Akira AJISAKA <br>
+     <b>Fix typo at section name in FSImageFormatProtobuf.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5956">HDFS-5956</a>.
+     Major sub-task reported by Akira AJISAKA and fixed by Akira AJISAKA (tools)<br>
+     <b>A file size is multiplied by the replication factor in 'hdfs oiv -p FileDistribution' option</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5953">HDFS-5953</a>.
+     Major test reported by Ted Yu and fixed by Akira AJISAKA <br>
+     <b>TestBlockReaderFactory fails if libhadoop.so has not been built</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5950">HDFS-5950</a>.
+     Major sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (datanode , hdfs-client)<br>
+     <b>The DFSClient and DataNode should use shared memory segments to communicate short-circuit information</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5949">HDFS-5949</a>.
+     Minor bug reported by Travis Thompson and fixed by Travis Thompson (namenode)<br>
+     <b>New Namenode UI when trying to download a file, the browser doesn't know the file name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5948">HDFS-5948</a>.
+     Major bug reported by Andrew Wang and fixed by Haohui Mai <br>
+     <b>TestBackupNode flakes with port in use error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5944">HDFS-5944</a>.
+     Major bug reported by zhaoyunjiong and fixed by zhaoyunjiong (namenode)<br>
+     <b>LeaseManager:findLeaseWithPrefixPath can't handle path like /a/b/ right and cause SecondaryNameNode failed do checkpoint</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5943">HDFS-5943</a>.
+     Major bug reported by Yesha Vora and fixed by Suresh Srinivas <br>
+     <b>'dfs.namenode.https-address.ns1' property is not used in federation setup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5942">HDFS-5942</a>.
+     Minor sub-task reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation , tools)<br>
+     <b>Fix javadoc in OfflineImageViewer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5941">HDFS-5941</a>.
+     Major bug reported by Haohui Mai and fixed by Haohui Mai (documentation , namenode)<br>
+     <b>add dfs.namenode.secondary.https-address and dfs.namenode.secondary.https-address in hdfs-default.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5940">HDFS-5940</a>.
+     Major sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>Minor cleanups to ShortCircuitReplica, FsDatasetCache, and DomainSocketWatcher</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5939">HDFS-5939</a>.
+     Major improvement reported by Yongjun Zhang and fixed by Yongjun Zhang (hdfs-client)<br>
+     <b>WebHdfs returns misleading error code and logs nothing if trying to create a file with no DNs in cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5938">HDFS-5938</a>.
+     Trivial sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>Make BlockReaderFactory#BlockReaderPeer a static class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5936">HDFS-5936</a>.
+     Major test reported by Andrew Wang and fixed by Binglin Chang (namenode , test)<br>
+     <b>MiniDFSCluster does not clean data left behind by SecondaryNameNode.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5935">HDFS-5935</a>.
+     Minor improvement reported by Travis Thompson and fixed by Travis Thompson (namenode)<br>
+     <b>New Namenode UI FS browser should throw smarter error messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5934">HDFS-5934</a>.
+     Minor bug reported by Travis Thompson and fixed by Travis Thompson (namenode)<br>
+     <b>New Namenode UI back button doesn't work as expected</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5929">HDFS-5929</a>.
+     Major improvement reported by Siqi Li and fixed by Siqi Li (federation)<br>
+     <b>Add Block pool % usage to HDFS federated nn page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5922">HDFS-5922</a>.
+     Major bug reported by Aaron T. Myers and fixed by Arpit Agarwal (datanode)<br>
+     <b>DN heartbeat thread can get stuck in tight loop</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5915">HDFS-5915</a>.
+     Major bug reported by Haohui Mai and fixed by Haohui Mai (namenode)<br>
+     <b>Refactor FSImageFormatProtobuf to simplify cross section reads</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5913">HDFS-5913</a>.
+     Minor bug reported by Ted Yu and fixed by Brandon Li (nfs)<br>
+     <b>Nfs3Utils#getWccAttr() should check attr parameter against null</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5910">HDFS-5910</a>.
+     Major improvement reported by Benoy Antony and fixed by Benoy Antony (security)<br>
+     <b>Enhance DataTransferProtocol to allow per-connection choice of encryption/plain-text</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5904">HDFS-5904</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestFileStatus fails intermittently on trunk and branch2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5901">HDFS-5901</a>.
+     Major bug reported by Vinayakumar B and fixed by Vinayakumar B (namenode)<br>
+     <b>NameNode new UI doesn't support IE8 and IE9 on windows 7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5900">HDFS-5900</a>.
+     Major bug reported by Tassapol Athiapinya and fixed by Andrew Wang (caching)<br>
+     <b>Cannot set cache pool limit of "unlimited" via CacheAdmin</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5898">HDFS-5898</a>.
+     Major sub-task reported by Jing Zhao and fixed by Abin Shahab (nfs)<br>
+     <b>Allow NFS gateway to login/relogin from its kerberos keytab</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5895">HDFS-5895</a>.
+     Major bug reported by Tassapol Athiapinya and fixed by Tassapol Athiapinya (tools)<br>
+     <b>HDFS cacheadmin -listPools has exit_code of 1 when the command returns 0 result.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5893">HDFS-5893</a>.
+     Major bug reported by Yesha Vora and fixed by Haohui Mai <br>
+     <b>HftpFileSystem.RangeHeaderUrlOpener uses the default URLConnectionFactory which does not import SSL certificates</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5892">HDFS-5892</a>.
+     Minor test reported by Ted Yu and fixed by  <br>
+     <b>TestDeleteBlockPool fails in branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5891">HDFS-5891</a>.
+     Major bug reported by Haohui Mai and fixed by Haohui Mai (namenode , webhdfs)<br>
+     <b>webhdfs should not try connecting the DN during redirection</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5886">HDFS-5886</a>.
+     Major bug reported by Ted Yu and fixed by Brandon Li (nfs)<br>
+     <b>Potential null pointer deference in RpcProgramNfs3#readlink()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5882">HDFS-5882</a>.
+     Minor test reported by Jimmy Xiang and fixed by Jimmy Xiang <br>
+     <b>TestAuditLogs is flaky</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5881">HDFS-5881</a>.
+     Critical bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Fix skip() of the short-circuit local reader (legacy).</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5879">HDFS-5879</a>.
+     Major bug reported by Gera Shegalov and fixed by Gera Shegalov (test)<br>
+     <b>Some TestHftpFileSystem tests do not close streams</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5868">HDFS-5868</a>.
+     Major sub-task reported by Taylor, Buddy and fixed by  (datanode)<br>
+     <b>Make hsync implementation pluggable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5866">HDFS-5866</a>.
+     Major sub-task reported by Akira AJISAKA and fixed by Akira AJISAKA (tools)<br>
+     <b>'-maxSize' and '-step' option fail in OfflineImageViewer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5859">HDFS-5859</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (datanode)<br>
+     <b>DataNode#checkBlockToken should check block tokens even if security is not enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5857">HDFS-5857</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestWebHDFS#testNamenodeRestart fails intermittently with NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5856">HDFS-5856</a>.
+     Minor bug reported by Josh Elser and fixed by Josh Elser (datanode)<br>
+     <b>DataNode.checkDiskError might throw NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5847">HDFS-5847</a>.
+     Major sub-task reported by Haohui Mai and fixed by Jing Zhao <br>
+     <b>Consolidate INodeReference into a separate section</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5846">HDFS-5846</a>.
+     Major bug reported by Nikola Vujic and fixed by Nikola Vujic (namenode)<br>
+     <b>Assigning DEFAULT_RACK in resolveNetworkLocation method can break data resiliency</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5843">HDFS-5843</a>.
+     Major bug reported by Laurent Goujon and fixed by Laurent Goujon (datanode)<br>
+     <b>DFSClient.getFileChecksum() throws IOException if checksum is disabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5840">HDFS-5840</a>.
+     Blocker bug reported by Aaron T. Myers and fixed by Jing Zhao (ha , journal-node , namenode)<br>
+     <b>Follow-up to HDFS-5138 to improve error handling during partial upgrade failures</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5828">HDFS-5828</a>.
+     Major bug reported by Taylor, Buddy and fixed by Taylor, Buddy (namenode)<br>
+     <b>BlockPlacementPolicyWithNodeGroup can place multiple replicas on the same node group when dfs.namenode.avoid.write.stale.datanode is true</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5821">HDFS-5821</a>.
+     Major bug reported by Gera Shegalov and fixed by Gera Shegalov (test)<br>
+     <b>TestHDFSCLI fails for user names with the dash character</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5810">HDFS-5810</a>.
+     Major sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>Unify mmap cache and short-circuit file descriptor cache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5807">HDFS-5807</a>.
+     Major bug reported by Mit Desai and fixed by Chen He (test)<br>
+     <b>TestBalancerWithNodeGroup.testBalancerWithNodeGroup fails intermittently on Branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5804">HDFS-5804</a>.
+     Major sub-task reported by Abin Shahab and fixed by Abin Shahab (nfs)<br>
+     <b>HDFS NFS Gateway fails to mount and proxy when using Kerberos</b><br>
+     <blockquote>Fixes NFS on Kerberized cluster.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5803">HDFS-5803</a>.
+     Major bug reported by Mit Desai and fixed by Chen He <br>
+     <b>TestBalancer.testBalancer0 fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5791">HDFS-5791</a>.
+     Major bug reported by Brandon Li and fixed by Haohui Mai (test)<br>
+     <b>TestHttpsFileSystem should use a random port to avoid binding error during testing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5790">HDFS-5790</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (namenode , performance)<br>
+     <b>LeaseManager.findPath is very slow when many leases need recovery</b><br>
+     <blockquote>Committed to branch-2 and trunk.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5781">HDFS-5781</a>.
+     Minor improvement reported by Jing Zhao and fixed by Jing Zhao (namenode)<br>
+     <b>Use an array to record the mapping between FSEditLogOpCode and the corresponding byte value</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5780">HDFS-5780</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestRBWBlockInvalidation times out intemittently on branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5776">HDFS-5776</a>.
+     Major improvement reported by Liang Xie and fixed by Liang Xie (hdfs-client)<br>
+     <b>Support 'hedged' reads in DFSClient</b><br>
+     <blockquote>If a read from a block is slow, start up another parallel, 'hedged' read against a different block replica.  We then take the result of whichever read returns first (the outstanding read is cancelled).  This 'hedged' read feature will help rein in the outliers, the odd read that takes a long time because it hit a bad patch on the disc, etc.
+
+This feature is off by default.  To enable this feature, set &lt;code&gt;dfs.client.hedged.read.threadpool.size&lt;/code&gt; to a positive number.  The threadpool size is how many threads to dedicate to the running of these 'hedged', concurrent reads in your client.
+
+Then set &lt;code&gt;dfs.client.hedged.read.threshold.millis&lt;/code&gt; to the number of milliseconds to wait before starting up a 'hedged' read.  For example, if you set this property to 10, then if a read has not returned within 10 milliseconds, we will start up a new read against a different block replica.
+
+This feature emits new metrics:
+
++ hedgedReadOps
++ hedgeReadOpsWin -- how many times the hedged read 'beat' the original read
++ hedgedReadOpsInCurThread -- how many times we went to do a hedged read but we had to run it in the current thread because dfs.client.hedged.read.threadpool.size was at a maximum.</blockquote></li>
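A minimal client-side sketch of turning hedged reads on, using only the two configuration keys named above (the file path is a placeholder, not from this release note):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class HedgedReadExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Dedicate 10 threads to hedged reads; the default of 0 leaves the feature off.
      conf.setInt("dfs.client.hedged.read.threadpool.size", 10);
      // Start a second, 'hedged' read if the first has not returned within 10 ms.
      conf.setLong("dfs.client.hedged.read.threshold.millis", 10);
      FileSystem fs = FileSystem.get(conf);
      FSDataInputStream in = fs.open(new Path("/tmp/example.txt")); // placeholder path
      byte[] buf = new byte[4096];
      int n = in.read(buf); // slow reads may now be raced against a second replica
      System.out.println("read " + n + " bytes");
      in.close();
      fs.close();
    }
  }

Reads issued through such a client are hedged transparently; the metrics listed above report how often a hedged read was started and how often it beat the original.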
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5775">HDFS-5775</a>.
+     Major improvement reported by Haohui Mai and fixed by Haohui Mai (namenode)<br>
+     <b>Consolidate the code for serialization in CacheManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5768">HDFS-5768</a>.
+     Major improvement reported by Haohui Mai and fixed by Haohui Mai (namenode)<br>
+     <b>Consolidate the serialization code in DelegationTokenSecretManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5767">HDFS-5767</a>.
+     Blocker bug reported by Yongjun Zhang and fixed by Yongjun Zhang (nfs)<br>
+     <b>NFS implementation assumes userName userId mapping to be unique, which is not true sometimes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5759">HDFS-5759</a>.
+     Major bug reported by Haohui Mai and fixed by Haohui Mai <br>
+     <b>Web UI does not show up during the period of loading FSImage</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5746">HDFS-5746</a>.
+     Major sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (datanode , hdfs-client)<br>
+     <b>add ShortCircuitSharedMemorySegment</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5742">HDFS-5742</a>.
+     Minor bug reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>DatanodeCluster (mini cluster of DNs) fails to start</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5726">HDFS-5726</a>.
+     Minor sub-task reported by Jing Zhao and fixed by Jing Zhao (namenode)<br>
+     <b>Fix compilation error in AbstractINodeDiff for JDK7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5716">HDFS-5716</a>.
+     Major bug reported by Haohui Mai and fixed by Haohui Mai (webhdfs)<br>
+     <b>Allow WebHDFS to use pluggable authentication filter</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5715">HDFS-5715</a>.
+     Major sub-task reported by Jing Zhao and fixed by Jing Zhao (namenode)<br>
+     <b>Use Snapshot ID to indicate the corresponding Snapshot for a FileDiff/DirectoryDiff</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5709">HDFS-5709</a>.
+     Major improvement reported by Andrew Wang and fixed by Andrew Wang (namenode)<br>
+     <b>Improve NameNode upgrade with existing reserved paths and path components</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5705">HDFS-5705</a>.
+     Major bug reported by Ted Yu and fixed by Ted Yu (datanode)<br>
+     <b>TestSecondaryNameNodeUpgrade#testChangeNsIDFails may fail due to ConcurrentModificationException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5698">HDFS-5698</a>.
+     Major improvement reported by Haohui Mai and fixed by Haohui Mai (namenode)<br>
+     <b>Use protobuf to serialize / deserialize FSImage</b><br>
+     <blockquote>Use protobuf to serialize/deserialize the FSImage.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5672">HDFS-5672</a>.
+     Major test reported by Ted Yu and fixed by Jing Zhao (namenode)<br>
+     <b>TestHASafeMode#testSafeBlockTracking fails in trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5647">HDFS-5647</a>.
+     Major sub-task reported by Haohui Mai and fixed by Haohui Mai (namenode)<br>
+     <b>Merge INodeDirectory.Feature and INodeFile.Feature</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5638">HDFS-5638</a>.
+     Major sub-task reported by Chris Nauroth and fixed by Vinayakumar B (hdfs-client)<br>
+     <b>HDFS implementation of FileContext API for ACLs.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5632">HDFS-5632</a>.
+     Major sub-task reported by Jing Zhao and fixed by Jing Zhao (namenode)<br>
+     <b>Add Snapshot feature to INodeDirectory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5626">HDFS-5626</a>.
+     Major bug reported by Stephen Chu and fixed by Colin Patrick McCabe (caching)<br>
+     <b>dfsadmin -report shows incorrect cache values</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5554">HDFS-5554</a>.
+     Major sub-task reported by Jing Zhao and fixed by Jing Zhao (namenode)<br>
+     <b>Add Snapshot Feature to INodeFile</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5537">HDFS-5537</a>.
+     Major sub-task reported by Jing Zhao and fixed by Jing Zhao (namenode , snapshots)<br>
+     <b>Remove FileWithSnapshot interface</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5535">HDFS-5535</a>.
+     Major new feature reported by Nathan Roberts and fixed by Tsz Wo Nicholas Sze (datanode , ha , hdfs-client , namenode)<br>
+     <b>Umbrella jira for improved HDFS rolling upgrades</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5531">HDFS-5531</a>.
+     Minor sub-task reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (namenode)<br>
+     <b>Combine the getNsQuota() and getDsQuota() methods in INode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5516">HDFS-5516</a>.
+     Major bug reported by Chris Nauroth and fixed by Miodrag Radulovic (webhdfs)<br>
+     <b>WebHDFS does not require user name when anonymous http requests are disallowed.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5492">HDFS-5492</a>.
+     Minor bug reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Port HDFS-2069 (Incorrect default trash interval in the docs) to trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5483">HDFS-5483</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>NN should gracefully handle multiple block replicas on same DN</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5339">HDFS-5339</a>.
+     Major bug reported by Stephen Chu and fixed by Haohui Mai (webhdfs)<br>
+     <b>WebHDFS URI does not accept logical nameservices when security is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5321">HDFS-5321</a>.
+     Major sub-task reported by Haohui Mai and fixed by Haohui Mai <br>
+     <b>Clean up the HTTP-related configuration in HDFS</b><br>
+     <blockquote>dfs.http.port and dfs.https.port are removed. Filesystem clients, such as WebHdfsFileSystem, now have fixed instead of configurable default ports (i.e., 50070 for http and 50470 for https).
+
+Users can explicitly specify the port in the URI to access a file system that runs on a non-default port.</blockquote></li>
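As a hedged illustration (the host name and port below are placeholders), a client can still reach a file system listening on a non-default port by putting the port directly in the URI:

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;

  public class NonDefaultPortExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // The explicit port in the URI overrides the fixed default (50070 for http).
      FileSystem fs = FileSystem.get(URI.create("webhdfs://nn.example.com:8080/"), conf);
      System.out.println(fs.getUri());
      fs.close();
    }
  }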
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5318">HDFS-5318</a>.
+     Major improvement reported by Eric Sirianni and fixed by  (namenode)<br>
+     <b>Support read-only and read-write paths to shared replicas</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5286">HDFS-5286</a>.
+     Major sub-task reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (namenode)<br>
+     <b>Flatten INodeDirectory hierarchy: add DirectoryWithQuotaFeature</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5285">HDFS-5285</a>.
+     Major sub-task reported by Tsz Wo Nicholas Sze and fixed by Jing Zhao (namenode)<br>
+     <b>Flatten INodeFile hierarchy: Add UnderContruction Feature</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5244">HDFS-5244</a>.
+     Major bug reported by Jinghui Wang and fixed by Jinghui Wang (test)<br>
+     <b>TestNNStorageRetentionManager#testPurgeMultipleDirs fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5167">HDFS-5167</a>.
+     Minor sub-task reported by Jing Zhao and fixed by Tsuyoshi OZAWA (ha , namenode)<br>
+     <b>Add metrics about the NameNode retry cache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5153">HDFS-5153</a>.
+     Major improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>Datanode should send block reports for each storage in a separate message</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5138">HDFS-5138</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Aaron T. Myers <br>
+     <b>Support HDFS upgrade in HA</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5064">HDFS-5064</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (ha , namenode)<br>
+     <b>Standby checkpoints should not block concurrent readers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4911">HDFS-4911</a>.
+     Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>Reduce PeerCache timeout to be commensurate with dfs.datanode.socket.reuse.keepalive</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4858">HDFS-4858</a>.
+     Minor bug reported by Jagane Sundar and fixed by Henry Wang (datanode)<br>
+     <b>HDFS DataNode to NameNode RPC should timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4685">HDFS-4685</a>.
+     Major new feature reported by Sachin Jose and fixed by Chris Nauroth (hdfs-client , namenode , security)<br>
+     <b>Implementation of ACLs in HDFS</b><br>
+     <blockquote>HDFS now supports ACLs (Access Control Lists).  ACLs can specify fine-grained file permissions for specific named users or named groups.</blockquote></li>
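A minimal client-side sketch of the new ACL support (the path and user name are placeholders; the calls follow the FileSystem ACL methods added alongside this feature):

  import java.util.Arrays;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.permission.AclEntry;
  import org.apache.hadoop.fs.permission.AclEntryScope;
  import org.apache.hadoop.fs.permission.AclEntryType;
  import org.apache.hadoop.fs.permission.FsAction;

  public class AclExample {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      Path file = new Path("/data/report.csv");   // placeholder path
      AclEntry entry = new AclEntry.Builder()
          .setScope(AclEntryScope.ACCESS)
          .setType(AclEntryType.USER)
          .setName("alice")                       // placeholder user
          .setPermission(FsAction.READ)
          .build();
      // Grant the named user read access on top of the existing permission bits.
      fs.modifyAclEntries(file, Arrays.asList(entry));
      System.out.println(fs.getAclStatus(file));
    }
  }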
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4564">HDFS-4564</a>.
+     Blocker sub-task reported by Daryn Sharp and fixed by Daryn Sharp (webhdfs)<br>
+     <b>Webhdfs returns incorrect http response codes for denied operations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4370">HDFS-4370</a>.
+     Major improvement reported by Konstantin Shvachko and fixed by Chu Tong (datanode)<br>
+     <b>Fix typo Blanacer in DataNode</b><br>
+     <blockquote>I just committed this. Thank you Chu.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4200">HDFS-4200</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Andrew Wang (datanode)<br>
+     <b>Reduce the size of synchronized sections in PacketResponder </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3969">HDFS-3969</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (hdfs-client)<br>
+     <b>Small bug fixes and improvements for disk locations API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3405">HDFS-3405</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Vinayakumar B <br>
+     <b>Checkpointing should use HTTP POST or PUT instead of GET-GET to send merged fsimages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3128">HDFS-3128</a>.
+     Minor bug reported by Eli Collins and fixed by Andrew Wang (test)<br>
+     <b>Unit tests should not use a test root in /tmp</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10450">HADOOP-10450</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (io , native)<br>
+     <b>Build zlib native code bindings in hadoop.dll for Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10449">HADOOP-10449</a>.
+     Minor sub-task reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (security)<br>
+     <b>Fix the javac warnings in the security packages.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10442">HADOOP-10442</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Group look-up can cause segmentation fault when certain JNI-based mapping module is used.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10441">HADOOP-10441</a>.
+     Blocker bug reported by Jing Zhao and fixed by Jing Zhao (metrics)<br>
+     <b>Namenode metric "rpc.RetryCache/NameNodeRetryCache.CacheHit" can't be correctly processed by Ganglia</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10440">HADOOP-10440</a>.
+     Major bug reported by guodongdong and fixed by guodongdong (fs)<br>
+     <b>HarFsInputStream of HarFileSystem, when reading data, computing the position has bug</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10437">HADOOP-10437</a>.
+     Minor sub-task reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (conf , util)<br>
+     <b>Fix the javac warnings in the conf and the util package</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10425">HADOOP-10425</a>.
+     Critical bug reported by Brandon Li and fixed by Tsz Wo Nicholas Sze (fs)<br>
+     <b>Incompatible behavior of LocalFileSystem:getContentSummary</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10423">HADOOP-10423</a>.
+     Minor improvement reported by Chris Nauroth and fixed by Chris Nauroth (documentation)<br>
+     <b>Clarify compatibility policy document for combination of new client and old server.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10422">HADOOP-10422</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (ipc)<br>
+     <b>Remove redundant logging of RPC retry attempts.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10407">HADOOP-10407</a>.
+     Minor sub-task reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (ipc)<br>
+     <b>Fix the javac warnings in the ipc package.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10399">HADOOP-10399</a>.
+     Major sub-task reported by Chris Nauroth and fixed by Vinayakumar B (fs)<br>
+     <b>FileContext API for ACLs.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10395">HADOOP-10395</a>.
+     Minor bug reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>TestCallQueueManager is flaky</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10394">HADOOP-10394</a>.
+     Major bug reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>TestAuthenticationFilter is flaky</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10393">HADOOP-10393</a>.
+     Minor sub-task reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (security)<br>
+     <b>Fix hadoop-auth javac warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10386">HADOOP-10386</a>.
+     Minor improvement reported by Arpit Gupta and fixed by Haohui Mai (ha)<br>
+     <b>Log proxy hostname in various exceptions being thrown in a HA setup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10383">HADOOP-10383</a>.
+     Major improvement reported by Enis Soztutar and fixed by Enis Soztutar <br>
+     <b>InterfaceStability annotations should have RetentionPolicy.RUNTIME</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10379">HADOOP-10379</a>.
+     Major improvement reported by Haohui Mai and fixed by Haohui Mai <br>
+     <b>Protect authentication cookies with the HttpOnly and Secure flags</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10374">HADOOP-10374</a>.
+     Major improvement reported by Enis Soztutar and fixed by Enis Soztutar <br>
+     <b>InterfaceAudience annotations should have RetentionPolicy.RUNTIME</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10368">HADOOP-10368</a>.
+     Minor bug reported by Ted Yu and fixed by Tsuyoshi OZAWA (util)<br>
+     <b>InputStream is not closed in VersionInfo ctor</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10355">HADOOP-10355</a>.
+     Major bug reported by Akira AJISAKA and fixed by Haohui Mai <br>
+     <b>TestLoadGenerator#testLoadGenerator fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10353">HADOOP-10353</a>.
+     Major bug reported by Tudor Scurtu and fixed by Tudor Scurtu (fs)<br>
+     <b>FsUrlStreamHandlerFactory is not thread safe</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10348">HADOOP-10348</a>.
+     Major improvement reported by Haohui Mai and fixed by Haohui Mai <br>
+     <b>Deprecate hadoop.ssl.configuration in branch-2, and remove it in trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10346">HADOOP-10346</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (security)<br>
+     <b>Deadlock while logging tokens</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10343">HADOOP-10343</a>.
+     Minor improvement reported by Arpit Gupta and fixed by Arpit Gupta <br>
+     <b>Change info to debug log in LossyRetryInvocationHandler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10338">HADOOP-10338</a>.
+     Major bug reported by Andrew Wang and fixed by Colin Patrick McCabe <br>
+     <b>Cannot get the FileStatus of the root inode from the new Globber</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10337">HADOOP-10337</a>.
+     Major bug reported by Liang Xie and fixed by Liang Xie (metrics)<br>
+     <b>ConcurrentModificationException from MetricsDynamicMBeanBase.createMBeanInfo()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10333">HADOOP-10333</a>.
+     Trivial improvement reported by Ren&#233; Nyffenegger and fixed by Ren&#233; Nyffenegger <br>
+     <b>Fix grammatical error in overview.html document</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10330">HADOOP-10330</a>.
+     Major bug reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>TestFrameDecoder fails if it cannot bind port 12345</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10328">HADOOP-10328</a>.
+     Major bug reported by Arpit Gupta and fixed by Haohui Mai (tools)<br>
+     <b>loadGenerator exit code is not reliable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10327">HADOOP-10327</a>.
+     Blocker bug reported by Vinayakumar B and fixed by Vinayakumar B (native)<br>
+     <b>Trunk windows build broken after HDFS-5746</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10326">HADOOP-10326</a>.
+     Major bug reported by Manuel DE FERRAN and fixed by bc Wong (security)<br>
+     <b>M/R jobs can not access S3 if Kerberos is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10320">HADOOP-10320</a>.
+     Trivial bug reported by Ren&#233; Nyffenegger and fixed by Ren&#233; Nyffenegger (documentation)<br>
+     <b>Javadoc in InterfaceStability.java lacks final &lt;/ul&gt;</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10314">HADOOP-10314</a>.
+     Major bug reported by Kihwal Lee and fixed by Rushabh S Shah <br>
+     <b>The ls command help still shows outdated 0.16 format.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10301">HADOOP-10301</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Daryn Sharp (security)<br>
+     <b>AuthenticationFilter should return Forbidden for failed authentication</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10295">HADOOP-10295</a>.
+     Major improvement reported by Jing Zhao and fixed by Jing Zhao (tools/distcp)<br>
+     <b>Allow distcp to automatically identify the checksum type of source files and use it for the target</b><br>
+     <blockquote>Add option for distcp to preserve the checksum type of the source files. Users can use "-pc" as distcp command option to preserve the checksum type.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10285">HADOOP-10285</a>.
+     Major sub-task reported by Chris Li and fixed by  <br>
+     <b>Admin interface to swap callqueue at runtime</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10280">HADOOP-10280</a>.
+     Major sub-task reported by Chris Li and fixed by Chris Li <br>
+     <b>Make Schedulables return a configurable identity of user or group</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10278">HADOOP-10278</a>.
+     Major sub-task reported by Chris Li and fixed by Chris Li (ipc)<br>
+     <b>Refactor to make CallQueue pluggable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10249">HADOOP-10249</a>.
+     Major bug reported by Dilli Arumugam and fixed by Dilli Arumugam <br>
+     <b>LdapGroupsMapping should trim ldap password read from file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10221">HADOOP-10221</a>.
+     Major improvement reported by Benoy Antony and fixed by Benoy Antony (security)<br>
+     <b>Add a plugin to specify SaslProperties for RPC protocol based on connection properties</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10211">HADOOP-10211</a>.
+     Major improvement reported by Benoy Antony and fixed by Benoy Antony (security)<br>
+     <b>Enable RPC protocol to negotiate SASL-QOP values between clients and servers</b><br>
+     <blockquote>The hadoop.rpc.protection configuration property previously supported specifying a single value: one of authentication, integrity or privacy.  An unrecognized value was silently assumed to mean authentication.  This configuration property now accepts a comma-separated list of any of the 3 values, and unrecognized values are rejected with an error. Existing configurations containing an invalid value must be corrected. If the property is empty or not specified, authentication is assumed. </blockquote></li>
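For example, a minimal sketch of setting the property programmatically with a comma-separated list, as described above:

  import org.apache.hadoop.conf.Configuration;

  public class RpcProtectionExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Let client and server negotiate either privacy or integrity;
      // unrecognized values are now rejected instead of silently meaning authentication.
      conf.set("hadoop.rpc.protection", "privacy,integrity");
      System.out.println(conf.get("hadoop.rpc.protection"));
    }
  }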
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10191">HADOOP-10191</a>.
+     Blocker bug reported by Gera Shegalov and fixed by Gera Shegalov (viewfs)<br>
+     <b>Missing executable permission on viewfs internal dirs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10184">HADOOP-10184</a>.
+     Major new feature reported by Chris Nauroth and fixed by Chris Nauroth (fs , security)<br>
+     <b>Hadoop Common changes required to support HDFS ACLs.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10139">HADOOP-10139</a>.
+     Major improvement reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Update and improve the Single Cluster Setup document</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10085">HADOOP-10085</a>.
+     Blocker bug reported by Karthik Kambatla and fixed by Steve Loughran <br>
+     <b>CompositeService should allow adding services while being inited</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10070">HADOOP-10070</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (security)<br>
+     <b>RPC client doesn't use per-connection conf to determine server's expected Kerberos principal name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10015">HADOOP-10015</a>.
+     Minor bug reported by Haohui Mai and fixed by Nicolas Liochon (security)<br>
+     <b>UserGroupInformation prints out excessive ERROR warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9525">HADOOP-9525</a>.
+     Major test reported by Ivan Mitic and fixed by Ivan Mitic (test , util)<br>
+     <b>Add tests that validate winutils chmod behavior on folders</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9454">HADOOP-9454</a>.
+     Major improvement reported by Jordan Mendelson and fixed by Akira AJISAKA (fs/s3)<br>
+     <b>Support multipart uploads for s3native</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8691">HADOOP-8691</a>.
+     Minor improvement reported by Jason Lowe and fixed by Daryn Sharp (fs)<br>
+     <b>FsShell can print "Found xxx items" unnecessarily often</b><br>
+     <blockquote>The `ls` command only prints "Found foo items" once when listing the directories recursively.</blockquote></li>
+</ul>
+</body></html>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
 <title>Hadoop  2.3.0 Release Notes</title>
 <STYLE type="text/css">
 	H1 {font-family: sans-serif}

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -666,9 +666,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
      }
 
      this.updatingResource = new HashMap<String, String[]>(other.updatingResource);
+     this.finalParameters = new HashSet<String>(other.finalParameters);
    }
    
-    this.finalParameters = new HashSet<String>(other.finalParameters);
     synchronized(Configuration.class) {
       REGISTRY.put(this, null);
     }

+ 223 - 132
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java

@@ -18,18 +18,22 @@
 
 package org.apache.hadoop.crypto.key;
 
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-
+import org.apache.hadoop.fs.permission.FsPermission;
 import javax.crypto.spec.SecretKeySpec;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.io.Serializable;
 import java.net.URI;
+import java.net.URL;
 import java.security.Key;
 import java.security.KeyStore;
 import java.security.KeyStoreException;
@@ -42,16 +46,30 @@ import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * KeyProvider based on Java's KeyStore file format. The file may be stored in
  * any Hadoop FileSystem using the following name mangling:
  *  jks://hdfs@nn1.example.com/my/keys.jks -> hdfs://nn1.example.com/my/keys.jks
  *  jks://file/home/owen/keys.jks -> file:///home/owen/keys.jks
- *
- * The password for the keystore is taken from the HADOOP_KEYSTORE_PASSWORD
- * environment variable with a default of 'none'.
- *
+ * <p/>
+ * If the <code>HADOOP_KEYSTORE_PASSWORD</code> environment variable is set,
+ * its value is used as the password for the keystore.
+ * <p/>
+ * If the <code>HADOOP_KEYSTORE_PASSWORD</code> environment variable is not set,
+ * the password for the keystore is read from the file specified in the
+ * {@link #KEYSTORE_PASSWORD_FILE_KEY} configuration property. The password file
+ * is looked up in Hadoop's configuration directory via the classpath.
+ * <p/>
+ * <b>NOTE:</b> Make sure the password in the password file does not have an
+ * ENTER at the end, else it won't be valid for the Java KeyStore.
+ * <p/>
+ * If neither the environment variable nor the property is set, the password
+ * used is 'none'.
+ * <p/>
  * It is expected for encrypted InputFormats and OutputFormats to copy the keys
  * from the original provider into the job's Credentials object, which is
  * accessed via the UserProvider. Therefore, this provider won't be used by
@@ -61,16 +79,23 @@ import java.util.Map;
 public class JavaKeyStoreProvider extends KeyProvider {
   private static final String KEY_METADATA = "KeyMetadata";
   public static final String SCHEME_NAME = "jceks";
-  public static final String KEYSTORE_PASSWORD_NAME =
+
+  public static final String KEYSTORE_PASSWORD_FILE_KEY =
+      "hadoop.security.keystore.java-keystore-provider.password-file";
+
+  public static final String KEYSTORE_PASSWORD_ENV_VAR =
       "HADOOP_KEYSTORE_PASSWORD";
-  public static final String KEYSTORE_PASSWORD_DEFAULT = "none";
+  public static final char[] KEYSTORE_PASSWORD_DEFAULT = "none".toCharArray();
 
   private final URI uri;
   private final Path path;
   private final FileSystem fs;
+  private final FsPermission permissions;
   private final KeyStore keyStore;
-  private final char[] password;
+  private char[] password;
   private boolean changed = false;
+  private Lock readLock;
+  private Lock writeLock;
 
   private final Map<String, Metadata> cache = new HashMap<String, Metadata>();
 
@@ -78,17 +103,40 @@ public class JavaKeyStoreProvider extends KeyProvider {
     this.uri = uri;
     path = unnestUri(uri);
     fs = path.getFileSystem(conf);
-    // Get the password from the user's environment
-    String pw = System.getenv(KEYSTORE_PASSWORD_NAME);
-    if (pw == null) {
-      pw = KEYSTORE_PASSWORD_DEFAULT;
+    // Get the password file from the conf, if not present from the user's
+    // environment var
+    if (System.getenv().containsKey(KEYSTORE_PASSWORD_ENV_VAR)) {
+      password = System.getenv(KEYSTORE_PASSWORD_ENV_VAR).toCharArray();
+    }
+    if (password == null) {
+      String pwFile = conf.get(KEYSTORE_PASSWORD_FILE_KEY);
+      if (pwFile != null) {
+        ClassLoader cl = Thread.currentThread().getContextClassLoader();
+        URL pwdFile = cl.getResource(pwFile);
+        if (pwdFile != null) {
+          InputStream is = pwdFile.openStream();
+          try {
+            password = IOUtils.toCharArray(is);
+          } finally {
+            is.close();
+          }
+        }
+      }
+    }
+    if (password == null) {
+      password = KEYSTORE_PASSWORD_DEFAULT;
     }
-    password = pw.toCharArray();
     try {
       keyStore = KeyStore.getInstance(SCHEME_NAME);
       if (fs.exists(path)) {
+        // save off permissions in case we need to
+        // rewrite the keystore in flush()
+        FileStatus s = fs.getFileStatus(path);
+        permissions = s.getPermission();
+
         keyStore.load(fs.open(path), password);
       } else {
+        permissions = new FsPermission("700");
         // required to create an empty keystore. *sigh*
         keyStore.load(null, password);
       }
@@ -99,138 +147,171 @@ public class JavaKeyStoreProvider extends KeyProvider {
     } catch (CertificateException e) {
       throw new IOException("Can't load keystore " + path, e);
     }
+    ReadWriteLock lock = new ReentrantReadWriteLock(true);
+    readLock = lock.readLock();
+    writeLock = lock.writeLock();
   }
 
   @Override
   public KeyVersion getKeyVersion(String versionName) throws IOException {
-    SecretKeySpec key = null;
+    readLock.lock();
     try {
-      if (!keyStore.containsAlias(versionName)) {
-        return null;
+      SecretKeySpec key = null;
+      try {
+        if (!keyStore.containsAlias(versionName)) {
+          return null;
+        }
+        key = (SecretKeySpec) keyStore.getKey(versionName, password);
+      } catch (KeyStoreException e) {
+        throw new IOException("Can't get key " + versionName + " from " +
+                              path, e);
+      } catch (NoSuchAlgorithmException e) {
+        throw new IOException("Can't get algorithm for key " + key + " from " +
+                              path, e);
+      } catch (UnrecoverableKeyException e) {
+        throw new IOException("Can't recover key " + key + " from " + path, e);
       }
-      key = (SecretKeySpec) keyStore.getKey(versionName, password);
-    } catch (KeyStoreException e) {
-      throw new IOException("Can't get key " + versionName + " from " +
-                            path, e);
-    } catch (NoSuchAlgorithmException e) {
-      throw new IOException("Can't get algorithm for key " + key + " from " +
-                            path, e);
-    } catch (UnrecoverableKeyException e) {
-      throw new IOException("Can't recover key " + key + " from " + path, e);
+      return new KeyVersion(versionName, key.getEncoded());
+    } finally {
+      readLock.unlock();
     }
-    return new KeyVersion(versionName, key.getEncoded());
   }
 
   @Override
   public List<String> getKeys() throws IOException {
-    ArrayList<String> list = new ArrayList<String>();
-    String alias = null;
+    readLock.lock();
     try {
-      Enumeration<String> e = keyStore.aliases();
-      while (e.hasMoreElements()) {
-         alias = e.nextElement();
-         // only include the metadata key names in the list of names
-         if (!alias.contains("@")) {
-             list.add(alias);
-         }
+      ArrayList<String> list = new ArrayList<String>();
+      String alias = null;
+      try {
+        Enumeration<String> e = keyStore.aliases();
+        while (e.hasMoreElements()) {
+           alias = e.nextElement();
+           // only include the metadata key names in the list of names
+           if (!alias.contains("@")) {
+               list.add(alias);
+           }
+        }
+      } catch (KeyStoreException e) {
+        throw new IOException("Can't get key " + alias + " from " + path, e);
       }
-    } catch (KeyStoreException e) {
-      throw new IOException("Can't get key " + alias + " from " + path, e);
+      return list;
+    } finally {
+      readLock.unlock();
     }
-    return list;
   }
 
   @Override
   public List<KeyVersion> getKeyVersions(String name) throws IOException {
-    List<KeyVersion> list = new ArrayList<KeyVersion>();
-    Metadata km = getMetadata(name);
-    if (km != null) {
-      int latestVersion = km.getVersions();
-      KeyVersion v = null;
-      String versionName = null;
-      for (int i = 0; i < latestVersion; i++) {
-        versionName = buildVersionName(name, i);
-        v = getKeyVersion(versionName);
-        if (v != null) {
-          list.add(v);
+    readLock.lock();
+    try {
+      List<KeyVersion> list = new ArrayList<KeyVersion>();
+      Metadata km = getMetadata(name);
+      if (km != null) {
+        int latestVersion = km.getVersions();
+        KeyVersion v = null;
+        String versionName = null;
+        for (int i = 0; i < latestVersion; i++) {
+          versionName = buildVersionName(name, i);
+          v = getKeyVersion(versionName);
+          if (v != null) {
+            list.add(v);
+          }
         }
       }
+      return list;
+    } finally {
+      readLock.unlock();
     }
-    return list;
   }
 
   @Override
   public Metadata getMetadata(String name) throws IOException {
-    if (cache.containsKey(name)) {
-      return cache.get(name);
-    }
+    readLock.lock();
     try {
-      if (!keyStore.containsAlias(name)) {
-        return null;
+      if (cache.containsKey(name)) {
+        return cache.get(name);
       }
-      Metadata meta = ((KeyMetadata) keyStore.getKey(name, password)).metadata;
-      cache.put(name, meta);
-      return meta;
-    } catch (KeyStoreException e) {
-      throw new IOException("Can't get metadata for " + name +
-          " from keystore " + path, e);
-    } catch (NoSuchAlgorithmException e) {
-      throw new IOException("Can't get algorithm for " + name +
-          " from keystore " + path, e);
-    } catch (UnrecoverableKeyException e) {
-      throw new IOException("Can't recover key for " + name +
-          " from keystore " + path, e);
+      try {
+        if (!keyStore.containsAlias(name)) {
+          return null;
+        }
+        Metadata meta = ((KeyMetadata) keyStore.getKey(name, password)).metadata;
+        cache.put(name, meta);
+        return meta;
+      } catch (KeyStoreException e) {
+        throw new IOException("Can't get metadata for " + name +
+            " from keystore " + path, e);
+      } catch (NoSuchAlgorithmException e) {
+        throw new IOException("Can't get algorithm for " + name +
+            " from keystore " + path, e);
+      } catch (UnrecoverableKeyException e) {
+        throw new IOException("Can't recover key for " + name +
+            " from keystore " + path, e);
+      }
+    } finally {
+      readLock.unlock();
     }
   }
 
   @Override
   public KeyVersion createKey(String name, byte[] material,
                                Options options) throws IOException {
+    writeLock.lock();
     try {
-      if (keyStore.containsAlias(name) || cache.containsKey(name)) {
-        throw new IOException("Key " + name + " already exists in " + this);
+      try {
+        if (keyStore.containsAlias(name) || cache.containsKey(name)) {
+          throw new IOException("Key " + name + " already exists in " + this);
+        }
+      } catch (KeyStoreException e) {
+        throw new IOException("Problem looking up key " + name + " in " + this,
+            e);
       }
-    } catch (KeyStoreException e) {
-      throw new IOException("Problem looking up key " + name + " in " + this,
-          e);
-    }
-    Metadata meta = new Metadata(options.getCipher(), options.getBitLength(),
-        new Date(), 1);
-    if (options.getBitLength() != 8 * material.length) {
-      throw new IOException("Wrong key length. Required " +
-          options.getBitLength() + ", but got " + (8 * material.length));
+      Metadata meta = new Metadata(options.getCipher(), options.getBitLength(),
+          options.getDescription(), new Date(), 1);
+      if (options.getBitLength() != 8 * material.length) {
+        throw new IOException("Wrong key length. Required " +
+            options.getBitLength() + ", but got " + (8 * material.length));
+      }
+      cache.put(name, meta);
+      String versionName = buildVersionName(name, 0);
+      return innerSetKeyVersion(versionName, material, meta.getCipher());
+    } finally {
+      writeLock.unlock();
     }
-    cache.put(name, meta);
-    String versionName = buildVersionName(name, 0);
-    return innerSetKeyVersion(versionName, material, meta.getCipher());
   }
 
   @Override
   public void deleteKey(String name) throws IOException {
-    Metadata meta = getMetadata(name);
-    if (meta == null) {
-      throw new IOException("Key " + name + " does not exist in " + this);
-    }
-    for(int v=0; v < meta.getVersions(); ++v) {
-      String versionName = buildVersionName(name, v);
+    writeLock.lock();
+    try {
+      Metadata meta = getMetadata(name);
+      if (meta == null) {
+        throw new IOException("Key " + name + " does not exist in " + this);
+      }
+      for(int v=0; v < meta.getVersions(); ++v) {
+        String versionName = buildVersionName(name, v);
+        try {
+          if (keyStore.containsAlias(versionName)) {
+            keyStore.deleteEntry(versionName);
+          }
+        } catch (KeyStoreException e) {
+          throw new IOException("Problem removing " + versionName + " from " +
+              this, e);
+        }
+      }
       try {
-        if (keyStore.containsAlias(versionName)) {
-          keyStore.deleteEntry(versionName);
+        if (keyStore.containsAlias(name)) {
+          keyStore.deleteEntry(name);
         }
       } catch (KeyStoreException e) {
-        throw new IOException("Problem removing " + versionName + " from " +
-            this, e);
+        throw new IOException("Problem removing " + name + " from " + this, e);
       }
+      cache.remove(name);
+      changed = true;
+    } finally {
+      writeLock.unlock();
     }
-    try {
-      if (keyStore.containsAlias(name)) {
-        keyStore.deleteEntry(name);
-      }
-    } catch (KeyStoreException e) {
-      throw new IOException("Problem removing " + name + " from " + this, e);
-    }
-    cache.remove(name);
-    changed = true;
   }
 
   KeyVersion innerSetKeyVersion(String versionName, byte[] material,
@@ -249,47 +330,57 @@ public class JavaKeyStoreProvider extends KeyProvider {
   @Override
   public KeyVersion rollNewVersion(String name,
                                     byte[] material) throws IOException {
-    Metadata meta = getMetadata(name);
-    if (meta == null) {
-      throw new IOException("Key " + name + " not found");
-    }
-    if (meta.getBitLength() != 8 * material.length) {
-      throw new IOException("Wrong key length. Required " +
-          meta.getBitLength() + ", but got " + (8 * material.length));
+    writeLock.lock();
+    try {
+      Metadata meta = getMetadata(name);
+      if (meta == null) {
+        throw new IOException("Key " + name + " not found");
+      }
+      if (meta.getBitLength() != 8 * material.length) {
+        throw new IOException("Wrong key length. Required " +
+            meta.getBitLength() + ", but got " + (8 * material.length));
+      }
+      int nextVersion = meta.addVersion();
+      String versionName = buildVersionName(name, nextVersion);
+      return innerSetKeyVersion(versionName, material, meta.getCipher());
+    } finally {
+      writeLock.unlock();
     }
-    int nextVersion = meta.addVersion();
-    String versionName = buildVersionName(name, nextVersion);
-    return innerSetKeyVersion(versionName, material, meta.getCipher());
   }
 
   @Override
   public void flush() throws IOException {
-    if (!changed) {
-      return;
-    }
-    // put all of the updates into the keystore
-    for(Map.Entry<String, Metadata> entry: cache.entrySet()) {
+    writeLock.lock();
+    try {
+      if (!changed) {
+        return;
+      }
+      // put all of the updates into the keystore
+      for(Map.Entry<String, Metadata> entry: cache.entrySet()) {
+        try {
+          keyStore.setKeyEntry(entry.getKey(), new KeyMetadata(entry.getValue()),
+              password, null);
+        } catch (KeyStoreException e) {
+          throw new IOException("Can't set metadata key " + entry.getKey(),e );
+        }
+      }
+      // write out the keystore
+      FSDataOutputStream out = FileSystem.create(fs, path, permissions);
       try {
-        keyStore.setKeyEntry(entry.getKey(), new KeyMetadata(entry.getValue()),
-            password, null);
+        keyStore.store(out, password);
       } catch (KeyStoreException e) {
-        throw new IOException("Can't set metadata key " + entry.getKey(),e );
+        throw new IOException("Can't store keystore " + this, e);
+      } catch (NoSuchAlgorithmException e) {
+        throw new IOException("No such algorithm storing keystore " + this, e);
+      } catch (CertificateException e) {
+        throw new IOException("Certificate exception storing keystore " + this,
+            e);
       }
+      out.close();
+      changed = false;
+    } finally {
+      writeLock.unlock();
     }
-    // write out the keystore
-    FSDataOutputStream out = fs.create(path, true);
-    try {
-      keyStore.store(out, password);
-    } catch (KeyStoreException e) {
-      throw new IOException("Can't store keystore " + this, e);
-    } catch (NoSuchAlgorithmException e) {
-      throw new IOException("No such algorithm storing keystore " + this, e);
-    } catch (CertificateException e) {
-      throw new IOException("Certificate exception storing keystore " + this,
-          e);
-    }
-    out.close();
-    changed = false;
   }
 
   @Override
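
The JavaKeyStoreProvider hunks above replace unsynchronized keystore access with a ReentrantReadWriteLock: read-only operations take the shared lock, mutating operations take the exclusive lock, matching the thread-safety contract added to KeyProvider below. A minimal, self-contained sketch of the same pattern; the class and field names here are illustrative and not taken from the patch:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative stand-in for the keystore/metadata state guarded in the diff above.
public class LockedCache {
  private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
  private final Lock readLock = rwLock.readLock();
  private final Lock writeLock = rwLock.writeLock();
  private final Map<String, byte[]> entries = new HashMap<String, byte[]>();

  // Readers may run concurrently; they only need the shared lock.
  public byte[] get(String name) {
    readLock.lock();
    try {
      return entries.get(name);
    } finally {
      readLock.unlock();
    }
  }

  // Writers take the exclusive lock so the check-then-put stays atomic.
  public void put(String name, byte[] material) {
    writeLock.lock();
    try {
      if (entries.containsKey(name)) {
        throw new IllegalStateException(name + " already exists");
      }
      entries.put(name, material);
    } finally {
      writeLock.unlock();
    }
  }
}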

+ 126 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java

@@ -24,8 +24,12 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
 import java.net.URI;
+import java.security.NoSuchAlgorithmException;
+import java.text.MessageFormat;
 import java.util.Date;
+import java.util.LinkedHashMap;
 import java.util.List;
+import java.util.Map;
 
 import com.google.gson.stream.JsonReader;
 import com.google.gson.stream.JsonWriter;
@@ -34,11 +38,15 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 
+import javax.crypto.KeyGenerator;
+
 /**
  * A provider of secret key material for Hadoop applications. Provides an
  * abstraction to separate key storage from users of encryption. It
  * is intended to support getting or storing keys in a variety of ways,
  * including third party bindings.
+ * <P/>
+ * <code>KeyProvider</code> implementations must be thread safe.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
@@ -99,21 +107,34 @@ public abstract class KeyProvider {
     private final static String CIPHER_FIELD = "cipher";
     private final static String BIT_LENGTH_FIELD = "bitLength";
     private final static String CREATED_FIELD = "created";
+    private final static String DESCRIPTION_FIELD = "description";
     private final static String VERSIONS_FIELD = "versions";
 
     private final String cipher;
     private final int bitLength;
+    private final String description;
     private final Date created;
     private int versions;
 
     protected Metadata(String cipher, int bitLength,
-                       Date created, int versions) {
+                       String description, Date created, int versions) {
       this.cipher = cipher;
       this.bitLength = bitLength;
+      this.description = description;
       this.created = created;
       this.versions = versions;
     }
 
+    public String toString() {
+      return MessageFormat.format(
+          "cipher: {0}, length: {1} description: {2} created: {3} version: {4}",
+          cipher, bitLength, description, created, versions);
+    }
+
+    public String getDescription() {
+      return description;
+    }
+
     public Date getCreated() {
       return created;
     }
@@ -165,6 +186,9 @@ public abstract class KeyProvider {
       if (created != null) {
         writer.name(CREATED_FIELD).value(created.getTime());
       }
+      if (description != null) {
+        writer.name(DESCRIPTION_FIELD).value(description);
+      }
       writer.name(VERSIONS_FIELD).value(versions);
       writer.endObject();
       writer.flush();
@@ -181,6 +205,7 @@ public abstract class KeyProvider {
       int bitLength = 0;
       Date created = null;
       int versions = 0;
+      String description = null;
       JsonReader reader = new JsonReader(new InputStreamReader
           (new ByteArrayInputStream(bytes)));
       reader.beginObject();
@@ -194,12 +219,15 @@ public abstract class KeyProvider {
           created = new Date(reader.nextLong());
         } else if (VERSIONS_FIELD.equals(field)) {
           versions = reader.nextInt();
+        } else if (DESCRIPTION_FIELD.equals(field)) {
+          description = reader.nextString();
         }
       }
       reader.endObject();
       this.cipher = cipher;
       this.bitLength = bitLength;
       this.created = created;
+      this.description = description;
       this.versions = versions;
     }
   }
@@ -210,6 +238,7 @@ public abstract class KeyProvider {
   public static class Options {
     private String cipher;
     private int bitLength;
+    private String description;
 
     public Options(Configuration conf) {
       cipher = conf.get(DEFAULT_CIPHER_NAME, DEFAULT_CIPHER);
@@ -226,13 +255,22 @@ public abstract class KeyProvider {
       return this;
     }
 
-    protected String getCipher() {
+    public Options setDescription(String description) {
+      this.description = description;
+      return this;
+    }
+
+    public String getCipher() {
       return cipher;
     }
 
-    protected int getBitLength() {
+    public int getBitLength() {
       return bitLength;
     }
+
+    public String getDescription() {
+      return description;
+    }
   }
 
   /**
@@ -272,6 +310,24 @@ public abstract class KeyProvider {
    */
   public abstract List<String> getKeys() throws IOException;
 
+
+  /**
+   * Get the key metadata for all keys.
+   *
+   * @return a Map with all the keys and their metadata
+   * @throws IOException
+   */
+  public Map<String, Metadata> getKeysMetadata() throws IOException {
+    Map<String, Metadata> keysMetadata = new LinkedHashMap<String, Metadata>();
+    for (String key : getKeys()) {
+      Metadata meta = getMetadata(key);
+      if (meta != null) {
+        keysMetadata.put(key, meta);
+      }
+    }
+    return keysMetadata;
+  }
+
   /**
    * Get the key material for all versions of a specific key name.
    * @return the list of key material
@@ -314,6 +370,56 @@ public abstract class KeyProvider {
   public abstract KeyVersion createKey(String name, byte[] material,
                                        Options options) throws IOException;
 
+  /**
+   * Get the algorithm from the cipher.
+   *
+   * @return the algorithm name
+   */
+  private String getAlgorithm(String cipher) {
+    int slash = cipher.indexOf('/');
+    if (slash == -1) {
+      return cipher;
+    } else {
+      return cipher.substring(0, slash);
+    }
+  }
+
+  /**
+   * Generates a key material.
+   *
+   * @param size length of the key.
+   * @param algorithm algorithm to use for generating the key.
+   * @return the generated key.
+   * @throws NoSuchAlgorithmException
+   */
+  protected byte[] generateKey(int size, String algorithm)
+      throws NoSuchAlgorithmException {
+    algorithm = getAlgorithm(algorithm);
+    KeyGenerator keyGenerator = KeyGenerator.getInstance(algorithm);
+    keyGenerator.init(size);
+    byte[] key = keyGenerator.generateKey().getEncoded();
+    return key;
+  }
+
+  /**
+   * Create a new key generating the material for it.
+   * The given key must not already exist.
+   * <p/>
+   * This implementation generates the key material and calls the
+   * {@link #createKey(String, byte[], Options)} method.
+   *
+   * @param name the base name of the key
+   * @param options the options for the new key.
+   * @return the version name of the first version of the key.
+   * @throws IOException
+   * @throws NoSuchAlgorithmException
+   */
+  public KeyVersion createKey(String name, Options options)
+      throws NoSuchAlgorithmException, IOException {
+    byte[] material = generateKey(options.getBitLength(), options.getCipher());
+    return createKey(name, material, options);
+  }
+
   /**
    * Delete the given key.
    * @param name the name of the key to delete
@@ -332,6 +438,23 @@ public abstract class KeyProvider {
                                              byte[] material
                                             ) throws IOException;
 
+  /**
+   * Roll a new version of the given key generating the material for it.
+   * <p/>
+   * This implementation generates the key material and calls the
+   * {@link #rollNewVersion(String, byte[])} method.
+   *
+   * @param name the basename of the key
+   * @return the name of the new version of the key
+   * @throws IOException
+   */
+  public KeyVersion rollNewVersion(String name) throws NoSuchAlgorithmException,
+                                                       IOException {
+    Metadata meta = getMetadata(name);
+    byte[] material = generateKey(meta.getBitLength(), meta.getCipher());
+    return rollNewVersion(name, material);
+  }
+
   /**
    * Ensures that any changes to the keys are written to persistent store.
    * @throws IOException
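
The new KeyProvider overloads createKey(String, Options) and rollNewVersion(String) generate the key material internally via javax.crypto.KeyGenerator, so callers no longer supply raw bytes. A hedged usage sketch; the provider lookup and the key name "example.key" are assumptions for illustration, not part of the patch:

import java.io.IOException;
import java.security.NoSuchAlgorithmException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class KeyProviderExample {
  public static void main(String[] args) throws IOException, NoSuchAlgorithmException {
    Configuration conf = new Configuration();
    // Assumes a provider is configured via KeyProviderFactory.KEY_PROVIDER_PATH.
    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);

    KeyProvider.Options options = KeyProvider.options(conf)
        .setDescription("example key created through the generating overload");
    // New overload from this patch: material is generated internally.
    KeyProvider.KeyVersion first = provider.createKey("example.key", options);
    System.out.println("created " + first.getVersionName());

    // New overload from this patch: roll a version without supplying material.
    KeyProvider.KeyVersion rolled = provider.rollNewVersion("example.key");
    System.out.println("rolled to " + rolled.getVersionName());

    provider.flush();
  }
}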

+ 23 - 34
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java

@@ -23,6 +23,7 @@ import java.io.PrintStream;
 import java.security.InvalidParameterException;
 import java.security.NoSuchAlgorithmException;
 import java.util.List;
+import java.util.Map;
 
 import javax.crypto.KeyGenerator;
 
@@ -45,6 +46,7 @@ public class KeyShell extends Configured implements Tool {
       "   [" + RollCommand.USAGE + "]\n" +
       "   [" + DeleteCommand.USAGE + "]\n" +
       "   [" + ListCommand.USAGE + "]\n";
+  private static final String LIST_METADATA = "keyShell.list.metadata";
 
   private boolean interactive = false;
   private Command command = null;
@@ -121,6 +123,8 @@ public class KeyShell extends Configured implements Tool {
       } else if (args[i].equals("--provider")) {
         userSuppliedProvider = true;
         getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]);
+      } else if (args[i].equals("--metadata")) {
+        getConf().setBoolean(LIST_METADATA, true);
       } else if (args[i].equals("-i") || (args[i].equals("--interactive"))) {
         interactive = true;
       } else if (args[i].equals("--help")) {
@@ -185,16 +189,6 @@ public class KeyShell extends Configured implements Tool {
       return provider;
     }
 
-    protected byte[] generateKey(int size, String algorithm)
-        throws NoSuchAlgorithmException {
-      out.println("Generating key using size: " + size + " and algorithm: "
-          + algorithm);
-      KeyGenerator keyGenerator = KeyGenerator.getInstance(algorithm);
-      keyGenerator.init(size);
-      byte[] key = keyGenerator.generateKey().getEncoded();
-      return key;
-    }
-
     protected void printProviderWritten() {
         out.println(provider.getClass().getName() + " has been updated.");
     }
@@ -211,11 +205,15 @@ public class KeyShell extends Configured implements Tool {
   }
 
   private class ListCommand extends Command {
-    public static final String USAGE = "list <keyname> [--provider] [--help]";
+    public static final String USAGE =
+        "list [--provider] [--metadata] [--help]";
     public static final String DESC =
         "The list subcommand displays the keynames contained within \n" +
         "a particular provider - as configured in core-site.xml or " +
-        "indicated\nthrough the --provider argument.";
+        "indicated\nthrough the --provider argument.\n" +
+        "If the --metadata option is used, the keys metadata will be printed";
+
+    private boolean metadata = false;
 
     public boolean validate() {
       boolean rc = true;
@@ -227,16 +225,24 @@ public class KeyShell extends Configured implements Tool {
             + "you MUST use the --provider argument.");
         rc = false;
       }
+      metadata = getConf().getBoolean(LIST_METADATA, false);
       return rc;
     }
 
     public void execute() throws IOException {
       List<String> keys;
       try {
-        keys = provider.getKeys();
         out.println("Listing keys for KeyProvider: " + provider.toString());
-        for (String keyName : keys) {
-          out.println(keyName);
+        if (metadata) {
+          Map<String, Metadata> keysMeta = provider.getKeysMetadata();
+          for (Map.Entry<String, Metadata> entry : keysMeta.entrySet()) {
+            out.println(entry.getKey() + " : " + entry.getValue());
+          }
+        } else {
+          keys = provider.getKeys();
+          for (String keyName : keys) {
+            out.println(keyName);
+          }
         }
       } catch (IOException e) {
         out.println("Cannot list keys for KeyProvider: " + provider.toString()
@@ -289,9 +295,7 @@ public class KeyShell extends Configured implements Tool {
         out.println("Rolling key version from KeyProvider: "
             + provider.toString() + " for key name: " + keyName);
         try {
-          byte[] material = null;
-          material = generateKey(md.getBitLength(), md.getAlgorithm());
-          provider.rollNewVersion(keyName, material);
+          provider.rollNewVersion(keyName);
           out.println(keyName + " has been successfully rolled.");
           provider.flush();
           printProviderWritten();
@@ -423,9 +427,7 @@ public class KeyShell extends Configured implements Tool {
       warnIfTransientProvider();
       try {
         Options options = KeyProvider.options(getConf());
-        String alg = getAlgorithm(options.getCipher());
-        byte[] material = generateKey(options.getBitLength(), alg);
-        provider.createKey(keyName, material, options);
+        provider.createKey(keyName, options);
         out.println(keyName + " has been successfully created.");
         provider.flush();
         printProviderWritten();
@@ -441,19 +443,6 @@ public class KeyShell extends Configured implements Tool {
       }
     }
 
-    /**
-     * Get the algorithm from the cipher.
-     * @return the algorithm name
-     */
-    public String getAlgorithm(String cipher) {
-      int slash = cipher.indexOf('/');
-      if (slash == - 1) {
-        return cipher;
-      } else {
-        return cipher.substring(0, slash);
-      }
-    }
-
     @Override
     public String getUsage() {
       return USAGE + ":\n\n" + DESC;
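
With this change the list subcommand gains a --metadata flag, and the create/roll subcommands delegate material generation to the provider. A sketch of driving KeyShell programmatically; the jceks provider URI below is a placeholder, and the same arguments could equally be passed to the key shell on the command line:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyShell;
import org.apache.hadoop.util.ToolRunner;

public class KeyShellListExample {
  public static void main(String[] args) throws Exception {
    // Placeholder provider path, not taken from the patch.
    String[] shellArgs = {
        "list", "--metadata",
        "--provider", "jceks://file/tmp/example.jceks"
    };
    int rc = ToolRunner.run(new Configuration(), new KeyShell(), shellArgs);
    System.exit(rc);
  }
}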

+ 9 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java

@@ -55,7 +55,7 @@ public class UserProvider extends KeyProvider {
   }
 
   @Override
-  public KeyVersion getKeyVersion(String versionName) {
+  public synchronized KeyVersion getKeyVersion(String versionName) {
     byte[] bytes = credentials.getSecretKey(new Text(versionName));
     if (bytes == null) {
       return null;
@@ -64,7 +64,7 @@ public class UserProvider extends KeyProvider {
   }
 
   @Override
-  public Metadata getMetadata(String name) throws IOException {
+  public synchronized Metadata getMetadata(String name) throws IOException {
     if (cache.containsKey(name)) {
       return cache.get(name);
     }
@@ -78,7 +78,7 @@ public class UserProvider extends KeyProvider {
   }
 
   @Override
-  public KeyVersion createKey(String name, byte[] material,
+  public synchronized KeyVersion createKey(String name, byte[] material,
                                Options options) throws IOException {
     Text nameT = new Text(name);
     if (credentials.getSecretKey(nameT) != null) {
@@ -89,7 +89,7 @@ public class UserProvider extends KeyProvider {
           options.getBitLength() + ", but got " + (8 * material.length));
     }
     Metadata meta = new Metadata(options.getCipher(), options.getBitLength(),
-        new Date(), 1);
+        options.getDescription(), new Date(), 1);
     cache.put(name, meta);
     String versionName = buildVersionName(name, 0);
     credentials.addSecretKey(nameT, meta.serialize());
@@ -98,7 +98,7 @@ public class UserProvider extends KeyProvider {
   }
 
   @Override
-  public void deleteKey(String name) throws IOException {
+  public synchronized void deleteKey(String name) throws IOException {
     Metadata meta = getMetadata(name);
     if (meta == null) {
       throw new IOException("Key " + name + " does not exist in " + this);
@@ -111,7 +111,7 @@ public class UserProvider extends KeyProvider {
   }
 
   @Override
-  public KeyVersion rollNewVersion(String name,
+  public synchronized KeyVersion rollNewVersion(String name,
                                     byte[] material) throws IOException {
     Metadata meta = getMetadata(name);
     if (meta == null) {
@@ -134,7 +134,7 @@ public class UserProvider extends KeyProvider {
   }
 
   @Override
-  public void flush() {
+  public synchronized void flush() {
     user.addCredentials(credentials);
   }
 
@@ -151,7 +151,7 @@ public class UserProvider extends KeyProvider {
   }
 
   @Override
-  public List<String> getKeys() throws IOException {
+  public synchronized List<String> getKeys() throws IOException {
     List<String> list = new ArrayList<String>();
     List<Text> keys = credentials.getAllSecretKeys();
     for (Text key : keys) {
@@ -163,7 +163,7 @@ public class UserProvider extends KeyProvider {
   }
 
   @Override
-  public List<KeyVersion> getKeyVersions(String name) throws IOException {
+  public synchronized List<KeyVersion> getKeyVersions(String name) throws IOException {
       List<KeyVersion> list = new ArrayList<KeyVersion>();
       Metadata km = getMetadata(name);
       if (km != null) {

+ 14 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java

@@ -22,7 +22,6 @@ import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.EnumSet;
 import java.util.NoSuchElementException;
 import java.util.StringTokenizer;
 
@@ -75,6 +74,8 @@ public class DF extends Shell {
       return this.filesystem;
     } else {
       run();
+      verifyExitCode();
+      parseOutput();
       return filesystem;
     }
   }
@@ -114,14 +115,7 @@ public class DF extends Shell {
       this.mount = dirFile.getCanonicalPath().substring(0, 2);
     } else {
       run();
-      // Skip parsing if df was not successful
-      if (getExitCode() != 0) {
-        StringBuffer sb = new StringBuffer("df could not be run successfully: ");
-        for (String line: output) {
-          sb.append(line);
-        }
-        throw new IOException(sb.toString());
-      }
+      verifyExitCode();
       parseOutput();
     }
 
@@ -204,6 +198,17 @@ public class DF extends Shell {
     }
   }
 
+  private void verifyExitCode() throws IOException {
+    if (getExitCode() != 0) {
+      StringBuilder sb =
+          new StringBuilder("df could not be run successfully: ");
+      for (String line : output) {
+        sb.append(line);
+      }
+      throw new IOException(sb.toString());
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     String path = ".";
     if (args.length > 0)
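
getFilesystem() now verifies the df exit code and parses the output, following the same path getMount() already took. A short usage sketch, assuming a Unix-like df is available on the PATH:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;

public class DfExample {
  public static void main(String[] args) throws IOException {
    // Runs the platform "df" underneath; both accessors below now verify the
    // exit code before parsing.
    DF df = new DF(new File("."), new Configuration());
    System.out.println("filesystem: " + df.getFilesystem());
    System.out.println("mount:      " + df.getMount());
  }
}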

+ 38 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFs.java

@@ -0,0 +1,38 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+
+public class HarFs extends DelegateToFileSystem {
+  HarFs(final URI theUri, final Configuration conf)
+      throws IOException, URISyntaxException {
+    super(theUri, new HarFileSystem(), conf, "har", true);
+  }
+
+  @Override
+  public int getUriDefaultPort() {
+    return -1;
+  }
+}
+
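
HarFs adapts HarFileSystem to the AbstractFileSystem API so Hadoop archives become reachable through FileContext. An illustrative sketch, assuming fs.AbstractFileSystem.har.impl resolves to HarFs and using a made-up archive path:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class HarFsExample {
  public static void main(String[] args) throws IOException {
    // Hypothetical archive path: authority encodes the underlying scheme/host.
    Path archived = new Path("har://hdfs-namenode:8020/user/example/data.har/part-0");
    FileContext fc = FileContext.getFileContext(new Configuration());
    RemoteIterator<FileStatus> it = fc.listStatus(archived.getParent());
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
  }
}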

+ 20 - 16
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.util.Options;
@@ -836,21 +837,24 @@ public class MapFile {
 
     Configuration conf = new Configuration();
     FileSystem fs = FileSystem.getLocal(conf);
-    MapFile.Reader reader = new MapFile.Reader(fs, in, conf);
-    MapFile.Writer writer =
-      new MapFile.Writer(conf, fs, out,
-          reader.getKeyClass().asSubclass(WritableComparable.class),
-          reader.getValueClass());
-
-    WritableComparable key =
-      ReflectionUtils.newInstance(reader.getKeyClass().asSubclass(WritableComparable.class), conf);
-    Writable value =
-      ReflectionUtils.newInstance(reader.getValueClass().asSubclass(Writable.class), conf);
-
-    while (reader.next(key, value))               // copy all entries
-      writer.append(key, value);
-
-    writer.close();
+    MapFile.Reader reader = null;
+    MapFile.Writer writer = null;
+    try {
+      reader = new MapFile.Reader(fs, in, conf);
+      writer =
+        new MapFile.Writer(conf, fs, out,
+            reader.getKeyClass().asSubclass(WritableComparable.class),
+            reader.getValueClass());
+
+      WritableComparable key = ReflectionUtils.newInstance(reader.getKeyClass()
+        .asSubclass(WritableComparable.class), conf);
+      Writable value = ReflectionUtils.newInstance(reader.getValueClass()
+        .asSubclass(Writable.class), conf);
+
+      while (reader.next(key, value))               // copy all entries
+        writer.append(key, value);
+    } finally {
+      IOUtils.cleanup(LOG, writer, reader);
+    }
   }
-
 }

+ 22 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.ipc;
 
 import java.util.Arrays;
 import java.util.UUID;
+import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -183,6 +184,8 @@ public class RetryCache {
   private final long expirationTime;
   private String cacheName;
 
+  private final ReentrantLock lock = new ReentrantLock();
+
   /**
    * Constructor
    * @param cacheName name to identify the cache by
@@ -206,6 +209,13 @@ public class RetryCache {
         || Arrays.equals(Server.getClientId(), RpcConstants.DUMMY_CLIENT_ID);
   }
 
+  public void lock() {
+    this.lock.lock();
+  }
+
+  public void unlock() {
+    this.lock.unlock();
+  }
 
   private void incrCacheClearedCounter() {
     retryCacheMetrics.incrCacheCleared();
@@ -247,7 +257,8 @@ public class RetryCache {
    */
   private CacheEntry waitForCompletion(CacheEntry newEntry) {
     CacheEntry mapEntry = null;
-    synchronized (this) {
+    lock.lock();
+    try {
       mapEntry = set.get(newEntry);
       // If an entry in the cache does not exist, add a new one
       if (mapEntry == null) {
@@ -262,6 +273,8 @@ public class RetryCache {
       } else {
         retryCacheMetrics.incrCacheHit();
       }
+    } finally {
+      lock.unlock();
     }
     // Entry already exists in cache. Wait for completion and return its state
     Preconditions.checkNotNull(mapEntry,
@@ -292,8 +305,11 @@ public class RetryCache {
   public void addCacheEntry(byte[] clientId, int callId) {
     CacheEntry newEntry = new CacheEntry(clientId, callId, System.nanoTime()
         + expirationTime, true);
-    synchronized(this) {
+    lock.lock();
+    try {
       set.put(newEntry);
+    } finally {
+      lock.unlock();
     }
     retryCacheMetrics.incrCacheUpdated();
   }
@@ -303,8 +319,11 @@ public class RetryCache {
     // since the entry is loaded from editlog, we can assume it succeeded.    
     CacheEntry newEntry = new CacheEntryWithPayload(clientId, callId, payload,
         System.nanoTime() + expirationTime, true);
-    synchronized(this) {
+    lock.lock();
+    try {
       set.put(newEntry);
+    } finally {
+      lock.unlock();
     }
     retryCacheMetrics.incrCacheUpdated();
   }
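
RetryCache swaps synchronized(this) for an explicit ReentrantLock and exposes lock()/unlock(), so a caller can hold the cache lock across a larger check-then-act sequence while the cache's own methods still lock internally; reentrancy makes that nesting safe. A plain-JDK sketch of the composition (no Hadoop types, purely illustrative):

import java.util.concurrent.locks.ReentrantLock;

public class ReentrantCompositionDemo {
  private static final ReentrantLock LOCK = new ReentrantLock();

  // Stands in for RetryCache.addCacheEntry(), which now locks internally.
  static void addEntry(String id) {
    LOCK.lock();
    try {
      System.out.println("added " + id + ", hold count = " + LOCK.getHoldCount());
    } finally {
      LOCK.unlock();
    }
  }

  public static void main(String[] args) {
    // Stands in for a caller using the new lock()/unlock() accessors to keep a
    // check-then-act sequence atomic; the inner call nests without deadlock.
    LOCK.lock();
    try {
      addEntry("client-1#42");
    } finally {
      LOCK.unlock();
    }
  }
}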

+ 6 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -1983,7 +1983,7 @@ public abstract class Server {
         // authentication
         if (user != null && user.getRealUser() != null
             && (authMethod != AuthMethod.TOKEN)) {
-          ProxyUsers.authorize(user, this.getHostAddress(), conf);
+          ProxyUsers.authorize(user, this.getHostAddress());
         }
         authorize(user, protocolName, getHostInetAddress());
         if (LOG.isDebugEnabled()) {
@@ -2107,16 +2107,15 @@ public abstract class Server {
             if (e instanceof UndeclaredThrowableException) {
               e = e.getCause();
             }
-            String logMsg = Thread.currentThread().getName() + ", call " + call + ": error: " + e;
-            if (e instanceof RuntimeException || e instanceof Error) {
+            String logMsg = Thread.currentThread().getName() + ", call " + call;
+            if (exceptionsHandler.isTerse(e.getClass())) {
+              // Don't log the whole stack trace. Way too noisy!
+              LOG.info(logMsg + ": " + e);
+            } else if (e instanceof RuntimeException || e instanceof Error) {
               // These exception types indicate something is probably wrong
               // on the server side, as opposed to just a normal exceptional
               // result.
               LOG.warn(logMsg, e);
-            } else if (exceptionsHandler.isTerse(e.getClass())) {
-             // Don't log the whole stack trace of these exceptions.
-              // Way too noisy!
-              LOG.info(logMsg);
             } else {
               LOG.info(logMsg, e);
             }

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSink.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.metrics2;
 
+import java.io.Closeable;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -26,7 +28,9 @@ import org.apache.hadoop.classification.InterfaceStability;
  * Implementations of this interface consume the {@link MetricsRecord} generated
  * from {@link MetricsSource}. It registers with {@link MetricsSystem} which
  * periodically pushes the {@link MetricsRecord} to the sink using
- * {@link #putMetrics(MetricsRecord)} method.
+ * {@link #putMetrics(MetricsRecord)} method.  If the implementing class also
+ * implements {@link Closeable}, then the MetricsSystem will close the sink when
+ * it is stopped.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
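
Per the updated Javadoc, a sink that also implements java.io.Closeable is closed when the metrics system stops; MetricsSinkAdapter below performs the close and FileSink is the in-tree example. A minimal sketch of a custom sink opting in (illustrative, not part of the patch):

import java.io.Closeable;
import java.io.IOException;
import java.io.PrintWriter;

import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;

// Because it implements Closeable, MetricsSinkAdapter will close it on shutdown.
public class StdoutSink implements MetricsSink, Closeable {
  private final PrintWriter out = new PrintWriter(System.out, true);

  @Override
  public void init(SubsetConfiguration conf) {
    // no configuration needed for this sketch
  }

  @Override
  public void putMetrics(MetricsRecord record) {
    out.print(record.name());
    for (AbstractMetric metric : record.metrics()) {
      out.print(' ' + metric.name() + "=" + metric.value());
    }
    out.println();
  }

  @Override
  public void flush() {
    out.flush();
  }

  @Override
  public void close() throws IOException {
    out.close();
  }
}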

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java

@@ -85,7 +85,7 @@ class MetricsConfig extends SubsetConfiguration {
   private ClassLoader pluginLoader;
 
   MetricsConfig(Configuration c, String prefix) {
-    super(c, prefix.toLowerCase(Locale.US), ".");
+    super(c, prefix, ".");
   }
 
   static MetricsConfig create(String prefix) {

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.metrics2.impl;
 
+import java.io.Closeable;
 import java.util.Random;
 import java.util.concurrent.*;
 
@@ -25,6 +26,7 @@ import static com.google.common.base.Preconditions.*;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterInt;
@@ -198,6 +200,9 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
     } catch (InterruptedException e) {
       LOG.warn("Stop interrupted", e);
     }
+    if (sink instanceof Closeable) {
+      IOUtils.cleanup(LOG, (Closeable)sink);
+    }
   }
 
   String name() {

+ 8 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java

@@ -18,8 +18,10 @@
 
 package org.apache.hadoop.metrics2.sink;
 
+import java.io.Closeable;
 import java.io.File;
 import java.io.FileWriter;
+import java.io.IOException;
 import java.io.PrintWriter;
 
 import org.apache.commons.configuration.SubsetConfiguration;
@@ -36,7 +38,7 @@ import org.apache.hadoop.metrics2.MetricsTag;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class FileSink implements MetricsSink {
+public class FileSink implements MetricsSink, Closeable {
   private static final String FILENAME_KEY = "filename";
   private PrintWriter writer;
 
@@ -81,4 +83,9 @@ public class FileSink implements MetricsSink {
   public void flush() {
     writer.flush();
   }
+
+  @Override
+  public void close() throws IOException {
+    writer.close();
+  }
 }

+ 0 - 223
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/CsvRecordInput.java

@@ -1,223 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record;
-
-import java.io.InputStreamReader;
-import java.io.InputStream;
-import java.io.IOException;
-import java.io.PushbackReader;
-import java.io.UnsupportedEncodingException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class CsvRecordInput implements RecordInput {
-    
-  private PushbackReader stream;
-    
-  private class CsvIndex implements Index {
-    @Override
-    public boolean done() {
-      char c = '\0';
-      try {
-        c = (char) stream.read();
-        stream.unread(c);
-      } catch (IOException ex) {
-      }
-      return (c == '}') ? true : false;
-    }
-    @Override
-    public void incr() {}
-  }
-    
-  private void throwExceptionOnError(String tag) throws IOException {
-    throw new IOException("Error deserializing "+tag);
-  }
-    
-  private String readField(String tag) throws IOException {
-    try {
-      StringBuilder buf = new StringBuilder();
-      while (true) {
-        char c = (char) stream.read();
-        switch (c) {
-        case ',':
-          return buf.toString();
-        case '}':
-        case '\n':
-        case '\r':
-          stream.unread(c);
-          return buf.toString();
-        default:
-          buf.append(c);
-        }
-      }
-    } catch (IOException ex) {
-      throw new IOException("Error reading "+tag);
-    }
-  }
-    
-  /** Creates a new instance of CsvRecordInput */
-  public CsvRecordInput(InputStream in) {
-    try {
-      stream = new PushbackReader(new InputStreamReader(in, "UTF-8"));
-    } catch (UnsupportedEncodingException ex) {
-      throw new RuntimeException(ex);
-    }
-  }
-    
-  @Override
-  public byte readByte(String tag) throws IOException {
-    return (byte) readLong(tag);
-  }
-    
-  @Override
-  public boolean readBool(String tag) throws IOException {
-    String sval = readField(tag);
-    return "T".equals(sval) ? true : false;
-  }
-    
-  @Override
-  public int readInt(String tag) throws IOException {
-    return (int) readLong(tag);
-  }
-    
-  @Override
-  public long readLong(String tag) throws IOException {
-    String sval = readField(tag);
-    try {
-      long lval = Long.parseLong(sval);
-      return lval;
-    } catch (NumberFormatException ex) {
-      throw new IOException("Error deserializing "+tag);
-    }
-  }
-    
-  @Override
-  public float readFloat(String tag) throws IOException {
-    return (float) readDouble(tag);
-  }
-    
-  @Override
-  public double readDouble(String tag) throws IOException {
-    String sval = readField(tag);
-    try {
-      double dval = Double.parseDouble(sval);
-      return dval;
-    } catch (NumberFormatException ex) {
-      throw new IOException("Error deserializing "+tag);
-    }
-  }
-    
-  @Override
-  public String readString(String tag) throws IOException {
-    String sval = readField(tag);
-    return Utils.fromCSVString(sval);
-  }
-    
-  @Override
-  public Buffer readBuffer(String tag) throws IOException {
-    String sval = readField(tag);
-    return Utils.fromCSVBuffer(sval);
-  }
-    
-  @Override
-  public void startRecord(String tag) throws IOException {
-    if (tag != null && !tag.isEmpty()) {
-      char c1 = (char) stream.read();
-      char c2 = (char) stream.read();
-      if (c1 != 's' || c2 != '{') {
-        throw new IOException("Error deserializing "+tag);
-      }
-    }
-  }
-    
-  @Override
-  public void endRecord(String tag) throws IOException {
-    char c = (char) stream.read();
-    if (tag == null || tag.isEmpty()) {
-      if (c != '\n' && c != '\r') {
-        throw new IOException("Error deserializing record.");
-      } else {
-        return;
-      }
-    }
-        
-    if (c != '}') {
-      throw new IOException("Error deserializing "+tag);
-    }
-    c = (char) stream.read();
-    if (c != ',') {
-      stream.unread(c);
-    }
-        
-    return;
-  }
-    
-  @Override
-  public Index startVector(String tag) throws IOException {
-    char c1 = (char) stream.read();
-    char c2 = (char) stream.read();
-    if (c1 != 'v' || c2 != '{') {
-      throw new IOException("Error deserializing "+tag);
-    }
-    return new CsvIndex();
-  }
-    
-  @Override
-  public void endVector(String tag) throws IOException {
-    char c = (char) stream.read();
-    if (c != '}') {
-      throw new IOException("Error deserializing "+tag);
-    }
-    c = (char) stream.read();
-    if (c != ',') {
-      stream.unread(c);
-    }
-    return;
-  }
-    
-  @Override
-  public Index startMap(String tag) throws IOException {
-    char c1 = (char) stream.read();
-    char c2 = (char) stream.read();
-    if (c1 != 'm' || c2 != '{') {
-      throw new IOException("Error deserializing "+tag);
-    }
-    return new CsvIndex();
-  }
-    
-  @Override
-  public void endMap(String tag) throws IOException {
-    char c = (char) stream.read();
-    if (c != '}') {
-      throw new IOException("Error deserializing "+tag);
-    }
-    c = (char) stream.read();
-    if (c != ',') {
-      stream.unread(c);
-    }
-    return;
-  }
-}

+ 0 - 271
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordInput.java

@@ -1,271 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record;
-
-import java.io.InputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.xml.sax.*;
-import org.xml.sax.helpers.DefaultHandler;
-import javax.xml.parsers.SAXParserFactory;
-import javax.xml.parsers.SAXParser;
-
-/**
- * XML Deserializer.
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class XmlRecordInput implements RecordInput {
-    
-  static private class Value {
-    private String type;
-    private StringBuffer sb;
-        
-    public Value(String t) {
-      type = t;
-      sb = new StringBuffer();
-    }
-    public void addChars(char[] buf, int offset, int len) {
-      sb.append(buf, offset, len);
-    }
-    public String getValue() { return sb.toString(); }
-    public String getType() { return type; }
-  }
-    
-  private static class XMLParser extends DefaultHandler {
-    private boolean charsValid = false;
-        
-    private ArrayList<Value> valList;
-        
-    private XMLParser(ArrayList<Value> vlist) {
-      valList = vlist;
-    }
-        
-    @Override
-    public void startDocument() throws SAXException {}
-        
-    @Override
-    public void endDocument() throws SAXException {}
-        
-    @Override
-    public void startElement(String ns,
-                             String sname,
-                             String qname,
-                             Attributes attrs) throws SAXException {
-      charsValid = false;
-      if ("boolean".equals(qname) ||
-          "i4".equals(qname) ||
-          "int".equals(qname) ||
-          "string".equals(qname) ||
-          "double".equals(qname) ||
-          "ex:i1".equals(qname) ||
-          "ex:i8".equals(qname) ||
-          "ex:float".equals(qname)) {
-        charsValid = true;
-        valList.add(new Value(qname));
-      } else if ("struct".equals(qname) ||
-                 "array".equals(qname)) {
-        valList.add(new Value(qname));
-      }
-    }
-        
-    @Override
-    public void endElement(String ns,
-                           String sname,
-                           String qname) throws SAXException {
-      charsValid = false;
-      if ("struct".equals(qname) ||
-          "array".equals(qname)) {
-        valList.add(new Value("/"+qname));
-      }
-    }
-        
-    @Override
-    public void characters(char buf[], int offset, int len)
-      throws SAXException {
-      if (charsValid) {
-        Value v = valList.get(valList.size()-1);
-        v.addChars(buf, offset, len);
-      }
-    }
-        
-  }
-    
-  private class XmlIndex implements Index {
-    @Override
-    public boolean done() {
-      Value v = valList.get(vIdx);
-      if ("/array".equals(v.getType())) {
-        valList.set(vIdx, null);
-        vIdx++;
-        return true;
-      } else {
-        return false;
-      }
-    }
-    @Override
-    public void incr() {}
-  }
-    
-  private ArrayList<Value> valList;
-  private int vLen;
-  private int vIdx;
-    
-  private Value next() throws IOException {
-    if (vIdx < vLen) {
-      Value v = valList.get(vIdx);
-      valList.set(vIdx, null);
-      vIdx++;
-      return v;
-    } else {
-      throw new IOException("Error in deserialization.");
-    }
-  }
-    
-  /** Creates a new instance of XmlRecordInput */
-  public XmlRecordInput(InputStream in) {
-    try{
-      valList = new ArrayList<Value>();
-      DefaultHandler handler = new XMLParser(valList);
-      SAXParserFactory factory = SAXParserFactory.newInstance();
-      SAXParser parser = factory.newSAXParser();
-      parser.parse(in, handler);
-      vLen = valList.size();
-      vIdx = 0;
-    } catch (Exception ex) {
-      throw new RuntimeException(ex);
-    }
-  }
-    
-  @Override
-  public byte readByte(String tag) throws IOException {
-    Value v = next();
-    if (!"ex:i1".equals(v.getType())) {
-      throw new IOException("Error deserializing "+tag+".");
-    }
-    return Byte.parseByte(v.getValue());
-  }
-    
-  @Override
-  public boolean readBool(String tag) throws IOException {
-    Value v = next();
-    if (!"boolean".equals(v.getType())) {
-      throw new IOException("Error deserializing "+tag+".");
-    }
-    return "1".equals(v.getValue());
-  }
-    
-  @Override
-  public int readInt(String tag) throws IOException {
-    Value v = next();
-    if (!"i4".equals(v.getType()) &&
-        !"int".equals(v.getType())) {
-      throw new IOException("Error deserializing "+tag+".");
-    }
-    return Integer.parseInt(v.getValue());
-  }
-    
-  @Override
-  public long readLong(String tag) throws IOException {
-    Value v = next();
-    if (!"ex:i8".equals(v.getType())) {
-      throw new IOException("Error deserializing "+tag+".");
-    }
-    return Long.parseLong(v.getValue());
-  }
-    
-  @Override
-  public float readFloat(String tag) throws IOException {
-    Value v = next();
-    if (!"ex:float".equals(v.getType())) {
-      throw new IOException("Error deserializing "+tag+".");
-    }
-    return Float.parseFloat(v.getValue());
-  }
-    
-  @Override
-  public double readDouble(String tag) throws IOException {
-    Value v = next();
-    if (!"double".equals(v.getType())) {
-      throw new IOException("Error deserializing "+tag+".");
-    }
-    return Double.parseDouble(v.getValue());
-  }
-    
-  @Override
-  public String readString(String tag) throws IOException {
-    Value v = next();
-    if (!"string".equals(v.getType())) {
-      throw new IOException("Error deserializing "+tag+".");
-    }
-    return Utils.fromXMLString(v.getValue());
-  }
-    
-  @Override
-  public Buffer readBuffer(String tag) throws IOException {
-    Value v = next();
-    if (!"string".equals(v.getType())) {
-      throw new IOException("Error deserializing "+tag+".");
-    }
-    return Utils.fromXMLBuffer(v.getValue());
-  }
-    
-  @Override
-  public void startRecord(String tag) throws IOException {
-    Value v = next();
-    if (!"struct".equals(v.getType())) {
-      throw new IOException("Error deserializing "+tag+".");
-    }
-  }
-    
-  @Override
-  public void endRecord(String tag) throws IOException {
-    Value v = next();
-    if (!"/struct".equals(v.getType())) {
-      throw new IOException("Error deserializing "+tag+".");
-    }
-  }
-    
-  @Override
-  public Index startVector(String tag) throws IOException {
-    Value v = next();
-    if (!"array".equals(v.getType())) {
-      throw new IOException("Error deserializing "+tag+".");
-    }
-    return new XmlIndex();
-  }
-    
-  @Override
-  public void endVector(String tag) throws IOException {}
-    
-  @Override
-  public Index startMap(String tag) throws IOException {
-    return startVector(tag);
-  }
-    
-  @Override
-  public void endMap(String tag) throws IOException { endVector(tag); }
-
-}

+ 0 - 270
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordOutput.java

@@ -1,270 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record;
-
-import java.io.IOException;
-import java.util.TreeMap;
-import java.util.ArrayList;
-import java.io.PrintStream;
-import java.io.OutputStream;
-import java.io.UnsupportedEncodingException;
-import java.util.Stack;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * XML Serializer.
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class XmlRecordOutput implements RecordOutput {
-
-  private PrintStream stream;
-    
-  private int indent = 0;
-    
-  private Stack<String> compoundStack;
-    
-  private void putIndent() {
-    StringBuilder sb = new StringBuilder("");
-    for (int idx = 0; idx < indent; idx++) {
-      sb.append("  ");
-    }
-    stream.print(sb.toString());
-  }
-    
-  private void addIndent() {
-    indent++;
-  }
-    
-  private void closeIndent() {
-    indent--;
-  }
-    
-  private void printBeginEnvelope(String tag) {
-    if (!compoundStack.empty()) {
-      String s = compoundStack.peek();
-      if ("struct".equals(s)) {
-        putIndent();
-        stream.print("<member>\n");
-        addIndent();
-        putIndent();
-        stream.print("<name>"+tag+"</name>\n");
-        putIndent();
-        stream.print("<value>");
-      } else if ("vector".equals(s)) {
-        stream.print("<value>");
-      } else if ("map".equals(s)) {
-        stream.print("<value>");
-      }
-    } else {
-      stream.print("<value>");
-    }
-  }
-    
-  private void printEndEnvelope(String tag) {
-    if (!compoundStack.empty()) {
-      String s = compoundStack.peek();
-      if ("struct".equals(s)) {
-        stream.print("</value>\n");
-        closeIndent();
-        putIndent();
-        stream.print("</member>\n");
-      } else if ("vector".equals(s)) {
-        stream.print("</value>\n");
-      } else if ("map".equals(s)) {
-        stream.print("</value>\n");
-      }
-    } else {
-      stream.print("</value>\n");
-    }
-  }
-    
-  private void insideVector(String tag) {
-    printBeginEnvelope(tag);
-    compoundStack.push("vector");
-  }
-    
-  private void outsideVector(String tag) throws IOException {
-    String s = compoundStack.pop();
-    if (!"vector".equals(s)) {
-      throw new IOException("Error serializing vector.");
-    }
-    printEndEnvelope(tag);
-  }
-    
-  private void insideMap(String tag) {
-    printBeginEnvelope(tag);
-    compoundStack.push("map");
-  }
-    
-  private void outsideMap(String tag) throws IOException {
-    String s = compoundStack.pop();
-    if (!"map".equals(s)) {
-      throw new IOException("Error serializing map.");
-    }
-    printEndEnvelope(tag);
-  }
-    
-  private void insideRecord(String tag) {
-    printBeginEnvelope(tag);
-    compoundStack.push("struct");
-  }
-    
-  private void outsideRecord(String tag) throws IOException {
-    String s = compoundStack.pop();
-    if (!"struct".equals(s)) {
-      throw new IOException("Error serializing record.");
-    }
-    printEndEnvelope(tag);
-  }
-    
-  /** Creates a new instance of XmlRecordOutput */
-  public XmlRecordOutput(OutputStream out) {
-    try {
-      stream = new PrintStream(out, true, "UTF-8");
-      compoundStack = new Stack<String>();
-    } catch (UnsupportedEncodingException ex) {
-      throw new RuntimeException(ex);
-    }
-  }
-    
-  @Override
-  public void writeByte(byte b, String tag) throws IOException {
-    printBeginEnvelope(tag);
-    stream.print("<ex:i1>");
-    stream.print(Byte.toString(b));
-    stream.print("</ex:i1>");
-    printEndEnvelope(tag);
-  }
-    
-  @Override
-  public void writeBool(boolean b, String tag) throws IOException {
-    printBeginEnvelope(tag);
-    stream.print("<boolean>");
-    stream.print(b ? "1" : "0");
-    stream.print("</boolean>");
-    printEndEnvelope(tag);
-  }
-    
-  @Override
-  public void writeInt(int i, String tag) throws IOException {
-    printBeginEnvelope(tag);
-    stream.print("<i4>");
-    stream.print(Integer.toString(i));
-    stream.print("</i4>");
-    printEndEnvelope(tag);
-  }
-    
-  @Override
-  public void writeLong(long l, String tag) throws IOException {
-    printBeginEnvelope(tag);
-    stream.print("<ex:i8>");
-    stream.print(Long.toString(l));
-    stream.print("</ex:i8>");
-    printEndEnvelope(tag);
-  }
-    
-  @Override
-  public void writeFloat(float f, String tag) throws IOException {
-    printBeginEnvelope(tag);
-    stream.print("<ex:float>");
-    stream.print(Float.toString(f));
-    stream.print("</ex:float>");
-    printEndEnvelope(tag);
-  }
-    
-  @Override
-  public void writeDouble(double d, String tag) throws IOException {
-    printBeginEnvelope(tag);
-    stream.print("<double>");
-    stream.print(Double.toString(d));
-    stream.print("</double>");
-    printEndEnvelope(tag);
-  }
-    
-  @Override
-  public void writeString(String s, String tag) throws IOException {
-    printBeginEnvelope(tag);
-    stream.print("<string>");
-    stream.print(Utils.toXMLString(s));
-    stream.print("</string>");
-    printEndEnvelope(tag);
-  }
-    
-  @Override
-  public void writeBuffer(Buffer buf, String tag)
-    throws IOException {
-    printBeginEnvelope(tag);
-    stream.print("<string>");
-    stream.print(Utils.toXMLBuffer(buf));
-    stream.print("</string>");
-    printEndEnvelope(tag);
-  }
-    
-  @Override
-  public void startRecord(Record r, String tag) throws IOException {
-    insideRecord(tag);
-    stream.print("<struct>\n");
-    addIndent();
-  }
-    
-  @Override
-  public void endRecord(Record r, String tag) throws IOException {
-    closeIndent();
-    putIndent();
-    stream.print("</struct>");
-    outsideRecord(tag);
-  }
-    
-  @Override
-  public void startVector(ArrayList v, String tag) throws IOException {
-    insideVector(tag);
-    stream.print("<array>\n");
-    addIndent();
-  }
-    
-  @Override
-  public void endVector(ArrayList v, String tag) throws IOException {
-    closeIndent();
-    putIndent();
-    stream.print("</array>");
-    outsideVector(tag);
-  }
-    
-  @Override
-  public void startMap(TreeMap v, String tag) throws IOException {
-    insideMap(tag);
-    stream.print("<array>\n");
-    addIndent();
-  }
-    
-  @Override
-  public void endMap(TreeMap v, String tag) throws IOException {
-    closeIndent();
-    putIndent();
-    stream.print("</array>");
-    outsideMap(tag);
-  }
-
-}
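
The tail of the removed XmlRecordOutput above shows the wire format the old Record I/O XML serializer produced: each primitive is wrapped in an XML-RPC-style typed element (<i4>, <ex:i8>, <boolean>, <string>, ...), records become <struct>, and both vectors and maps become <array>, with compoundStack checking that the begin/end calls stay balanced. A minimal sketch of driving it directly, assuming a context that may throw IOException (hypothetical; real callers went through a generated Record's serialize method, and the per-field envelope comes from printBeginEnvelope/printEndEnvelope defined earlier in the file):

    XmlRecordOutput out = new XmlRecordOutput(System.out); // wraps the stream in a UTF-8 PrintStream
    out.writeInt(42, "count");                             // emits <i4>42</i4> inside the "count" envelope
    out.writeString("hello", "greeting");                  // emits <string>hello</string>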

+ 0 - 72
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CGenerator.java

@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import java.util.ArrayList;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.Iterator;
-
-/**
- * C Code generator front-end for Hadoop record I/O.
- */
-class CGenerator extends CodeGenerator {
-  
-  CGenerator() {
-  }
-  
-  /**
-   * Generate C code. This method only creates the requested file(s)
-   * and spits out file-level elements (such as include statements, etc.);
-   * record-level code is generated by JRecord.
-   */
-  @Override
-  void genCode(String name, ArrayList<JFile> ilist,
-               ArrayList<JRecord> rlist, String destDir, ArrayList<String> options)
-    throws IOException {
-    name = new File(destDir, (new File(name)).getName()).getAbsolutePath();
-    FileWriter cc = new FileWriter(name+".c");
-    try {
-      FileWriter hh = new FileWriter(name+".h");
-      try {
-        hh.write("#ifndef __"+name.toUpperCase().replace('.','_')+"__\n");
-        hh.write("#define __"+name.toUpperCase().replace('.','_')+"__\n");
-        hh.write("#include \"recordio.h\"\n");
-        for (Iterator<JFile> iter = ilist.iterator(); iter.hasNext();) {
-          hh.write("#include \""+iter.next().getName()+".h\"\n");
-        }
-
-        cc.write("#include \""+name+".h\"\n");
-
-        /*
-        for (Iterator<JRecord> iter = rlist.iterator(); iter.hasNext();) {
-        iter.next().genCppCode(hh, cc);
-        }
-         */
-
-        hh.write("#endif //"+name.toUpperCase().replace('.','_')+"__\n");
-      } finally {
-        hh.close();
-      }
-    } finally {
-      cc.close();
-    }
-  }
-}

+ 0 - 105
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CodeBuffer.java

@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.record.compiler;
-
-import java.util.ArrayList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * A wrapper around StringBuffer that automatically does indentation
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class CodeBuffer {
-  
-  static private ArrayList<Character> startMarkers = new ArrayList<Character>();
-  static private ArrayList<Character> endMarkers = new ArrayList<Character>();
-  
-  static {
-    addMarkers('{', '}');
-    addMarkers('(', ')');
-  }
-  
-  static void addMarkers(char ch1, char ch2) {
-    startMarkers.add(ch1);
-    endMarkers.add(ch2);
-  }
-  
-  private int level = 0;
-  private int numSpaces = 2;
-  private boolean firstChar = true;
-  private StringBuffer sb;
-  
-  /** Creates a new instance of CodeBuffer */
-  CodeBuffer() {
-    this(2, "");
-  }
-  
-  CodeBuffer(String s) {
-    this(2, s);
-  }
-  
-  CodeBuffer(int numSpaces, String s) {
-    sb = new StringBuffer();
-    this.numSpaces = numSpaces;
-    this.append(s);
-  }
-  
-  void append(String s) {
-    int length = s.length();
-    for (int idx = 0; idx < length; idx++) {
-      char ch = s.charAt(idx);
-      append(ch);
-    }
-  }
-  
-  void append(char ch) {
-    if (endMarkers.contains(ch)) {
-      level--;
-    }
-    if (firstChar) {
-      for (int idx = 0; idx < level; idx++) {
-        for (int num = 0; num < numSpaces; num++) {
-          rawAppend(' ');
-        }
-      }
-    }
-    rawAppend(ch);
-    firstChar = false;
-    if (startMarkers.contains(ch)) {
-      level++;
-    }
-    if (ch == '\n') {
-      firstChar = true;
-    }
-  }
-
-  private void rawAppend(char ch) {
-    sb.append(ch);
-  }
-  
-  @Override
-  public String toString() {
-    return sb.toString();
-  }
-}
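
The removed CodeBuffer was an auto-indenting wrapper over StringBuffer: every unmatched '{' or '(' adds one indentation level (two spaces by default) at the start of each new line, and the matching '}' or ')' removes it before the line is emitted. A minimal usage sketch, assuming package-private access from org.apache.hadoop.record.compiler (hypothetical, not part of this change):

    CodeBuffer cb = new CodeBuffer();   // default: 2 spaces per nesting level
    cb.append("void f() {\n");
    cb.append("int x = 0;\n");          // rendered as "  int x = 0;"
    cb.append("}\n");                   // '}' drops the level before indenting
    System.out.println(cb.toString());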

+ 0 - 53
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CodeGenerator.java

@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-
-/**
- * CodeGenerator is a Factory and a base class for Hadoop Record I/O translators.
- * Different translators register creation methods with this factory.
- */
-abstract class CodeGenerator {
-  
-  private static HashMap<String, CodeGenerator> generators =
-    new HashMap<String, CodeGenerator>();
-  
-  static {
-    register("c", new CGenerator());
-    register("c++", new CppGenerator());
-    register("java", new JavaGenerator());
-  }
-  
-  static void register(String lang, CodeGenerator gen) {
-    generators.put(lang, gen);
-  }
-  
-  static CodeGenerator get(String lang) {
-    return generators.get(lang);
-  }
-  
-  abstract void genCode(String file,
-                        ArrayList<JFile> inclFiles,
-                        ArrayList<JRecord> records,
-                        String destDir,
-                        ArrayList<String> options) throws IOException;
-}
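
The removed CodeGenerator doubled as a registry and a base class: each language front-end (CGenerator, CppGenerator, JavaGenerator) is registered under a language key and then looked up by name. A sketch of how a caller such as JFile.genCode (later in this diff) drove it, where ddlName, includes, records, destDir and options are hypothetical stand-ins for values produced by the rcc parser, in a context that may throw IOException:

    CodeGenerator gen = CodeGenerator.get("java");   // also "c" or "c++"
    if (gen != null) {
      gen.genCode(ddlName, includes, records, destDir, options);
    }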

+ 0 - 48
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/Consts.java

@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * const definitions for Record I/O compiler
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class Consts {
-  
-  /** Cannot create a new instance */
-  private Consts() {
-  }
-  
-  // prefix to use for variables in generated classes
-  public static final String RIO_PREFIX = "_rio_";
-  // other vars used in generated classes
-  public static final String RTI_VAR = RIO_PREFIX + "recTypeInfo";
-  public static final String RTI_FILTER = RIO_PREFIX + "rtiFilter";
-  public static final String RTI_FILTER_FIELDS = RIO_PREFIX + "rtiFilterFields";
-  public static final String RECORD_OUTPUT = RIO_PREFIX + "a";
-  public static final String RECORD_INPUT = RIO_PREFIX + "a";
-  public static final String TAG = RIO_PREFIX + "tag";
-  
-}

+ 0 - 75
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CppGenerator.java

@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import java.util.ArrayList;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.Iterator;
-
-/**
- * C++ Code generator front-end for Hadoop record I/O.
- */
-class CppGenerator extends CodeGenerator {
-  
-  CppGenerator() {
-  }
-  
-  /**
-   * Generate C++ code. This method only creates the requested file(s)
-   * and spits out file-level elements (such as include statements, etc.);
-   * record-level code is generated by JRecord.
-   */
-  @Override
-  void genCode(String name, ArrayList<JFile> ilist,
-               ArrayList<JRecord> rlist, String destDir, ArrayList<String> options)
-    throws IOException {
-    name = new File(destDir, (new File(name)).getName()).getAbsolutePath();
-
-    FileWriter cc = new FileWriter(name+".cc");
-    try {
-      FileWriter hh = new FileWriter(name+".hh");
-      
-      try {
-        String fileName = (new File(name)).getName();
-        hh.write("#ifndef __"+fileName.toUpperCase().replace('.','_')+"__\n");
-        hh.write("#define __"+fileName.toUpperCase().replace('.','_')+"__\n");
-        hh.write("#include \"recordio.hh\"\n");
-        hh.write("#include \"recordTypeInfo.hh\"\n");
-        for (Iterator<JFile> iter = ilist.iterator(); iter.hasNext();) {
-          hh.write("#include \""+iter.next().getName()+".hh\"\n");
-        }
-        
-        cc.write("#include \""+fileName+".hh\"\n");
-        cc.write("#include \"utils.hh\"\n");
-        
-        for (Iterator<JRecord> iter = rlist.iterator(); iter.hasNext();) {
-          iter.next().genCppCode(hh, cc, options);
-        }
-        
-        hh.write("#endif //"+fileName.toUpperCase().replace('.','_')+"__\n");
-      } finally {
-        hh.close();
-      }
-    } finally {
-      cc.close();
-    }
-  }
-}

+ 0 - 106
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBoolean.java

@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-
-/**
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JBoolean extends JType {
-  
-  class JavaBoolean extends JType.JavaType {
-    
-    JavaBoolean() {
-      super("boolean", "Bool", "Boolean", "TypeID.RIOType.BOOL");
-    }
-    
-    @Override
-    void genCompareTo(CodeBuffer cb, String fname, String other) {
-      cb.append(Consts.RIO_PREFIX + "ret = ("+fname+" == "+other+")? 0 : ("+
-          fname+"?1:-1);\n");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "org.apache.hadoop.record.meta.TypeID.BoolTypeID";
-    }
-
-    @Override
-    void genHashCode(CodeBuffer cb, String fname) {
-      cb.append(Consts.RIO_PREFIX + "ret = ("+fname+")?0:1;\n");
-    }
-    
-    // In Binary format, boolean is written as byte. true = 1, false = 0
-    @Override
-    void genSlurpBytes(CodeBuffer cb, String b, String s, String l) {
-      cb.append("{\n");
-      cb.append("if ("+l+"<1) {\n");
-      cb.append("throw new java.io.IOException(\"Boolean is exactly 1 byte."+
-                " Provided buffer is smaller.\");\n");
-      cb.append("}\n");
-      cb.append(s+"++; "+l+"--;\n");
-      cb.append("}\n");
-    }
-    
-    // In Binary format, boolean is written as byte. true = 1, false = 0
-    @Override
-    void genCompareBytes(CodeBuffer cb) {
-      cb.append("{\n");
-      cb.append("if (l1<1 || l2<1) {\n");
-      cb.append("throw new java.io.IOException(\"Boolean is exactly 1 byte."+
-                " Provided buffer is smaller.\");\n");
-      cb.append("}\n");
-      cb.append("if (b1[s1] != b2[s2]) {\n");
-      cb.append("return (b1[s1]<b2[s2])? -1 : 0;\n");
-      cb.append("}\n");
-      cb.append("s1++; s2++; l1--; l2--;\n");
-      cb.append("}\n");
-    }
-  }
-  
-  class CppBoolean extends CppType {
-    
-    CppBoolean() {
-      super("bool");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new ::hadoop::TypeID(::hadoop::RIOTYPE_BOOL)";
-    }
-  }
-
-  /** Creates a new instance of JBoolean */
-  public JBoolean() {
-    setJavaType(new JavaBoolean());
-    setCppType(new CppBoolean());
-    setCType(new CType());
-  }
-  
-  @Override
-  String getSignature() {
-    return "z";
-  }
-}

+ 0 - 120
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBuffer.java

@@ -1,120 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-
-/**
- * Code generator for "buffer" type.
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JBuffer extends JCompType {
-  
-  class JavaBuffer extends JavaCompType {
-    
-    JavaBuffer() {
-      super("org.apache.hadoop.record.Buffer", "Buffer", 
-          "org.apache.hadoop.record.Buffer", "TypeID.RIOType.BUFFER");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "org.apache.hadoop.record.meta.TypeID.BufferTypeID";
-    }
-
-    @Override
-    void genCompareTo(CodeBuffer cb, String fname, String other) {
-      cb.append(Consts.RIO_PREFIX + "ret = "+fname+".compareTo("+other+");\n");
-    }
-    
-    @Override
-    void genEquals(CodeBuffer cb, String fname, String peer) {
-      cb.append(Consts.RIO_PREFIX + "ret = "+fname+".equals("+peer+");\n");
-    }
-    
-    @Override
-    void genHashCode(CodeBuffer cb, String fname) {
-      cb.append(Consts.RIO_PREFIX + "ret = "+fname+".hashCode();\n");
-    }
-    
-    @Override
-    void genSlurpBytes(CodeBuffer cb, String b, String s, String l) {
-      cb.append("{\n");
-      cb.append("int i = org.apache.hadoop.record.Utils.readVInt("+
-                b+", "+s+");\n");
-      cb.append("int z = org.apache.hadoop.record.Utils.getVIntSize(i);\n");
-      cb.append(s+" += z+i; "+l+" -= (z+i);\n");
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genCompareBytes(CodeBuffer cb) {
-      cb.append("{\n");
-      cb.append("int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n");
-      cb.append("int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);\n");
-      cb.append("int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);\n");
-      cb.append("int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);\n");
-      cb.append("s1+=z1; s2+=z2; l1-=z1; l2-=z2;\n");
-      cb.append("int r1 = org.apache.hadoop.record.Utils.compareBytes(b1,s1,i1,b2,s2,i2);\n");
-      cb.append("if (r1 != 0) { return (r1<0)?-1:0; }\n");
-      cb.append("s1+=i1; s2+=i2; l1-=i1; l1-=i2;\n");
-      cb.append("}\n");
-    }
-  }
-  
-  class CppBuffer extends CppCompType {
-    
-    CppBuffer() {
-      super(" ::std::string");
-    }
-    
-    @Override
-    void genGetSet(CodeBuffer cb, String fname) {
-      cb.append("virtual const "+getType()+"& get"+toCamelCase(fname)+"() const {\n");
-      cb.append("return "+fname+";\n");
-      cb.append("}\n");
-      cb.append("virtual "+getType()+"& get"+toCamelCase(fname)+"() {\n");
-      cb.append("return "+fname+";\n");
-      cb.append("}\n");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new ::hadoop::TypeID(::hadoop::RIOTYPE_BUFFER)";
-    }
-
-  }
-  /** Creates a new instance of JBuffer */
-  public JBuffer() {
-    setJavaType(new JavaBuffer());
-    setCppType(new CppBuffer());
-    setCType(new CCompType());
-  }
-  
-  @Override
-  String getSignature() {
-    return "B";
-  }
-}

+ 0 - 93
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JByte.java

@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Code generator for "byte" type.
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JByte extends JType {
-  
-  class JavaByte extends JavaType {
-    
-    JavaByte() {
-      super("byte", "Byte", "Byte", "TypeID.RIOType.BYTE");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "org.apache.hadoop.record.meta.TypeID.ByteTypeID";
-    }
-
-    @Override
-    void genSlurpBytes(CodeBuffer cb, String b, String s, String l) {
-      cb.append("{\n");
-      cb.append("if ("+l+"<1) {\n");
-      cb.append("throw new java.io.IOException(\"Byte is exactly 1 byte."+
-                " Provided buffer is smaller.\");\n");
-      cb.append("}\n");
-      cb.append(s+"++; "+l+"--;\n");
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genCompareBytes(CodeBuffer cb) {
-      cb.append("{\n");
-      cb.append("if (l1<1 || l2<1) {\n");
-      cb.append("throw new java.io.IOException(\"Byte is exactly 1 byte."+
-                " Provided buffer is smaller.\");\n");
-      cb.append("}\n");
-      cb.append("if (b1[s1] != b2[s2]) {\n");
-      cb.append("return (b1[s1]<b2[s2])?-1:0;\n");
-      cb.append("}\n");
-      cb.append("s1++; s2++; l1--; l2--;\n");
-      cb.append("}\n");
-    }
-  }
-  
-  class CppByte extends CppType {
-    
-    CppByte() {
-      super("int8_t");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new ::hadoop::TypeID(::hadoop::RIOTYPE_BYTE)";
-    }
-  }
-
-  public JByte() {
-    setJavaType(new JavaByte());
-    setCppType(new CppByte());
-    setCType(new CType());
-  }
-  
-  @Override
-  String getSignature() {
-    return "b";
-  }
-}

+ 0 - 80
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JCompType.java

@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-
-/**
- * Abstract base class for all the "compound" types such as ustring,
- * buffer, vector, map, and record.
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-abstract class JCompType extends JType {
-  
-  abstract class JavaCompType extends JavaType {
-    
-    JavaCompType(String type, String suffix, String wrapper, 
-        String typeIDByteString) { 
-      super(type, suffix, wrapper, typeIDByteString);
-    }
-    
-    @Override
-    void genCompareTo(CodeBuffer cb, String fname, String other) {
-      cb.append(Consts.RIO_PREFIX + "ret = "+fname+".compareTo("+other+");\n");
-    }
-    
-    @Override
-    void genEquals(CodeBuffer cb, String fname, String peer) {
-      cb.append(Consts.RIO_PREFIX + "ret = "+fname+".equals("+peer+");\n");
-    }
-    
-    @Override
-    void genHashCode(CodeBuffer cb, String fname) {
-      cb.append(Consts.RIO_PREFIX + "ret = "+fname+".hashCode();\n");
-    }
-    
-    @Override
-    void genClone(CodeBuffer cb, String fname) {
-      cb.append(Consts.RIO_PREFIX + "other."+fname+" = ("+getType()+") this."+
-          fname+".clone();\n");
-    }
-  }
-  
-  abstract class CppCompType extends CppType {
-    
-    CppCompType(String type) {
-      super(type);
-    }
-    
-    @Override
-    void genGetSet(CodeBuffer cb, String fname) {
-      cb.append("virtual const "+getType()+"& get"+toCamelCase(fname)+"() const {\n");
-      cb.append("return "+fname+";\n");
-      cb.append("}\n");
-      cb.append("virtual "+getType()+"& get"+toCamelCase(fname)+"() {\n");
-      cb.append("return "+fname+";\n");
-      cb.append("}\n");
-    }
-  }
-  
-  class CCompType extends CType {
-    
-  }
-}

+ 0 - 102
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JDouble.java

@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-
-/**
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JDouble extends JType {
-  
-  class JavaDouble extends JavaType {
-    
-    JavaDouble() {
-      super("double", "Double", "Double", "TypeID.RIOType.DOUBLE");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "org.apache.hadoop.record.meta.TypeID.DoubleTypeID";
-    }
-
-    @Override
-    void genHashCode(CodeBuffer cb, String fname) {
-      String tmp = "Double.doubleToLongBits("+fname+")";
-      cb.append(Consts.RIO_PREFIX + "ret = (int)("+tmp+"^("+tmp+">>>32));\n");
-    }
-    
-    @Override
-    void genSlurpBytes(CodeBuffer cb, String b, String s, String l) {
-      cb.append("{\n");
-      cb.append("if ("+l+"<8) {\n");
-      cb.append("throw new java.io.IOException(\"Double is exactly 8 bytes."+
-                " Provided buffer is smaller.\");\n");
-      cb.append("}\n");
-      cb.append(s+"+=8; "+l+"-=8;\n");
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genCompareBytes(CodeBuffer cb) {
-      cb.append("{\n");
-      cb.append("if (l1<8 || l2<8) {\n");
-      cb.append("throw new java.io.IOException(\"Double is exactly 8 bytes."+
-                " Provided buffer is smaller.\");\n");
-      cb.append("}\n");
-      cb.append("double d1 = org.apache.hadoop.record.Utils.readDouble(b1, s1);\n");
-      cb.append("double d2 = org.apache.hadoop.record.Utils.readDouble(b2, s2);\n");
-      cb.append("if (d1 != d2) {\n");
-      cb.append("return ((d1-d2) < 0) ? -1 : 0;\n");
-      cb.append("}\n");
-      cb.append("s1+=8; s2+=8; l1-=8; l2-=8;\n");
-      cb.append("}\n");
-    }
-  }
-
-  class CppDouble extends CppType {
-    
-    CppDouble() {
-      super("double");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new ::hadoop::TypeID(::hadoop::RIOTYPE_DOUBLE)";
-    }
-  }
-
-  
-  /** Creates a new instance of JDouble */
-  public JDouble() {
-    setJavaType(new JavaDouble());
-    setCppType(new CppDouble());
-    setCType(new CType());
-  }
-  
-  @Override
-  String getSignature() {
-    return "d";
-  }
-}

+ 0 - 52
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JField.java

@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * A thin wrapper around a record field.
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JField<T> {
-  
-  private String name;
-  private T type;
-  
-  /**
-   * Creates a new instance of JField
-   */
-  public JField(String name, T type) {
-    this.type = type;
-    this.name = name;
-  }
-  
-  String getName() {
-    return name;
-  }
-  
-  T getType() {
-    return type;
-  }
-}

+ 0 - 78
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JFile.java

@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import java.io.IOException;
-import java.util.ArrayList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Container for the Hadoop Record DDL.
- * The main components of the file are filename, list of included files,
- * and records defined in that file.
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JFile {
-  /** Possibly full name of the file */
-  private String mName;
-  /** Ordered list of included files */
-  private ArrayList<JFile> mInclFiles;
-  /** Ordered list of records declared in this file */
-  private ArrayList<JRecord> mRecords;
-    
-  /** Creates a new instance of JFile
-   *
-   * @param name possibly full pathname to the file
-   * @param inclFiles included files (as JFile)
-   * @param recList List of records defined within this file
-   */
-  public JFile(String name, ArrayList<JFile> inclFiles,
-               ArrayList<JRecord> recList) {
-    mName = name;
-    mInclFiles = inclFiles;
-    mRecords = recList;
-  }
-    
-  /** Strip the other pathname components and return the basename */
-  String getName() {
-    int idx = mName.lastIndexOf('/');
-    return (idx > 0) ? mName.substring(idx) : mName; 
-  }
-    
-  /** Generate record code in given language. Language should be all
-   *  lowercase.
-   */
-  public int genCode(String language, String destDir, ArrayList<String> options)
-    throws IOException {
-    CodeGenerator gen = CodeGenerator.get(language);
-    if (gen != null) {
-      gen.genCode(mName, mInclFiles, mRecords, destDir, options);
-    } else {
-      System.err.println("Cannot recognize language:"+language);
-      return 1;
-    }
-    return 0;
-  }
-}
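
JFile was the piece that tied the parsed DDL model to those generators: it carries the source file name, its included files, and its records, and genCode dispatches to the translator registered for the requested (all-lowercase) language, returning a non-zero status when the language is unknown. A hypothetical sketch of the call a driver such as the rcc tool would make (the records list is assumed to come from the DDL parser, and genCode may throw IOException):

    ArrayList<JRecord> records = parseDdl("queue.jr");          // hypothetical parser call
    JFile jfile = new JFile("queue.jr", new ArrayList<JFile>(), records);
    int rc = jfile.genCode("java", "build/gen-src", new ArrayList<String>());
    if (rc != 0) {
      System.err.println("unknown target language");
    }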

+ 0 - 99
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JFloat.java

@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JFloat extends JType {
-  
-  class JavaFloat extends JavaType {
-    
-    JavaFloat() {
-      super("float", "Float", "Float", "TypeID.RIOType.FLOAT");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "org.apache.hadoop.record.meta.TypeID.FloatTypeID";
-    }
-
-    @Override
-    void genHashCode(CodeBuffer cb, String fname) {
-      cb.append(Consts.RIO_PREFIX + "ret = Float.floatToIntBits("+fname+");\n");
-    }
-    
-    @Override
-    void genSlurpBytes(CodeBuffer cb, String b, String s, String l) {
-      cb.append("{\n");
-      cb.append("if ("+l+"<4) {\n");
-      cb.append("throw new java.io.IOException(\"Float is exactly 4 bytes."+
-                " Provided buffer is smaller.\");\n");
-      cb.append("}\n");
-      cb.append(s+"+=4; "+l+"-=4;\n");
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genCompareBytes(CodeBuffer cb) {
-      cb.append("{\n");
-      cb.append("if (l1<4 || l2<4) {\n");
-      cb.append("throw new java.io.IOException(\"Float is exactly 4 bytes."+
-                " Provided buffer is smaller.\");\n");
-      cb.append("}\n");
-      cb.append("float f1 = org.apache.hadoop.record.Utils.readFloat(b1, s1);\n");
-      cb.append("float f2 = org.apache.hadoop.record.Utils.readFloat(b2, s2);\n");
-      cb.append("if (f1 != f2) {\n");
-      cb.append("return ((f1-f2) < 0) ? -1 : 0;\n");
-      cb.append("}\n");
-      cb.append("s1+=4; s2+=4; l1-=4; l2-=4;\n");
-      cb.append("}\n");
-    }
-  }
-
-  class CppFloat extends CppType {
-    
-    CppFloat() {
-      super("float");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new ::hadoop::TypeID(::hadoop::RIOTYPE_FLOAT)";
-    }
-  }
-
-  /** Creates a new instance of JFloat */
-  public JFloat() {
-    setJavaType(new JavaFloat());
-    setCppType(new CppFloat());
-    setCType(new CType());
-  }
-  
-  @Override
-  String getSignature() {
-    return "f";
-  }
-}

+ 0 - 93
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JInt.java

@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-
-/**
- * Code generator for "int" type
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JInt extends JType {
-  
-  class JavaInt extends JavaType {
-    
-    JavaInt() {
-      super("int", "Int", "Integer", "TypeID.RIOType.INT");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "org.apache.hadoop.record.meta.TypeID.IntTypeID";
-    }
-
-    @Override
-    void genSlurpBytes(CodeBuffer cb, String b, String s, String l) {
-      cb.append("{\n");
-      cb.append("int i = org.apache.hadoop.record.Utils.readVInt("+b+", "+s+");\n");
-      cb.append("int z = org.apache.hadoop.record.Utils.getVIntSize(i);\n");
-      cb.append(s+"+=z; "+l+"-=z;\n");
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genCompareBytes(CodeBuffer cb) {
-      cb.append("{\n");
-      cb.append("int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n");
-      cb.append("int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);\n");
-      cb.append("if (i1 != i2) {\n");
-      cb.append("return ((i1-i2) < 0) ? -1 : 0;\n");
-      cb.append("}\n");
-      cb.append("int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);\n");
-      cb.append("int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);\n");
-      cb.append("s1+=z1; s2+=z2; l1-=z1; l2-=z2;\n");
-      cb.append("}\n");
-    }
-  }
-
-  class CppInt extends CppType {
-    
-    CppInt() {
-      super("int32_t");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new ::hadoop::TypeID(::hadoop::RIOTYPE_INT)";
-    }
-  }
-
-  /** Creates a new instance of JInt */
-  public JInt() {
-    setJavaType(new JavaInt());
-    setCppType(new CppInt());
-    setCType(new CType());
-  }
-  
-  @Override
-  String getSignature() {
-    return "i";
-  }
-}

+ 0 - 98
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JLong.java

@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Code generator for "long" type
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JLong extends JType {
-  
-  class JavaLong extends JavaType {
-    
-    JavaLong() {
-      super("long", "Long", "Long", "TypeID.RIOType.LONG");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "org.apache.hadoop.record.meta.TypeID.LongTypeID";
-    }
-
-    @Override
-    void genHashCode(CodeBuffer cb, String fname) {
-      cb.append(Consts.RIO_PREFIX + "ret = (int) ("+fname+"^("+
-          fname+">>>32));\n");
-    }
-    
-    @Override
-    void genSlurpBytes(CodeBuffer cb, String b, String s, String l) {
-      cb.append("{\n");
-      cb.append("long i = org.apache.hadoop.record.Utils.readVLong("+b+", "+s+");\n");
-      cb.append("int z = org.apache.hadoop.record.Utils.getVIntSize(i);\n");
-      cb.append(s+"+=z; "+l+"-=z;\n");
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genCompareBytes(CodeBuffer cb) {
-      cb.append("{\n");
-      cb.append("long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);\n");
-      cb.append("long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);\n");
-      cb.append("if (i1 != i2) {\n");
-      cb.append("return ((i1-i2) < 0) ? -1 : 0;\n");
-      cb.append("}\n");
-      cb.append("int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);\n");
-      cb.append("int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);\n");
-      cb.append("s1+=z1; s2+=z2; l1-=z1; l2-=z2;\n");
-      cb.append("}\n");
-    }
-  }
-
-  class CppLong extends CppType {
-    
-    CppLong() {
-      super("int64_t");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new ::hadoop::TypeID(::hadoop::RIOTYPE_LONG)";
-    }
-  }
-
-  /** Creates a new instance of JLong */
-  public JLong() {
-    setJavaType(new JavaLong());
-    setCppType(new CppLong());
-    setCType(new CType());
-  }
-  
-  @Override
-  String getSignature() {
-    return "l";
-  }
-}

+ 0 - 246
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JMap.java

@@ -1,246 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-
-/**
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JMap extends JCompType {
-  
-  static private int level = 0;
-  
-  static private String getLevel() { return Integer.toString(level); }
-  
-  static private void incrLevel() { level++; }
-  
-  static private void decrLevel() { level--; }
-  
-  static private String getId(String id) { return id+getLevel(); }
-  
-  private JType keyType;
-  private JType valueType;
-  
-  class JavaMap extends JavaCompType {
-    
-    JType.JavaType key;
-    JType.JavaType value;
-    
-    JavaMap(JType.JavaType key, JType.JavaType value) {
-      super("java.util.TreeMap<"+key.getWrapperType()+","+value.getWrapperType()+">",
-            "Map",
-            "java.util.TreeMap<"+key.getWrapperType()+","+value.getWrapperType()+">",
-            "TypeID.RIOType.MAP");
-      this.key = key;
-      this.value = value;
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new org.apache.hadoop.record.meta.MapTypeID(" + 
-        key.getTypeIDObjectString() + ", " + 
-        value.getTypeIDObjectString() + ")";
-    }
-
-    @Override
-    void genSetRTIFilter(CodeBuffer cb, Map<String, Integer> nestedStructMap) {
-      key.genSetRTIFilter(cb, nestedStructMap);
-      value.genSetRTIFilter(cb, nestedStructMap);
-    }
-
-    @Override
-    void genCompareTo(CodeBuffer cb, String fname, String other) {
-      String setType = "java.util.Set<"+key.getWrapperType()+"> ";
-      String iterType = "java.util.Iterator<"+key.getWrapperType()+"> ";
-      cb.append("{\n");
-      cb.append(setType+getId(Consts.RIO_PREFIX + "set1")+" = "+
-          fname+".keySet();\n");
-      cb.append(setType+getId(Consts.RIO_PREFIX + "set2")+" = "+
-          other+".keySet();\n");
-      cb.append(iterType+getId(Consts.RIO_PREFIX + "miter1")+" = "+
-                getId(Consts.RIO_PREFIX + "set1")+".iterator();\n");
-      cb.append(iterType+getId(Consts.RIO_PREFIX + "miter2")+" = "+
-                getId(Consts.RIO_PREFIX + "set2")+".iterator();\n");
-      cb.append("for(; "+getId(Consts.RIO_PREFIX + "miter1")+".hasNext() && "+
-                getId(Consts.RIO_PREFIX + "miter2")+".hasNext();) {\n");
-      cb.append(key.getType()+" "+getId(Consts.RIO_PREFIX + "k1")+
-                " = "+getId(Consts.RIO_PREFIX + "miter1")+".next();\n");
-      cb.append(key.getType()+" "+getId(Consts.RIO_PREFIX + "k2")+
-                " = "+getId(Consts.RIO_PREFIX + "miter2")+".next();\n");
-      key.genCompareTo(cb, getId(Consts.RIO_PREFIX + "k1"), 
-          getId(Consts.RIO_PREFIX + "k2"));
-      cb.append("if (" + Consts.RIO_PREFIX + "ret != 0) { return " + 
-          Consts.RIO_PREFIX + "ret; }\n");
-      cb.append("}\n");
-      cb.append(Consts.RIO_PREFIX + "ret = ("+getId(Consts.RIO_PREFIX + "set1")+
-          ".size() - "+getId(Consts.RIO_PREFIX + "set2")+".size());\n");
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) {
-      if (decl) {
-        cb.append(getType()+" "+fname+";\n");
-      }
-      cb.append("{\n");
-      incrLevel();
-      cb.append("org.apache.hadoop.record.Index " + 
-          getId(Consts.RIO_PREFIX + "midx")+" = " + 
-          Consts.RECORD_INPUT + ".startMap(\""+tag+"\");\n");
-      cb.append(fname+"=new "+getType()+"();\n");
-      cb.append("for (; !"+getId(Consts.RIO_PREFIX + "midx")+".done(); "+
-          getId(Consts.RIO_PREFIX + "midx")+".incr()) {\n");
-      key.genReadMethod(cb, getId(Consts.RIO_PREFIX + "k"),
-          getId(Consts.RIO_PREFIX + "k"), true);
-      value.genReadMethod(cb, getId(Consts.RIO_PREFIX + "v"), 
-          getId(Consts.RIO_PREFIX + "v"), true);
-      cb.append(fname+".put("+getId(Consts.RIO_PREFIX + "k")+","+
-          getId(Consts.RIO_PREFIX + "v")+");\n");
-      cb.append("}\n");
-      cb.append(Consts.RECORD_INPUT + ".endMap(\""+tag+"\");\n");
-      decrLevel();
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genWriteMethod(CodeBuffer cb, String fname, String tag) {
-      String setType = "java.util.Set<java.util.Map.Entry<"+
-        key.getWrapperType()+","+value.getWrapperType()+">> ";
-      String entryType = "java.util.Map.Entry<"+
-        key.getWrapperType()+","+value.getWrapperType()+"> ";
-      String iterType = "java.util.Iterator<java.util.Map.Entry<"+
-        key.getWrapperType()+","+value.getWrapperType()+">> ";
-      cb.append("{\n");
-      incrLevel();
-      cb.append(Consts.RECORD_OUTPUT + ".startMap("+fname+",\""+tag+"\");\n");
-      cb.append(setType+getId(Consts.RIO_PREFIX + "es")+" = "+
-          fname+".entrySet();\n");
-      cb.append("for("+iterType+getId(Consts.RIO_PREFIX + "midx")+" = "+
-          getId(Consts.RIO_PREFIX + "es")+".iterator(); "+
-          getId(Consts.RIO_PREFIX + "midx")+".hasNext();) {\n");
-      cb.append(entryType+getId(Consts.RIO_PREFIX + "me")+" = "+
-          getId(Consts.RIO_PREFIX + "midx")+".next();\n");
-      cb.append(key.getType()+" "+getId(Consts.RIO_PREFIX + "k")+" = "+
-          getId(Consts.RIO_PREFIX + "me")+".getKey();\n");
-      cb.append(value.getType()+" "+getId(Consts.RIO_PREFIX + "v")+" = "+
-          getId(Consts.RIO_PREFIX + "me")+".getValue();\n");
-      key.genWriteMethod(cb, getId(Consts.RIO_PREFIX + "k"), 
-          getId(Consts.RIO_PREFIX + "k"));
-      value.genWriteMethod(cb, getId(Consts.RIO_PREFIX + "v"), 
-          getId(Consts.RIO_PREFIX + "v"));
-      cb.append("}\n");
-      cb.append(Consts.RECORD_OUTPUT + ".endMap("+fname+",\""+tag+"\");\n");
-      cb.append("}\n");
-      decrLevel();
-    }
-    
-    @Override
-    void genSlurpBytes(CodeBuffer cb, String b, String s, String l) {
-      cb.append("{\n");
-      incrLevel();
-      cb.append("int "+getId("mi")+
-                " = org.apache.hadoop.record.Utils.readVInt("+b+", "+s+");\n");
-      cb.append("int "+getId("mz")+
-                " = org.apache.hadoop.record.Utils.getVIntSize("+getId("mi")+");\n");
-      cb.append(s+"+="+getId("mz")+"; "+l+"-="+getId("mz")+";\n");
-      cb.append("for (int "+getId("midx")+" = 0; "+getId("midx")+
-                " < "+getId("mi")+"; "+getId("midx")+"++) {");
-      key.genSlurpBytes(cb, b, s, l);
-      value.genSlurpBytes(cb, b, s, l);
-      cb.append("}\n");
-      decrLevel();
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genCompareBytes(CodeBuffer cb) {
-      cb.append("{\n");
-      incrLevel();
-      cb.append("int "+getId("mi1")+
-                " = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n");
-      cb.append("int "+getId("mi2")+
-                " = org.apache.hadoop.record.Utils.readVInt(b2, s2);\n");
-      cb.append("int "+getId("mz1")+
-                " = org.apache.hadoop.record.Utils.getVIntSize("+getId("mi1")+");\n");
-      cb.append("int "+getId("mz2")+
-                " = org.apache.hadoop.record.Utils.getVIntSize("+getId("mi2")+");\n");
-      cb.append("s1+="+getId("mz1")+"; s2+="+getId("mz2")+
-                "; l1-="+getId("mz1")+"; l2-="+getId("mz2")+";\n");
-      cb.append("for (int "+getId("midx")+" = 0; "+getId("midx")+
-                " < "+getId("mi1")+" && "+getId("midx")+" < "+getId("mi2")+
-                "; "+getId("midx")+"++) {");
-      key.genCompareBytes(cb);
-      value.genSlurpBytes(cb, "b1", "s1", "l1");
-      value.genSlurpBytes(cb, "b2", "s2", "l2");
-      cb.append("}\n");
-      cb.append("if ("+getId("mi1")+" != "+getId("mi2")+
-                ") { return ("+getId("mi1")+"<"+getId("mi2")+")?-1:0; }\n");
-      decrLevel();
-      cb.append("}\n");
-    }
-  }
-  
-  class CppMap extends CppCompType {
-    
-    JType.CppType key;
-    JType.CppType value;
-    
-    CppMap(JType.CppType key, JType.CppType value) {
-      super("::std::map< "+key.getType()+", "+ value.getType()+" >");
-      this.key = key;
-      this.value = value;
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new ::hadoop::MapTypeID(" + 
-        key.getTypeIDObjectString() + ", " + 
-        value.getTypeIDObjectString() + ")";
-    }
-
-    @Override
-    void genSetRTIFilter(CodeBuffer cb) {
-      key.genSetRTIFilter(cb);
-      value.genSetRTIFilter(cb);
-    }
-
-  }
-  
-  /** Creates a new instance of JMap */
-  public JMap(JType t1, JType t2) {
-    setJavaType(new JavaMap(t1.getJavaType(), t2.getJavaType()));
-    setCppType(new CppMap(t1.getCppType(), t2.getCppType()));
-    setCType(new CType());
-    keyType = t1;
-    valueType = t2;
-  }
-  
-  @Override
-  String getSignature() {
-    return "{" + keyType.getSignature() + valueType.getSignature() +"}";
-  }
-}

+ 0 - 822
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JRecord.java

@@ -1,822 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.*;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JRecord extends JCompType {
-  
-  class JavaRecord extends JavaCompType {
-    
-    private String fullName;
-    private String name;
-    private String module;
-    private ArrayList<JField<JavaType>> fields =
-      new ArrayList<JField<JavaType>>();
-    
-    JavaRecord(String name, ArrayList<JField<JType>> flist) {
-      super(name, "Record", name, "TypeID.RIOType.STRUCT");
-      this.fullName = name;
-      int idx = name.lastIndexOf('.');
-      this.name = name.substring(idx+1);
-      this.module = name.substring(0, idx);
-      for (Iterator<JField<JType>> iter = flist.iterator(); iter.hasNext();) {
-        JField<JType> f = iter.next();
-        fields.add(new JField<JavaType>(f.getName(), f.getType().getJavaType()));
-      }
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new org.apache.hadoop.record.meta.StructTypeID(" + 
-      fullName + ".getTypeInfo())";
-    }
-
-    @Override
-    void genSetRTIFilter(CodeBuffer cb, Map<String, Integer> nestedStructMap) {
-      // ignore if we've already set the type filter for this record
-      if (!nestedStructMap.containsKey(fullName)) {
-        // we set the RTI filter here
-        cb.append(fullName + ".setTypeFilter(rti.getNestedStructTypeInfo(\""+
-            name + "\"));\n");
-        nestedStructMap.put(fullName, null);
-      }
-    }
-
-    // for each typeInfo in the filter, we see if there's a similar one in the record. 
-    // Since we store typeInfos in ArrayLists, this search is O(n squared). We could do it
-    // faster if we also stored a map (of TypeInfo to index), but since setupRtiFields() is
-    // called only once when deserializing, we stick with the linear search, as the code is easier.
-    void genSetupRtiFields(CodeBuffer cb) {
-      cb.append("private static void setupRtiFields()\n{\n");
-      cb.append("if (null == " + Consts.RTI_FILTER + ") return;\n");
-      cb.append("// we may already have done this\n");
-      cb.append("if (null != " + Consts.RTI_FILTER_FIELDS + ") return;\n");
-      cb.append("int " + Consts.RIO_PREFIX + "i, " + Consts.RIO_PREFIX + "j;\n");
-      cb.append(Consts.RTI_FILTER_FIELDS + " = new int [" + 
-          Consts.RIO_PREFIX + "rtiFilter.getFieldTypeInfos().size()];\n");
-      cb.append("for (" + Consts.RIO_PREFIX + "i=0; " + Consts.RIO_PREFIX + "i<"+
-          Consts.RTI_FILTER_FIELDS + ".length; " + Consts.RIO_PREFIX + "i++) {\n");
-      cb.append(Consts.RTI_FILTER_FIELDS + "[" + Consts.RIO_PREFIX + "i] = 0;\n");
-      cb.append("}\n");
-      cb.append("java.util.Iterator<org.apache.hadoop.record.meta." +
-          "FieldTypeInfo> " + Consts.RIO_PREFIX + "itFilter = " + 
-          Consts.RIO_PREFIX + "rtiFilter.getFieldTypeInfos().iterator();\n");
-      cb.append(Consts.RIO_PREFIX + "i=0;\n");
-      cb.append("while (" + Consts.RIO_PREFIX + "itFilter.hasNext()) {\n");
-      cb.append("org.apache.hadoop.record.meta.FieldTypeInfo " + 
-          Consts.RIO_PREFIX + "tInfoFilter = " + 
-          Consts.RIO_PREFIX + "itFilter.next();\n");
-      cb.append("java.util.Iterator<org.apache.hadoop.record.meta." + 
-          "FieldTypeInfo> " + Consts.RIO_PREFIX + "it = " + Consts.RTI_VAR + 
-          ".getFieldTypeInfos().iterator();\n");
-      cb.append(Consts.RIO_PREFIX + "j=1;\n");
-      cb.append("while (" + Consts.RIO_PREFIX + "it.hasNext()) {\n");
-      cb.append("org.apache.hadoop.record.meta.FieldTypeInfo " + 
-          Consts.RIO_PREFIX + "tInfo = " + Consts.RIO_PREFIX + "it.next();\n");
-      cb.append("if (" + Consts.RIO_PREFIX + "tInfo.equals(" +  
-          Consts.RIO_PREFIX + "tInfoFilter)) {\n");
-      cb.append(Consts.RTI_FILTER_FIELDS + "[" + Consts.RIO_PREFIX + "i] = " +
-          Consts.RIO_PREFIX + "j;\n");
-      cb.append("break;\n");
-      cb.append("}\n");
-      cb.append(Consts.RIO_PREFIX + "j++;\n");
-      cb.append("}\n");
-      /*int ct = 0;
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        ct++;
-        JField<JavaType> jf = i.next();
-        JavaType type = jf.getType();
-        String name = jf.getName();
-        if (ct != 1) {
-          cb.append("else ");
-        }
-        type.genRtiFieldCondition(cb, name, ct);
-      }
-      if (ct != 0) {
-        cb.append("else {\n");
-        cb.append("rtiFilterFields[i] = 0;\n");
-        cb.append("}\n");
-      }*/
-      cb.append(Consts.RIO_PREFIX + "i++;\n");
-      cb.append("}\n");
-      cb.append("}\n");
-    }
-
-    @Override
-    void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) {
-      if (decl) {
-        cb.append(fullName+" "+fname+";\n");
-      }
-      cb.append(fname+"= new "+fullName+"();\n");
-      cb.append(fname+".deserialize(" + Consts.RECORD_INPUT + ",\""+tag+"\");\n");
-    }
-    
-    @Override
-    void genWriteMethod(CodeBuffer cb, String fname, String tag) {
-      cb.append(fname+".serialize(" + Consts.RECORD_OUTPUT + ",\""+tag+"\");\n");
-    }
-    
-    @Override
-    void genSlurpBytes(CodeBuffer cb, String b, String s, String l) {
-      cb.append("{\n");
-      cb.append("int r = "+fullName+
-                ".Comparator.slurpRaw("+b+","+s+","+l+");\n");
-      cb.append(s+"+=r; "+l+"-=r;\n");
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genCompareBytes(CodeBuffer cb) {
-      cb.append("{\n");
-      cb.append("int r1 = "+fullName+
-                ".Comparator.compareRaw(b1,s1,l1,b2,s2,l2);\n");
-      cb.append("if (r1 <= 0) { return r1; }\n");
-      cb.append("s1+=r1; s2+=r1; l1-=r1; l2-=r1;\n");
-      cb.append("}\n");
-    }
-    
-    void genCode(String destDir, ArrayList<String> options) throws IOException {
-      String pkg = module;
-      String pkgpath = pkg.replaceAll("\\.", "/");
-      File pkgdir = new File(destDir, pkgpath);
-
-      final File jfile = new File(pkgdir, name+".java");
-      if (!pkgdir.exists()) {
-        // create the pkg directory
-        boolean ret = pkgdir.mkdirs();
-        if (!ret) {
-          throw new IOException("Cannot create directory: "+pkgpath);
-        }
-      } else if (!pkgdir.isDirectory()) {
-        // not a directory
-        throw new IOException(pkgpath+" is not a directory.");
-      }
-
-      CodeBuffer cb = new CodeBuffer();
-      cb.append("// File generated by hadoop record compiler. Do not edit.\n");
-      cb.append("package "+module+";\n\n");
-      cb.append("public class "+name+
-                " extends org.apache.hadoop.record.Record {\n");
-      
-      // type information declarations
-      cb.append("private static final " + 
-          "org.apache.hadoop.record.meta.RecordTypeInfo " + 
-          Consts.RTI_VAR + ";\n");
-      cb.append("private static " + 
-          "org.apache.hadoop.record.meta.RecordTypeInfo " + 
-          Consts.RTI_FILTER + ";\n");
-      cb.append("private static int[] " + Consts.RTI_FILTER_FIELDS + ";\n");
-      
-      // static init for type information
-      cb.append("static {\n");
-      cb.append(Consts.RTI_VAR + " = " +
-          "new org.apache.hadoop.record.meta.RecordTypeInfo(\"" +
-          name + "\");\n");
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genStaticTypeInfo(cb, name);
-      }
-      cb.append("}\n\n");
-
-      // field definitions
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genDecl(cb, name);
-      }
-
-      // default constructor
-      cb.append("public "+name+"() { }\n");
-      
-      // constructor
-      cb.append("public "+name+"(\n");
-      int fIdx = 0;
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext(); fIdx++) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genConstructorParam(cb, name);
-        cb.append((!i.hasNext())?"":",\n");
-      }
-      cb.append(") {\n");
-      fIdx = 0;
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext(); fIdx++) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genConstructorSet(cb, name);
-      }
-      cb.append("}\n");
-
-      // getter/setter for type info
-      cb.append("public static org.apache.hadoop.record.meta.RecordTypeInfo"
-              + " getTypeInfo() {\n");
-      cb.append("return " + Consts.RTI_VAR + ";\n");
-      cb.append("}\n");
-      cb.append("public static void setTypeFilter("
-          + "org.apache.hadoop.record.meta.RecordTypeInfo rti) {\n");
-      cb.append("if (null == rti) return;\n");
-      cb.append(Consts.RTI_FILTER + " = rti;\n");
-      cb.append(Consts.RTI_FILTER_FIELDS + " = null;\n");
-      // set RTIFilter for nested structs.
-      // To prevent setting up the type filter for the same struct more than once, 
-      // we use a hash map to keep track of what we've set. 
-      Map<String, Integer> nestedStructMap = new HashMap<String, Integer>();
-      for (JField<JavaType> jf : fields) {
-        JavaType type = jf.getType();
-        type.genSetRTIFilter(cb, nestedStructMap);
-      }
-      cb.append("}\n");
-
-      // setupRtiFields()
-      genSetupRtiFields(cb);
-
-      // getters/setters for member variables
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genGetSet(cb, name);
-      }
-      
-      // serialize()
-      cb.append("public void serialize("+ 
-          "final org.apache.hadoop.record.RecordOutput " + 
-          Consts.RECORD_OUTPUT + ", final String " + Consts.TAG + ")\n"+
-                "throws java.io.IOException {\n");
-      cb.append(Consts.RECORD_OUTPUT + ".startRecord(this," + Consts.TAG + ");\n");
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genWriteMethod(cb, name, name);
-      }
-      cb.append(Consts.RECORD_OUTPUT + ".endRecord(this," + Consts.TAG+");\n");
-      cb.append("}\n");
-
-      // deserializeWithoutFilter()
-      cb.append("private void deserializeWithoutFilter("+
-                "final org.apache.hadoop.record.RecordInput " + 
-                Consts.RECORD_INPUT + ", final String " + Consts.TAG + ")\n"+
-                "throws java.io.IOException {\n");
-      cb.append(Consts.RECORD_INPUT + ".startRecord(" + Consts.TAG + ");\n");
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genReadMethod(cb, name, name, false);
-      }
-      cb.append(Consts.RECORD_INPUT + ".endRecord(" + Consts.TAG+");\n");
-      cb.append("}\n");
-      
-      // deserialize()
-      cb.append("public void deserialize(final " +
-          "org.apache.hadoop.record.RecordInput " + 
-          Consts.RECORD_INPUT + ", final String " + Consts.TAG + ")\n"+
-          "throws java.io.IOException {\n");
-      cb.append("if (null == " + Consts.RTI_FILTER + ") {\n");
-      cb.append("deserializeWithoutFilter(" + Consts.RECORD_INPUT + ", " + 
-          Consts.TAG + ");\n");
-      cb.append("return;\n");
-      cb.append("}\n");
-      cb.append("// if we're here, we need to read based on version info\n");
-      cb.append(Consts.RECORD_INPUT + ".startRecord(" + Consts.TAG + ");\n");
-      cb.append("setupRtiFields();\n");
-      cb.append("for (int " + Consts.RIO_PREFIX + "i=0; " + Consts.RIO_PREFIX + 
-          "i<" + Consts.RTI_FILTER + ".getFieldTypeInfos().size(); " + 
-          Consts.RIO_PREFIX + "i++) {\n");
-      int ct = 0;
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        ct++;
-        if (1 != ct) {
-          cb.append("else ");
-        }
-        cb.append("if (" + ct + " == " + Consts.RTI_FILTER_FIELDS + "[" +
-            Consts.RIO_PREFIX + "i]) {\n");
-        type.genReadMethod(cb, name, name, false);
-        cb.append("}\n");
-      }
-      if (0 != ct) {
-        cb.append("else {\n");
-        cb.append("java.util.ArrayList<"
-                + "org.apache.hadoop.record.meta.FieldTypeInfo> typeInfos = "
-                + "(java.util.ArrayList<"
-                + "org.apache.hadoop.record.meta.FieldTypeInfo>)"
-                + "(" + Consts.RTI_FILTER + ".getFieldTypeInfos());\n");
-        cb.append("org.apache.hadoop.record.meta.Utils.skip(" + 
-            Consts.RECORD_INPUT + ", " + "typeInfos.get(" + Consts.RIO_PREFIX + 
-            "i).getFieldID(), typeInfos.get(" + 
-            Consts.RIO_PREFIX + "i).getTypeID());\n");
-        cb.append("}\n");
-      }
-      cb.append("}\n");
-      cb.append(Consts.RECORD_INPUT + ".endRecord(" + Consts.TAG+");\n");
-      cb.append("}\n");
-
-      // compareTo()
-      cb.append("public int compareTo (final Object " + Consts.RIO_PREFIX + 
-          "peer_) throws ClassCastException {\n");
-      cb.append("if (!(" + Consts.RIO_PREFIX + "peer_ instanceof "+name+")) {\n");
-      cb.append("throw new ClassCastException(\"Comparing different types of records.\");\n");
-      cb.append("}\n");
-      cb.append(name+" " + Consts.RIO_PREFIX + "peer = ("+name+") " + 
-          Consts.RIO_PREFIX + "peer_;\n");
-      cb.append("int " + Consts.RIO_PREFIX + "ret = 0;\n");
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genCompareTo(cb, name, Consts.RIO_PREFIX + "peer."+name);
-        cb.append("if (" + Consts.RIO_PREFIX + "ret != 0) return " + 
-            Consts.RIO_PREFIX + "ret;\n");
-      }
-      cb.append("return " + Consts.RIO_PREFIX + "ret;\n");
-      cb.append("}\n");
-      
-      // equals()
-      cb.append("public boolean equals(final Object " + Consts.RIO_PREFIX + 
-          "peer_) {\n");
-      cb.append("if (!(" + Consts.RIO_PREFIX + "peer_ instanceof "+name+")) {\n");
-      cb.append("return false;\n");
-      cb.append("}\n");
-      cb.append("if (" + Consts.RIO_PREFIX + "peer_ == this) {\n");
-      cb.append("return true;\n");
-      cb.append("}\n");
-      cb.append(name+" " + Consts.RIO_PREFIX + "peer = ("+name+") " + 
-          Consts.RIO_PREFIX + "peer_;\n");
-      cb.append("boolean " + Consts.RIO_PREFIX + "ret = false;\n");
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genEquals(cb, name, Consts.RIO_PREFIX + "peer."+name);
-        cb.append("if (!" + Consts.RIO_PREFIX + "ret) return " + 
-            Consts.RIO_PREFIX + "ret;\n");
-      }
-      cb.append("return " + Consts.RIO_PREFIX + "ret;\n");
-      cb.append("}\n");
-
-      // clone()
-      cb.append("public Object clone() throws CloneNotSupportedException {\n");
-      cb.append(name+" " + Consts.RIO_PREFIX + "other = new "+name+"();\n");
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genClone(cb, name);
-      }
-      cb.append("return " + Consts.RIO_PREFIX + "other;\n");
-      cb.append("}\n");
-      
-      cb.append("public int hashCode() {\n");
-      cb.append("int " + Consts.RIO_PREFIX + "result = 17;\n");
-      cb.append("int " + Consts.RIO_PREFIX + "ret;\n");
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genHashCode(cb, name);
-        cb.append(Consts.RIO_PREFIX + "result = 37*" + Consts.RIO_PREFIX + 
-            "result + " + Consts.RIO_PREFIX + "ret;\n");
-      }
-      cb.append("return " + Consts.RIO_PREFIX + "result;\n");
-      cb.append("}\n");
-      
-      cb.append("public static String signature() {\n");
-      cb.append("return \""+getSignature()+"\";\n");
-      cb.append("}\n");
-      
-      cb.append("public static class Comparator extends"+
-                " org.apache.hadoop.record.RecordComparator {\n");
-      cb.append("public Comparator() {\n");
-      cb.append("super("+name+".class);\n");
-      cb.append("}\n");
-      
-      cb.append("static public int slurpRaw(byte[] b, int s, int l) {\n");
-      cb.append("try {\n");
-      cb.append("int os = s;\n");
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genSlurpBytes(cb, "b","s","l");
-      }
-      cb.append("return (os - s);\n");
-      cb.append("} catch(java.io.IOException e) {\n");
-      cb.append("throw new RuntimeException(e);\n");
-      cb.append("}\n");
-      cb.append("}\n");
-      
-      cb.append("static public int compareRaw(byte[] b1, int s1, int l1,\n");
-      cb.append("                             byte[] b2, int s2, int l2) {\n");
-      cb.append("try {\n");
-      cb.append("int os1 = s1;\n");
-      for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) {
-        JField<JavaType> jf = i.next();
-        String name = jf.getName();
-        JavaType type = jf.getType();
-        type.genCompareBytes(cb);
-      }
-      cb.append("return (os1 - s1);\n");
-      cb.append("} catch(java.io.IOException e) {\n");
-      cb.append("throw new RuntimeException(e);\n");
-      cb.append("}\n");
-      cb.append("}\n");
-      cb.append("public int compare(byte[] b1, int s1, int l1,\n");
-      cb.append("                   byte[] b2, int s2, int l2) {\n");
-      cb.append("int ret = compareRaw(b1,s1,l1,b2,s2,l2);\n");
-      cb.append("return (ret == -1)? -1 : ((ret==0)? 1 : 0);");
-      cb.append("}\n");
-      cb.append("}\n\n");
-      cb.append("static {\n");
-      cb.append("org.apache.hadoop.record.RecordComparator.define("
-                +name+".class, new Comparator());\n");
-      cb.append("}\n");
-      cb.append("}\n");
-
-      FileWriter jj = new FileWriter(jfile);
-      try {
-        jj.write(cb.toString());
-      } finally {
-        jj.close();
-      }
-    }
-  }
-  
-  class CppRecord extends CppCompType {
-    
-    private String fullName;
-    private String name;
-    private String module;
-    private ArrayList<JField<CppType>> fields = 
-      new ArrayList<JField<CppType>>();
-    
-    CppRecord(String name, ArrayList<JField<JType>> flist) {
-      super(name.replaceAll("\\.","::"));
-      this.fullName = name.replaceAll("\\.", "::");
-      int idx = name.lastIndexOf('.');
-      this.name = name.substring(idx+1);
-      this.module = name.substring(0, idx).replaceAll("\\.", "::");
-      for (Iterator<JField<JType>> iter = flist.iterator(); iter.hasNext();) {
-        JField<JType> f = iter.next();
-        fields.add(new JField<CppType>(f.getName(), f.getType().getCppType()));
-      }
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new ::hadoop::StructTypeID(" + 
-      fullName + "::getTypeInfo().getFieldTypeInfos())";
-    }
-
-    String genDecl(String fname) {
-      return "  "+name+" "+fname+";\n";
-    }
-    
-    @Override
-    void genSetRTIFilter(CodeBuffer cb) {
-      // we set the RTI filter here
-      cb.append(fullName + "::setTypeFilter(rti.getNestedStructTypeInfo(\""+
-          name + "\"));\n");
-    }
-
-    void genSetupRTIFields(CodeBuffer cb) {
-      cb.append("void " + fullName + "::setupRtiFields() {\n");
-      cb.append("if (NULL == p" + Consts.RTI_FILTER + ") return;\n");
-      cb.append("if (NULL != p" + Consts.RTI_FILTER_FIELDS + ") return;\n");
-      cb.append("p" + Consts.RTI_FILTER_FIELDS + " = new int[p" + 
-          Consts.RTI_FILTER + "->getFieldTypeInfos().size()];\n");
-      cb.append("for (unsigned int " + Consts.RIO_PREFIX + "i=0; " + 
-          Consts.RIO_PREFIX + "i<p" + Consts.RTI_FILTER + 
-          "->getFieldTypeInfos().size(); " + Consts.RIO_PREFIX + "i++) {\n");
-      cb.append("p" + Consts.RTI_FILTER_FIELDS + "[" + Consts.RIO_PREFIX + 
-          "i] = 0;\n");
-      cb.append("}\n");
-      cb.append("for (unsigned int " + Consts.RIO_PREFIX + "i=0; " + 
-          Consts.RIO_PREFIX + "i<p" + Consts.RTI_FILTER + 
-          "->getFieldTypeInfos().size(); " + Consts.RIO_PREFIX + "i++) {\n");
-      cb.append("for (unsigned int " + Consts.RIO_PREFIX + "j=0; " + 
-          Consts.RIO_PREFIX + "j<p" + Consts.RTI_VAR + 
-          "->getFieldTypeInfos().size(); " + Consts.RIO_PREFIX + "j++) {\n");
-      cb.append("if (*(p" + Consts.RTI_FILTER + "->getFieldTypeInfos()[" + 
-          Consts.RIO_PREFIX + "i]) == *(p" + Consts.RTI_VAR + 
-          "->getFieldTypeInfos()[" + Consts.RIO_PREFIX + "j])) {\n");
-      cb.append("p" + Consts.RTI_FILTER_FIELDS + "[" + Consts.RIO_PREFIX + 
-          "i] = " + Consts.RIO_PREFIX + "j+1;\n");
-      cb.append("break;\n");
-      cb.append("}\n");
-      cb.append("}\n");
-      cb.append("}\n");
-      cb.append("}\n");
-    }
-    
-    void genCode(FileWriter hh, FileWriter cc, ArrayList<String> options)
-      throws IOException {
-      CodeBuffer hb = new CodeBuffer();
-      
-      String[] ns = module.split("::");
-      for (int i = 0; i < ns.length; i++) {
-        hb.append("namespace "+ns[i]+" {\n");
-      }
-      
-      hb.append("class "+name+" : public ::hadoop::Record {\n");
-      hb.append("private:\n");
-      
-      for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) {
-        JField<CppType> jf = i.next();
-        String name = jf.getName();
-        CppType type = jf.getType();
-        type.genDecl(hb, name);
-      }
-      
-      // type info vars
-      hb.append("static ::hadoop::RecordTypeInfo* p" + Consts.RTI_VAR + ";\n");
-      hb.append("static ::hadoop::RecordTypeInfo* p" + Consts.RTI_FILTER + ";\n");
-      hb.append("static int* p" + Consts.RTI_FILTER_FIELDS + ";\n");
-      hb.append("static ::hadoop::RecordTypeInfo* setupTypeInfo();\n");
-      hb.append("static void setupRtiFields();\n");
-      hb.append("virtual void deserializeWithoutFilter(::hadoop::IArchive& " + 
-          Consts.RECORD_INPUT + ", const char* " + Consts.TAG + ");\n");
-      hb.append("public:\n");
-      hb.append("static const ::hadoop::RecordTypeInfo& getTypeInfo() " +
-          "{return *p" + Consts.RTI_VAR + ";}\n");
-      hb.append("static void setTypeFilter(const ::hadoop::RecordTypeInfo& rti);\n");
-      hb.append("static void setTypeFilter(const ::hadoop::RecordTypeInfo* prti);\n");
-      hb.append("virtual void serialize(::hadoop::OArchive& " + 
-          Consts.RECORD_OUTPUT + ", const char* " + Consts.TAG + ") const;\n");
-      hb.append("virtual void deserialize(::hadoop::IArchive& " + 
-          Consts.RECORD_INPUT + ", const char* " + Consts.TAG + ");\n");
-      hb.append("virtual const ::std::string& type() const;\n");
-      hb.append("virtual const ::std::string& signature() const;\n");
-      hb.append("virtual bool operator<(const "+name+"& peer_) const;\n");
-      hb.append("virtual bool operator==(const "+name+"& peer_) const;\n");
-      hb.append("virtual ~"+name+"() {};\n");
-      for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) {
-        JField<CppType> jf = i.next();
-        String name = jf.getName();
-        CppType type = jf.getType();
-        type.genGetSet(hb, name);
-      }
-      hb.append("}; // end record "+name+"\n");
-      for (int i=ns.length-1; i>=0; i--) {
-        hb.append("} // end namespace "+ns[i]+"\n");
-      }
-      
-      hh.write(hb.toString());
-      
-      CodeBuffer cb = new CodeBuffer();
-
-      // initialize type info vars
-      cb.append("::hadoop::RecordTypeInfo* " + fullName + "::p" + 
-          Consts.RTI_VAR + " = " + fullName + "::setupTypeInfo();\n");
-      cb.append("::hadoop::RecordTypeInfo* " + fullName + "::p" + 
-          Consts.RTI_FILTER + " = NULL;\n");
-      cb.append("int* " + fullName + "::p" + 
-          Consts.RTI_FILTER_FIELDS + " = NULL;\n\n");
-
-      // setupTypeInfo()
-      cb.append("::hadoop::RecordTypeInfo* "+fullName+"::setupTypeInfo() {\n");
-      cb.append("::hadoop::RecordTypeInfo* p = new ::hadoop::RecordTypeInfo(\"" + 
-          name + "\");\n");
-      for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) {
-        JField<CppType> jf = i.next();
-        String name = jf.getName();
-        CppType type = jf.getType();
-        type.genStaticTypeInfo(cb, name);
-      }
-      cb.append("return p;\n");
-      cb.append("}\n");
-
-      // setTypeFilter()
-      cb.append("void "+fullName+"::setTypeFilter(const " +
-          "::hadoop::RecordTypeInfo& rti) {\n");
-      cb.append("if (NULL != p" + Consts.RTI_FILTER + ") {\n");
-      cb.append("delete p" + Consts.RTI_FILTER + ";\n");
-      cb.append("}\n");
-      cb.append("p" + Consts.RTI_FILTER + " = new ::hadoop::RecordTypeInfo(rti);\n");
-      cb.append("if (NULL != p" + Consts.RTI_FILTER_FIELDS + ") {\n");
-      cb.append("delete p" + Consts.RTI_FILTER_FIELDS + ";\n");
-      cb.append("}\n");
-      cb.append("p" + Consts.RTI_FILTER_FIELDS + " = NULL;\n");
-      // set RTIFilter for nested structs. We may end up with multiple lines that 
-      // do the same thing, if the same struct is nested in more than one field, 
-      // but that's OK. 
-      for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) {
-        JField<CppType> jf = i.next();
-        CppType type = jf.getType();
-        type.genSetRTIFilter(cb);
-      }
-      cb.append("}\n");
-      
-      // setTypeFilter()
-      cb.append("void "+fullName+"::setTypeFilter(const " +
-          "::hadoop::RecordTypeInfo* prti) {\n");
-      cb.append("if (NULL != prti) {\n");
-      cb.append("setTypeFilter(*prti);\n");
-      cb.append("}\n");
-      cb.append("}\n");
-
-      // setupRtiFields()
-      genSetupRTIFields(cb);
-
-      // serialize()
-      cb.append("void "+fullName+"::serialize(::hadoop::OArchive& " + 
-          Consts.RECORD_OUTPUT + ", const char* " + Consts.TAG + ") const {\n");
-      cb.append(Consts.RECORD_OUTPUT + ".startRecord(*this," + 
-          Consts.TAG + ");\n");
-      for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) {
-        JField<CppType> jf = i.next();
-        String name = jf.getName();
-        CppType type = jf.getType();
-        if (type instanceof JBuffer.CppBuffer) {
-          cb.append(Consts.RECORD_OUTPUT + ".serialize("+name+","+name+
-              ".length(),\""+name+"\");\n");
-        } else {
-          cb.append(Consts.RECORD_OUTPUT + ".serialize("+name+",\""+
-              name+"\");\n");
-        }
-      }
-      cb.append(Consts.RECORD_OUTPUT + ".endRecord(*this," + Consts.TAG + ");\n");
-      cb.append("return;\n");
-      cb.append("}\n");
-      
-      // deserializeWithoutFilter()
-      cb.append("void "+fullName+"::deserializeWithoutFilter(::hadoop::IArchive& " +
-          Consts.RECORD_INPUT + ", const char* " + Consts.TAG + ") {\n");
-      cb.append(Consts.RECORD_INPUT + ".startRecord(*this," + 
-          Consts.TAG + ");\n");
-      for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) {
-        JField<CppType> jf = i.next();
-        String name = jf.getName();
-        CppType type = jf.getType();
-        if (type instanceof JBuffer.CppBuffer) {
-          cb.append("{\nsize_t len=0; " + Consts.RECORD_INPUT + ".deserialize("+
-              name+",len,\""+name+"\");\n}\n");
-        } else {
-          cb.append(Consts.RECORD_INPUT + ".deserialize("+name+",\""+
-              name+"\");\n");
-        }
-      }
-      cb.append(Consts.RECORD_INPUT + ".endRecord(*this," + Consts.TAG + ");\n");
-      cb.append("return;\n");
-      cb.append("}\n");
-      
-      // deserialize()
-      cb.append("void "+fullName+"::deserialize(::hadoop::IArchive& " +
-          Consts.RECORD_INPUT + ", const char* " + Consts.TAG + ") {\n");
-      cb.append("if (NULL == p" + Consts.RTI_FILTER + ") {\n");
-      cb.append("deserializeWithoutFilter(" + Consts.RECORD_INPUT + ", " + 
-          Consts.TAG + ");\n");
-      cb.append("return;\n");
-      cb.append("}\n");
-      cb.append("// if we're here, we need to read based on version info\n");
-      cb.append(Consts.RECORD_INPUT + ".startRecord(*this," + 
-          Consts.TAG + ");\n");
-      cb.append("setupRtiFields();\n");
-      cb.append("for (unsigned int " + Consts.RIO_PREFIX + "i=0; " + 
-          Consts.RIO_PREFIX + "i<p" + Consts.RTI_FILTER + 
-          "->getFieldTypeInfos().size(); " + Consts.RIO_PREFIX + "i++) {\n");
-      int ct = 0;
-      for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) {
-        JField<CppType> jf = i.next();
-        String name = jf.getName();
-        CppType type = jf.getType();
-        ct++;
-        if (1 != ct) {
-          cb.append("else ");
-        }
-        cb.append("if (" + ct + " == p" + Consts.RTI_FILTER_FIELDS + "[" +
-            Consts.RIO_PREFIX + "i]) {\n");
-        if (type instanceof JBuffer.CppBuffer) {
-          cb.append("{\nsize_t len=0; " + Consts.RECORD_INPUT + ".deserialize("+
-              name+",len,\""+name+"\");\n}\n");
-        } else {
-          cb.append(Consts.RECORD_INPUT + ".deserialize("+name+",\""+
-              name+"\");\n");
-        }
-        cb.append("}\n");
-      }
-      if (0 != ct) {
-        cb.append("else {\n");
-        cb.append("const std::vector< ::hadoop::FieldTypeInfo* >& typeInfos = p" + 
-            Consts.RTI_FILTER + "->getFieldTypeInfos();\n");
-        cb.append("::hadoop::Utils::skip(" + Consts.RECORD_INPUT + 
-            ", typeInfos[" + Consts.RIO_PREFIX + "i]->getFieldID()->c_str()" + 
-            ", *(typeInfos[" + Consts.RIO_PREFIX + "i]->getTypeID()));\n");
-        cb.append("}\n");
-      }
-      cb.append("}\n");
-      cb.append(Consts.RECORD_INPUT + ".endRecord(*this, " + Consts.TAG+");\n");
-      cb.append("}\n");
-
-      // operator <
-      cb.append("bool "+fullName+"::operator< (const "+fullName+"& peer_) const {\n");
-      cb.append("return (1\n");
-      for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) {
-        JField<CppType> jf = i.next();
-        String name = jf.getName();
-        cb.append("&& ("+name+" < peer_."+name+")\n");
-      }
-      cb.append(");\n");
-      cb.append("}\n");
-      
-      cb.append("bool "+fullName+"::operator== (const "+fullName+"& peer_) const {\n");
-      cb.append("return (1\n");
-      for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) {
-        JField<CppType> jf = i.next();
-        String name = jf.getName();
-        cb.append("&& ("+name+" == peer_."+name+")\n");
-      }
-      cb.append(");\n");
-      cb.append("}\n");
-      
-      cb.append("const ::std::string&"+fullName+"::type() const {\n");
-      cb.append("static const ::std::string type_(\""+name+"\");\n");
-      cb.append("return type_;\n");
-      cb.append("}\n");
-      
-      cb.append("const ::std::string&"+fullName+"::signature() const {\n");
-      cb.append("static const ::std::string sig_(\""+getSignature()+"\");\n");
-      cb.append("return sig_;\n");
-      cb.append("}\n");
-      
-      cc.write(cb.toString());
-    }
-  }
-  
-  class CRecord extends CCompType {
-    
-  }
-  
-  private String signature;
-  
-  /**
-   * Creates a new instance of JRecord
-   */
-  public JRecord(String name, ArrayList<JField<JType>> flist) {
-    setJavaType(new JavaRecord(name, flist));
-    setCppType(new CppRecord(name, flist));
-    setCType(new CRecord());
-    // precompute signature
-    int idx = name.lastIndexOf('.');
-    String recName = name.substring(idx+1);
-    StringBuilder sb = new StringBuilder();
-    sb.append("L").append(recName).append("(");
-    for (Iterator<JField<JType>> i = flist.iterator(); i.hasNext();) {
-      String s = i.next().getType().getSignature();
-      sb.append(s);
-    }
-    sb.append(")");
-    signature = sb.toString();
-  }
-  
-  @Override
-  String getSignature() {
-    return signature;
-  }
-  
-  void genCppCode(FileWriter hh, FileWriter cc, ArrayList<String> options)
-    throws IOException {
-    ((CppRecord)getCppType()).genCode(hh, cc, options);
-  }
-  
-  void genJavaCode(String destDir, ArrayList<String> options)
-    throws IOException {
-    ((JavaRecord)getJavaType()).genCode(destDir, options);
-  }
-}
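The JRecord generator removed above belongs to the deprecated Record I/O stack whose Javadoc points at Avro as the replacement. As a hedged illustration only (not part of this change), a record of the same shape could be described and round-tripped with Avro's generic API; the record name, field names, and values below are made up for the example:

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;

public class AvroRecordSketch {
  public static void main(String[] args) throws IOException {
    // Schema playing the role of a compiled record: one string field and one int field.
    Schema schema = SchemaBuilder.record("ExampleRecord")
        .namespace("org.example")
        .fields()
        .requiredString("name")
        .requiredInt("count")
        .endRecord();

    // Build an instance of the record.
    GenericRecord rec = new GenericData.Record(schema);
    rec.put("name", "hello");
    rec.put("count", 42);

    // Serialize to bytes (what the generated serialize()/RecordOutput code used to do).
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
    new GenericDatumWriter<GenericRecord>(schema).write(rec, encoder);
    encoder.flush();

    // Deserialize; Avro schema resolution takes over the role of the RTI filter.
    BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(out.toByteArray(), null);
    GenericRecord back = new GenericDatumReader<GenericRecord>(schema).read(null, decoder);
    System.out.println(back);
  }
}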

+ 0 - 96
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JString.java

@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-
-/**
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JString extends JCompType {
-    
-  class JavaString extends JavaCompType {
-    
-    JavaString() {
-      super("String", "String", "String", "TypeID.RIOType.STRING");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "org.apache.hadoop.record.meta.TypeID.StringTypeID";
-    }
-
-    @Override
-    void genSlurpBytes(CodeBuffer cb, String b, String s, String l) {
-      cb.append("{\n");
-      cb.append("int i = org.apache.hadoop.record.Utils.readVInt("+b+", "+s+");\n");
-      cb.append("int z = org.apache.hadoop.record.Utils.getVIntSize(i);\n");
-      cb.append(s+"+=(z+i); "+l+"-= (z+i);\n");
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genCompareBytes(CodeBuffer cb) {
-      cb.append("{\n");
-      cb.append("int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n");
-      cb.append("int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);\n");
-      cb.append("int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);\n");
-      cb.append("int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);\n");
-      cb.append("s1+=z1; s2+=z2; l1-=z1; l2-=z2;\n");
-      cb.append("int r1 = org.apache.hadoop.record.Utils.compareBytes(b1,s1,i1,b2,s2,i2);\n");
-      cb.append("if (r1 != 0) { return (r1<0)?-1:0; }\n");
-      cb.append("s1+=i1; s2+=i2; l1-=i1; l1-=i2;\n");
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genClone(CodeBuffer cb, String fname) {
-      cb.append(Consts.RIO_PREFIX + "other."+fname+" = this."+fname+";\n");
-    }
-  }
-
-  class CppString extends CppCompType {
-    
-    CppString() {
-      super("::std::string");
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new ::hadoop::TypeID(::hadoop::RIOTYPE_STRING)";
-    }
-  }
-  
-  /** Creates a new instance of JString */
-  public JString() {
-    setJavaType(new JavaString());
-    setCppType(new CppString());
-    setCType(new CCompType());
-  }
-    
-  @Override
-  String getSignature() {
-    return "s";
-  }
-}

+ 0 - 230
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JType.java

@@ -1,230 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-
-/**
- * Abstract Base class for all types supported by Hadoop Record I/O.
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-abstract public class JType {
-  
-  static String toCamelCase(String name) {
-    char firstChar = name.charAt(0);
-    if (Character.isLowerCase(firstChar)) {
-      return ""+Character.toUpperCase(firstChar) + name.substring(1);
-    }
-    return name;
-  }
-  
-  JavaType javaType;
-  CppType cppType;
-  CType cType;
-  
-  abstract class JavaType {
-    private String name;
-    private String methodSuffix;
-    private String wrapper;
-    private String typeIDByteString; // points to TypeID.RIOType 
-    
-    JavaType(String javaname,
-        String suffix,
-        String wrapper, 
-        String typeIDByteString) { 
-      this.name = javaname;
-      this.methodSuffix = suffix;
-      this.wrapper = wrapper;
-      this.typeIDByteString = typeIDByteString;
-    }
-
-    void genDecl(CodeBuffer cb, String fname) {
-      cb.append("private "+name+" "+fname+";\n");
-    }
-    
-    void genStaticTypeInfo(CodeBuffer cb, String fname) {
-      cb.append(Consts.RTI_VAR + ".addField(\"" + fname + "\", " +
-          getTypeIDObjectString() + ");\n");
-    }
-    
-    abstract String getTypeIDObjectString();
-    
-    void genSetRTIFilter(CodeBuffer cb, Map<String, Integer> nestedStructMap) {
-      // do nothing by default
-      return;
-    }
-
-    /*void genRtiFieldCondition(CodeBuffer cb, String fname, int ct) {
-      cb.append("if ((tInfo.fieldID.equals(\"" + fname + "\")) && (typeVal ==" +
-          " org.apache.hadoop.record.meta." + getTypeIDByteString() + ")) {\n");
-      cb.append("rtiFilterFields[i] = " + ct + ";\n");
-      cb.append("}\n");
-    }
-
-    void genRtiNestedFieldCondition(CodeBuffer cb, String varName, int ct) {
-      cb.append("if (" + varName + ".getElementTypeID().getTypeVal() == " +
-          "org.apache.hadoop.record.meta." + getTypeIDByteString() + 
-          ") {\n");
-      cb.append("rtiFilterFields[i] = " + ct + ";\n");
-      cb.append("}\n");  
-    }*/
-
-    void genConstructorParam(CodeBuffer cb, String fname) {
-      cb.append("final "+name+" "+fname);
-    }
-    
-    void genGetSet(CodeBuffer cb, String fname) {
-      cb.append("public "+name+" get"+toCamelCase(fname)+"() {\n");
-      cb.append("return "+fname+";\n");
-      cb.append("}\n");
-      cb.append("public void set"+toCamelCase(fname)+"(final "+name+" "+fname+") {\n");
-      cb.append("this."+fname+"="+fname+";\n");
-      cb.append("}\n");
-    }
-    
-    String getType() {
-      return name;
-    }
-    
-    String getWrapperType() {
-      return wrapper;
-    }
-    
-    String getMethodSuffix() {
-      return methodSuffix;
-    }
-    
-    String getTypeIDByteString() {
-      return typeIDByteString;
-    }
-    
-    void genWriteMethod(CodeBuffer cb, String fname, String tag) {
-      cb.append(Consts.RECORD_OUTPUT + ".write"+methodSuffix + 
-          "("+fname+",\""+tag+"\");\n");
-    }
-    
-    void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) {
-      if (decl) {
-        cb.append(name+" "+fname+";\n");
-      }
-      cb.append(fname+"=" + Consts.RECORD_INPUT + ".read" + 
-          methodSuffix+"(\""+tag+"\");\n");
-    }
-    
-    void genCompareTo(CodeBuffer cb, String fname, String other) {
-      cb.append(Consts.RIO_PREFIX + "ret = ("+fname+" == "+other+")? 0 :(("+
-          fname+"<"+other+")?-1:1);\n");
-    }
-    
-    abstract void genCompareBytes(CodeBuffer cb);
-    
-    abstract void genSlurpBytes(CodeBuffer cb, String b, String s, String l);
-    
-    void genEquals(CodeBuffer cb, String fname, String peer) {
-      cb.append(Consts.RIO_PREFIX + "ret = ("+fname+"=="+peer+");\n");
-    }
-    
-    void genHashCode(CodeBuffer cb, String fname) {
-      cb.append(Consts.RIO_PREFIX + "ret = (int)"+fname+";\n");
-    }
-    
-    void genConstructorSet(CodeBuffer cb, String fname) {
-      cb.append("this."+fname+" = "+fname+";\n");
-    }
-    
-    void genClone(CodeBuffer cb, String fname) {
-      cb.append(Consts.RIO_PREFIX + "other."+fname+" = this."+fname+";\n");
-    }
-  }
-  
-  abstract class CppType {
-    private String name;
-    
-    CppType(String cppname) {
-      name = cppname;
-    }
-    
-    void genDecl(CodeBuffer cb, String fname) {
-      cb.append(name+" "+fname+";\n");
-    }
-    
-    void genStaticTypeInfo(CodeBuffer cb, String fname) {
-      cb.append("p->addField(new ::std::string(\"" + 
-          fname + "\"), " + getTypeIDObjectString() + ");\n");
-    }
-    
-    void genGetSet(CodeBuffer cb, String fname) {
-      cb.append("virtual "+name+" get"+toCamelCase(fname)+"() const {\n");
-      cb.append("return "+fname+";\n");
-      cb.append("}\n");
-      cb.append("virtual void set"+toCamelCase(fname)+"("+name+" m_) {\n");
-      cb.append(fname+"=m_;\n");
-      cb.append("}\n");
-    }
-    
-    abstract String getTypeIDObjectString();
-
-    void genSetRTIFilter(CodeBuffer cb) {
-      // do nothing by default
-      return;
-    }
-
-    String getType() {
-      return name;
-    }
-  }
-  
-  class CType {
-    
-  }
-  
-  abstract String getSignature();
-  
-  void setJavaType(JavaType jType) {
-    this.javaType = jType;
-  }
-  
-  JavaType getJavaType() {
-    return javaType;
-  }
-  
-  void setCppType(CppType cppType) {
-    this.cppType = cppType;
-  }
-  
-  CppType getCppType() {
-    return cppType;
-  }
-  
-  void setCType(CType cType) {
-    this.cType = cType;
-  }
-  
-  CType getCType() {
-    return cType;
-  }
-}

+ 0 - 214
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JVector.java

@@ -1,214 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class JVector extends JCompType {
-  
-  static private int level = 0;
-  
-  static private String getId(String id) { return id+getLevel(); }
-  
-  static private String getLevel() { return Integer.toString(level); }
-  
-  static private void incrLevel() { level++; }
-  
-  static private void decrLevel() { level--; }
-  
-  private JType type;
-  
-  class JavaVector extends JavaCompType {
-    
-    private JType.JavaType element;
-    
-    JavaVector(JType.JavaType t) {
-      super("java.util.ArrayList<"+t.getWrapperType()+">",
-            "Vector", "java.util.ArrayList<"+t.getWrapperType()+">",
-            "TypeID.RIOType.VECTOR");
-      element = t;
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new org.apache.hadoop.record.meta.VectorTypeID(" + 
-      element.getTypeIDObjectString() + ")";
-    }
-
-    @Override
-    void genSetRTIFilter(CodeBuffer cb, Map<String, Integer> nestedStructMap) {
-      element.genSetRTIFilter(cb, nestedStructMap);
-    }
-
-    @Override
-    void genCompareTo(CodeBuffer cb, String fname, String other) {
-      cb.append("{\n");
-      incrLevel();
-      cb.append("int "+getId(Consts.RIO_PREFIX + "len1")+" = "+fname+
-          ".size();\n");
-      cb.append("int "+getId(Consts.RIO_PREFIX + "len2")+" = "+other+
-          ".size();\n");
-      cb.append("for(int "+getId(Consts.RIO_PREFIX + "vidx")+" = 0; "+
-          getId(Consts.RIO_PREFIX + "vidx")+"<"+getId(Consts.RIO_PREFIX + "len1")+
-          " && "+getId(Consts.RIO_PREFIX + "vidx")+"<"+
-          getId(Consts.RIO_PREFIX + "len2")+"; "+
-          getId(Consts.RIO_PREFIX + "vidx")+"++) {\n");
-      cb.append(element.getType()+" "+getId(Consts.RIO_PREFIX + "e1")+
-                " = "+fname+
-                ".get("+getId(Consts.RIO_PREFIX + "vidx")+");\n");
-      cb.append(element.getType()+" "+getId(Consts.RIO_PREFIX + "e2")+
-                " = "+other+
-                ".get("+getId(Consts.RIO_PREFIX + "vidx")+");\n");
-      element.genCompareTo(cb, getId(Consts.RIO_PREFIX + "e1"), 
-          getId(Consts.RIO_PREFIX + "e2"));
-      cb.append("if (" + Consts.RIO_PREFIX + "ret != 0) { return " +
-          Consts.RIO_PREFIX + "ret; }\n");
-      cb.append("}\n");
-      cb.append(Consts.RIO_PREFIX + "ret = ("+getId(Consts.RIO_PREFIX + "len1")+
-          " - "+getId(Consts.RIO_PREFIX + "len2")+");\n");
-      decrLevel();
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) {
-      if (decl) {
-        cb.append(getType()+" "+fname+";\n");
-      }
-      cb.append("{\n");
-      incrLevel();
-      cb.append("org.apache.hadoop.record.Index "+
-          getId(Consts.RIO_PREFIX + "vidx")+" = " + 
-          Consts.RECORD_INPUT + ".startVector(\""+tag+"\");\n");
-      cb.append(fname+"=new "+getType()+"();\n");
-      cb.append("for (; !"+getId(Consts.RIO_PREFIX + "vidx")+".done(); " + 
-          getId(Consts.RIO_PREFIX + "vidx")+".incr()) {\n");
-      element.genReadMethod(cb, getId(Consts.RIO_PREFIX + "e"), 
-          getId(Consts.RIO_PREFIX + "e"), true);
-      cb.append(fname+".add("+getId(Consts.RIO_PREFIX + "e")+");\n");
-      cb.append("}\n");
-      cb.append(Consts.RECORD_INPUT + ".endVector(\""+tag+"\");\n");
-      decrLevel();
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genWriteMethod(CodeBuffer cb, String fname, String tag) {
-      cb.append("{\n");
-      incrLevel();
-      cb.append(Consts.RECORD_OUTPUT + ".startVector("+fname+",\""+tag+"\");\n");
-      cb.append("int "+getId(Consts.RIO_PREFIX + "len")+" = "+fname+".size();\n");
-      cb.append("for(int "+getId(Consts.RIO_PREFIX + "vidx")+" = 0; " + 
-          getId(Consts.RIO_PREFIX + "vidx")+"<"+getId(Consts.RIO_PREFIX + "len")+
-          "; "+getId(Consts.RIO_PREFIX + "vidx")+"++) {\n");
-      cb.append(element.getType()+" "+getId(Consts.RIO_PREFIX + "e")+" = "+
-          fname+".get("+getId(Consts.RIO_PREFIX + "vidx")+");\n");
-      element.genWriteMethod(cb, getId(Consts.RIO_PREFIX + "e"), 
-          getId(Consts.RIO_PREFIX + "e"));
-      cb.append("}\n");
-      cb.append(Consts.RECORD_OUTPUT + ".endVector("+fname+",\""+tag+"\");\n");
-      cb.append("}\n");
-      decrLevel();
-    }
-    
-    @Override
-    void genSlurpBytes(CodeBuffer cb, String b, String s, String l) {
-      cb.append("{\n");
-      incrLevel();
-      cb.append("int "+getId("vi")+
-                " = org.apache.hadoop.record.Utils.readVInt("+b+", "+s+");\n");
-      cb.append("int "+getId("vz")+
-                " = org.apache.hadoop.record.Utils.getVIntSize("+getId("vi")+");\n");
-      cb.append(s+"+="+getId("vz")+"; "+l+"-="+getId("vz")+";\n");
-      cb.append("for (int "+getId("vidx")+" = 0; "+getId("vidx")+
-                " < "+getId("vi")+"; "+getId("vidx")+"++)");
-      element.genSlurpBytes(cb, b, s, l);
-      decrLevel();
-      cb.append("}\n");
-    }
-    
-    @Override
-    void genCompareBytes(CodeBuffer cb) {
-      cb.append("{\n");
-      incrLevel();
-      cb.append("int "+getId("vi1")+
-                " = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n");
-      cb.append("int "+getId("vi2")+
-                " = org.apache.hadoop.record.Utils.readVInt(b2, s2);\n");
-      cb.append("int "+getId("vz1")+
-                " = org.apache.hadoop.record.Utils.getVIntSize("+getId("vi1")+");\n");
-      cb.append("int "+getId("vz2")+
-                " = org.apache.hadoop.record.Utils.getVIntSize("+getId("vi2")+");\n");
-      cb.append("s1+="+getId("vz1")+"; s2+="+getId("vz2")+
-                "; l1-="+getId("vz1")+"; l2-="+getId("vz2")+";\n");
-      cb.append("for (int "+getId("vidx")+" = 0; "+getId("vidx")+
-                " < "+getId("vi1")+" && "+getId("vidx")+" < "+getId("vi2")+
-                "; "+getId("vidx")+"++)");
-      element.genCompareBytes(cb);
-      cb.append("if ("+getId("vi1")+" != "+getId("vi2")+
-                ") { return ("+getId("vi1")+"<"+getId("vi2")+")?-1:0; }\n");
-      decrLevel();
-      cb.append("}\n");
-    }
-  }
-  
-  class CppVector extends CppCompType {
-    
-    private JType.CppType element;
-    
-    CppVector(JType.CppType t) {
-      super("::std::vector< "+t.getType()+" >");
-      element = t;
-    }
-    
-    @Override
-    String getTypeIDObjectString() {
-      return "new ::hadoop::VectorTypeID(" +    
-      element.getTypeIDObjectString() + ")";
-    }
-
-    @Override
-    void genSetRTIFilter(CodeBuffer cb) {
-      element.genSetRTIFilter(cb);
-    }
-
-  }
-  
-  /** Creates a new instance of JVector */
-  public JVector(JType t) {
-    type = t;
-    setJavaType(new JavaVector(t.getJavaType()));
-    setCppType(new CppVector(t.getCppType()));
-    setCType(new CCompType());
-  }
-  
-  @Override
-  String getSignature() {
-    return "[" + type.getSignature() + "]";
-  }
-}
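Taken together, the getSignature() methods removed in this change compose recursively: JString yields "s", JVector wraps its element type as "[...]", and JRecord wraps its field signatures as "L<name>(...)". For a hypothetical record org.example.MyRec with a string field followed by a vector-of-string field, the precomputed signature would therefore be LMyRec(s[s]).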

+ 0 - 51
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JavaGenerator.java

@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler;
-
-import java.util.ArrayList;
-import java.io.IOException;
-import java.util.Iterator;
-
-/**
- * Java Code generator front-end for Hadoop record I/O.
- */
-class JavaGenerator extends CodeGenerator {
-  
-  JavaGenerator() {
-  }
-  
-  /**
-   * Generate Java code for records. This method is only a front-end to
-   * JRecord, since one file is generated for each record.
-   *
-   * @param name possibly full pathname to the file
-   * @param ilist included files (as JFile)
-   * @param rlist List of records defined within this file
-   * @param destDir output directory
-   */
-  @Override
-  void genCode(String name, ArrayList<JFile> ilist,
-               ArrayList<JRecord> rlist, String destDir, ArrayList<String> options)
-    throws IOException {
-    for (Iterator<JRecord> iter = rlist.iterator(); iter.hasNext();) {
-      JRecord rec = iter.next();
-      rec.genJavaCode(destDir, options);
-    }
-  }
-}

+ 0 - 145
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/ant/RccTask.java

@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.record.compiler.ant;
-
-import java.io.File;
-import java.util.ArrayList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.record.compiler.generated.Rcc;
-import org.apache.tools.ant.BuildException;
-import org.apache.tools.ant.DirectoryScanner;
-import org.apache.tools.ant.Project;
-import org.apache.tools.ant.Task;
-import org.apache.tools.ant.types.FileSet;
-
-/**
- * Hadoop record compiler ant Task
- *<p> This task takes the given record definition files and compiles them
- * into Java or C++ files. It is then up to the user to compile the
- * generated files.
- *
- * <p> The task requires the <code>file</code> or the nested fileset element to be
- * specified. Optional attributes are <code>language</code> (set the output
- * language, default is "java"),
- * <code>destdir</code> (name of the destination directory for generated java/c++
- * code, default is ".") and <code>failonerror</code> (specifies error handling
- * behavior; default is true).
- * <p><h4>Usage</h4>
- * <pre>
- * &lt;recordcc
- *       destdir="${basedir}/gensrc"
- *       language="java"&gt;
- *   &lt;fileset include="**\/*.jr" /&gt;
- * &lt;/recordcc&gt;
- * </pre>
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class RccTask extends Task {
-  
-  private String language = "java";
-  private File src;
-  private File dest = new File(".");
-  private final ArrayList<FileSet> filesets = new ArrayList<FileSet>();
-  private boolean failOnError = true;
-  
-  /** Creates a new instance of RccTask */
-  public RccTask() {
-  }
-  
-  /**
-   * Sets the output language option
-   * @param language "java"/"c++"
-   */
-  public void setLanguage(String language) {
-    this.language = language;
-  }
-  
-  /**
-   * Sets the record definition file attribute
-   * @param file record definition file
-   */
-  public void setFile(File file) {
-    this.src = file;
-  }
-  
-  /**
-   * Given multiple files (via fileset), set the error handling behavior
-   * @param flag true will throw build exception in case of failure (default)
-   */
-  public void setFailonerror(boolean flag) {
-    this.failOnError = flag;
-  }
-  
-  /**
-   * Sets directory where output files will be generated
-   * @param dir output directory
-   */
-  public void setDestdir(File dir) {
-    this.dest = dir;
-  }
-  
-  /**
-   * Adds a fileset that can consist of one or more files
-   * @param set Set of record definition files
-   */
-  public void addFileset(FileSet set) {
-    filesets.add(set);
-  }
-  
-  /**
-   * Invoke the Hadoop record compiler on each record definition file
-   */
-  @Override
-  public void execute() throws BuildException {
-    if (src == null && filesets.size()==0) {
-      throw new BuildException("There must be a file attribute or a fileset child element");
-    }
-    if (src != null) {
-      doCompile(src);
-    }
-    Project myProject = getProject();
-    for (int i = 0; i < filesets.size(); i++) {
-      FileSet fs = filesets.get(i);
-      DirectoryScanner ds = fs.getDirectoryScanner(myProject);
-      File dir = fs.getDir(myProject);
-      String[] srcs = ds.getIncludedFiles();
-      for (int j = 0; j < srcs.length; j++) {
-        doCompile(new File(dir, srcs[j]));
-      }
-    }
-  }
-  
-  private void doCompile(File file) throws BuildException {
-    String[] args = new String[5];
-    args[0] = "--language";
-    args[1] = this.language;
-    args[2] = "--destdir";
-    args[3] = this.dest.getPath();
-    args[4] = file.getPath();
-    int retVal = Rcc.driver(args);
-    if (retVal != 0 && failOnError) {
-      throw new BuildException("Hadoop record compiler returned error code "+retVal);
-    }
-  }
-}
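For reference, the ant task deleted above is only a thin front-end: execute() walks the fileset and doCompile() hands a fixed argument layout to Rcc.driver(). A minimal sketch of driving the compiler the same way from plain Java, under that assumption (the destination directory and .jr file path below are hypothetical):

import org.apache.hadoop.record.compiler.generated.Rcc;

public class CompileRecordsSketch {
  public static void main(String[] args) {
    // Same argument layout that RccTask.doCompile() assembled before calling Rcc.driver().
    String[] rccArgs = {
        "--language", "java",       // or "c++"
        "--destdir", "gensrc",      // hypothetical output directory
        "src/records/example.jr"    // hypothetical record definition file
    };
    int retVal = Rcc.driver(rccArgs);
    if (retVal != 0) {
      throw new RuntimeException("Hadoop record compiler returned error code " + retVal);
    }
  }
}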

+ 0 - 219
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/ParseException.java

@@ -1,219 +0,0 @@
-/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 3.0 */
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler.generated;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * This exception is thrown when parse errors are encountered.
- * You can explicitly create objects of this exception type by
- * calling the method generateParseException in the generated
- * parser.
- *
- * You can modify this class to customize your error reporting
- * mechanisms so long as you retain the public fields.
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class ParseException extends Exception {
-
-  /**
-   * This constructor is used by the method "generateParseException"
-   * in the generated parser.  Calling this constructor generates
-   * a new object of this type with the fields "currentToken",
-   * "expectedTokenSequences", and "tokenImage" set.  The boolean
-   * flag "specialConstructor" is also set to true to indicate that
-   * this constructor was used to create this object.
-   * This constructor calls its super class with the empty string
-   * to force the "toString" method of parent class "Throwable" to
-   * print the error message in the form:
-   *     ParseException: <result of getMessage>
-   */
-  public ParseException(Token currentTokenVal,
-                        int[][] expectedTokenSequencesVal,
-                        String[] tokenImageVal
-                        )
-  {
-    super("");
-    specialConstructor = true;
-    currentToken = currentTokenVal;
-    expectedTokenSequences = expectedTokenSequencesVal;
-    tokenImage = tokenImageVal;
-  }
-
-  /**
-   * The following constructors are for use by you for whatever
-   * purpose you can think of.  Constructing the exception in this
-   * manner makes the exception behave in the normal way - i.e., as
-   * documented in the class "Throwable".  The fields "errorToken",
-   * "expectedTokenSequences", and "tokenImage" do not contain
-   * relevant information.  The JavaCC generated code does not use
-   * these constructors.
-   */
-
-  public ParseException() {
-    super();
-    specialConstructor = false;
-  }
-
-  public ParseException(String message) {
-    super(message);
-    specialConstructor = false;
-  }
-
-  /**
-   * This variable determines which constructor was used to create
-   * this object and thereby affects the semantics of the
-   * "getMessage" method (see below).
-   */
-  protected boolean specialConstructor;
-
-  /**
-   * This is the last token that has been consumed successfully.  If
-   * this object has been created due to a parse error, the token
-   * following this token will (therefore) be the first error token.
-   */
-  public Token currentToken;
-
-  /**
-   * Each entry in this array is an array of integers.  Each array
-   * of integers represents a sequence of tokens (by their ordinal
-   * values) that is expected at this point of the parse.
-   */
-  public int[][] expectedTokenSequences;
-
-  /**
-   * This is a reference to the "tokenImage" array of the generated
-   * parser within which the parse error occurred.  This array is
-   * defined in the generated ...Constants interface.
-   */
-  public String[] tokenImage;
-
-  /**
-   * This method has the standard behavior when this object has been
-   * created using the standard constructors.  Otherwise, it uses
-   * "currentToken" and "expectedTokenSequences" to generate a parse
-   * error message and returns it.  If this object has been created
-   * due to a parse error, and you do not catch it (it gets thrown
-   * from the parser), then this method is called during the printing
-   * of the final stack trace, and hence the correct error message
-   * gets displayed.
-   */
-  @Override
-  public String getMessage() {
-    if (!specialConstructor) {
-      return super.getMessage();
-    }
-    StringBuffer expected = new StringBuffer();
-    int maxSize = 0;
-    for (int i = 0; i < expectedTokenSequences.length; i++) {
-      if (maxSize < expectedTokenSequences[i].length) {
-        maxSize = expectedTokenSequences[i].length;
-      }
-      for (int j = 0; j < expectedTokenSequences[i].length; j++) {
-        expected.append(tokenImage[expectedTokenSequences[i][j]]).append(" ");
-      }
-      if (expectedTokenSequences[i][expectedTokenSequences[i].length - 1] != 0) {
-        expected.append("...");
-      }
-      expected.append(eol).append("    ");
-    }
-    String retval = "Encountered \"";
-    Token tok = currentToken.next;
-    for (int i = 0; i < maxSize; i++) {
-      if (i != 0) retval += " ";
-      if (tok.kind == 0) {
-        retval += tokenImage[0];
-        break;
-      }
-      retval += add_escapes(tok.image);
-      tok = tok.next; 
-    }
-    retval += "\" at line " + currentToken.next.beginLine + ", column " + currentToken.next.beginColumn;
-    retval += "." + eol;
-    if (expectedTokenSequences.length == 1) {
-      retval += "Was expecting:" + eol + "    ";
-    } else {
-      retval += "Was expecting one of:" + eol + "    ";
-    }
-    retval += expected.toString();
-    return retval;
-  }
-
-  /**
-   * The end of line string for this machine.
-   */
-  protected String eol = System.getProperty("line.separator", "\n");
- 
-  /**
-   * Used to convert raw characters to their escaped version
-   * when these raw versions cannot be used as part of an ASCII
-   * string literal.
-   */
-  protected String add_escapes(String str) {
-    StringBuffer retval = new StringBuffer();
-    char ch;
-    for (int i = 0; i < str.length(); i++) {
-      switch (str.charAt(i))
-        {
-        case 0 :
-          continue;
-        case '\b':
-          retval.append("\\b");
-          continue;
-        case '\t':
-          retval.append("\\t");
-          continue;
-        case '\n':
-          retval.append("\\n");
-          continue;
-        case '\f':
-          retval.append("\\f");
-          continue;
-        case '\r':
-          retval.append("\\r");
-          continue;
-        case '\"':
-          retval.append("\\\"");
-          continue;
-        case '\'':
-          retval.append("\\\'");
-          continue;
-        case '\\':
-          retval.append("\\\\");
-          continue;
-        default:
-          if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) {
-            String s = "0000" + Integer.toString(ch, 16);
-            retval.append("\\u" + s.substring(s.length() - 4, s.length()));
-          } else {
-            retval.append(ch);
-          }
-          continue;
-        }
-    }
-    return retval.toString();
-  }
-
-}
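
getMessage() in the class deleted above builds its report purely from the three public fields (currentToken, expectedTokenSequences, tokenImage). The following hand-built sketch shows that path; the token values are invented for illustration and the printed text is only approximate.

    import org.apache.hadoop.record.compiler.generated.*;

    public class ParseExceptionSketch {
      public static void main(String[] args) {
        // currentToken is the last token consumed successfully; its "next"
        // is the offending token that getMessage() reports.
        Token current = new Token();
        Token offending = new Token();
        offending.kind = RccConstants.IDENT_TKN;
        offending.image = "links";
        offending.beginLine = 3;
        offending.beginColumn = 1;
        current.next = offending;

        // One expected continuation: the parser wanted "{" next.
        int[][] expected = { { RccConstants.LBRACE_TKN } };
        ParseException pe =
            new ParseException(current, expected, RccConstants.tokenImage);
        System.out.println(pe.getMessage());
        // Prints roughly:
        //   Encountered "links" at line 3, column 1.
        //   Was expecting:
        //       "{" ...
      }
    }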

+ 0 - 542
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Rcc.java

@@ -1,542 +0,0 @@
-/* Generated By:JavaCC: Do not edit this line. Rcc.java */
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler.generated;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.record.compiler.*;
-import java.util.ArrayList;
-import java.util.Hashtable;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-/**
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class Rcc implements RccConstants {
-  private static String language = "java";
-  private static String destDir = ".";
-  private static ArrayList<String> recFiles = new ArrayList<String>();
-  private static ArrayList<String> cmdargs = new ArrayList<String>();
-  private static JFile curFile;
-  private static Hashtable<String,JRecord> recTab;
-  private static String curDir = ".";
-  private static String curFileName;
-  private static String curModuleName;
-
-  public static void main(String[] args) {
-    System.exit(driver(args));
-  }
-
-  public static void usage() {
-    System.err.println("Usage: rcc --language [java|c++] ddl-files");
-  }
-
-  public static int driver(String[] args) {
-    for (int i=0; i<args.length; i++) {
-      if ("-l".equalsIgnoreCase(args[i]) ||
-          "--language".equalsIgnoreCase(args[i])) {
-        language = args[i+1].toLowerCase();
-        i++;
-      } else if ("-d".equalsIgnoreCase(args[i]) ||
-                 "--destdir".equalsIgnoreCase(args[i])) {
-        destDir = args[i+1];
-        i++;
-      } else if (args[i].startsWith("-")) {
-        String arg = args[i].substring(1);
-        if (arg.startsWith("-")) {
-          arg = arg.substring(1);
-        }
-        cmdargs.add(arg.toLowerCase());
-      } else {
-        recFiles.add(args[i]);
-      }
-    }
-    if (recFiles.size() == 0) {
-      usage();
-      return 1;
-    }
-    for (int i=0; i<recFiles.size(); i++) {
-      curFileName = recFiles.get(i);
-      File file = new File(curFileName);
-      try {
-        FileReader reader = new FileReader(file);
-        Rcc parser = new Rcc(reader);
-        try {
-          recTab = new Hashtable<String,JRecord>();
-          curFile = parser.Input();
-        } catch (ParseException e) {
-          System.err.println(e.toString());
-          return 1;
-        }
-        try {
-          reader.close();
-        } catch (IOException e) {
-        }
-      } catch (FileNotFoundException e) {
-        System.err.println("File " + recFiles.get(i) +
-                           " Not found.");
-        return 1;
-      }
-      try {
-        int retCode = curFile.genCode(language, destDir, cmdargs);
-        if (retCode != 0) { return retCode; }
-      } catch (IOException e) {
-        System.err.println(e.toString());
-        return 1;
-      }
-    }
-    return 0;
-  }
-
-  final public JFile Input() throws ParseException {
-    ArrayList<JFile> ilist = new ArrayList<JFile>();
-    ArrayList<JRecord> rlist = new ArrayList<JRecord>();
-    JFile i;
-    ArrayList<JRecord> l;
-    label_1:
-    while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case INCLUDE_TKN:
-        i = Include();
-        ilist.add(i);
-        break;
-      case MODULE_TKN:
-        l = Module();
-        rlist.addAll(l);
-        break;
-      default:
-        jj_la1[0] = jj_gen;
-        jj_consume_token(-1);
-        throw new ParseException();
-      }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case MODULE_TKN:
-      case INCLUDE_TKN:
-        ;
-        break;
-      default:
-        jj_la1[1] = jj_gen;
-        break label_1;
-      }
-    }
-    jj_consume_token(0);
-    {if (true) return new JFile(curFileName, ilist, rlist);}
-    throw new Error("Missing return statement in function");
-  }
-
-  final public JFile Include() throws ParseException {
-    String fname;
-    Token t;
-    jj_consume_token(INCLUDE_TKN);
-    t = jj_consume_token(CSTRING_TKN);
-    JFile ret = null;
-    fname = t.image.replaceAll("^\"", "").replaceAll("\"$","");
-    File file = new File(curDir, fname);
-    String tmpDir = curDir;
-    String tmpFile = curFileName;
-    curDir = file.getParent();
-    curFileName = file.getName();
-    try {
-      FileReader reader = new FileReader(file);
-      Rcc parser = new Rcc(reader);
-      try {
-        ret = parser.Input();
-        System.out.println(fname + " Parsed Successfully");
-      } catch (ParseException e) {
-        System.out.println(e.toString());
-        System.exit(1);
-      }
-      try {
-        reader.close();
-      } catch (IOException e) {
-      }
-    } catch (FileNotFoundException e) {
-      System.out.println("File " + fname +
-                         " Not found.");
-      System.exit(1);
-    }
-    curDir = tmpDir;
-    curFileName = tmpFile;
-    {if (true) return ret;}
-    throw new Error("Missing return statement in function");
-  }
-
-  final public ArrayList<JRecord> Module() throws ParseException {
-    String mName;
-    ArrayList<JRecord> rlist;
-    jj_consume_token(MODULE_TKN);
-    mName = ModuleName();
-    curModuleName = mName;
-    jj_consume_token(LBRACE_TKN);
-    rlist = RecordList();
-    jj_consume_token(RBRACE_TKN);
-    {if (true) return rlist;}
-    throw new Error("Missing return statement in function");
-  }
-
-  final public String ModuleName() throws ParseException {
-    String name = "";
-    Token t;
-    t = jj_consume_token(IDENT_TKN);
-    name += t.image;
-    label_2:
-    while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case DOT_TKN:
-        ;
-        break;
-      default:
-        jj_la1[2] = jj_gen;
-        break label_2;
-      }
-      jj_consume_token(DOT_TKN);
-      t = jj_consume_token(IDENT_TKN);
-      name += "." + t.image;
-    }
-    {if (true) return name;}
-    throw new Error("Missing return statement in function");
-  }
-
-  final public ArrayList<JRecord> RecordList() throws ParseException {
-    ArrayList<JRecord> rlist = new ArrayList<JRecord>();
-    JRecord r;
-    label_3:
-    while (true) {
-      r = Record();
-      rlist.add(r);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RECORD_TKN:
-        ;
-        break;
-      default:
-        jj_la1[3] = jj_gen;
-        break label_3;
-      }
-    }
-    {if (true) return rlist;}
-    throw new Error("Missing return statement in function");
-  }
-
-  final public JRecord Record() throws ParseException {
-    String rname;
-    ArrayList<JField<JType>> flist = new ArrayList<JField<JType>>();
-    Token t;
-    JField<JType> f;
-    jj_consume_token(RECORD_TKN);
-    t = jj_consume_token(IDENT_TKN);
-    rname = t.image;
-    jj_consume_token(LBRACE_TKN);
-    label_4:
-    while (true) {
-      f = Field();
-      flist.add(f);
-      jj_consume_token(SEMICOLON_TKN);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case BYTE_TKN:
-      case BOOLEAN_TKN:
-      case INT_TKN:
-      case LONG_TKN:
-      case FLOAT_TKN:
-      case DOUBLE_TKN:
-      case USTRING_TKN:
-      case BUFFER_TKN:
-      case VECTOR_TKN:
-      case MAP_TKN:
-      case IDENT_TKN:
-        ;
-        break;
-      default:
-        jj_la1[4] = jj_gen;
-        break label_4;
-      }
-    }
-    jj_consume_token(RBRACE_TKN);
-    String fqn = curModuleName + "." + rname;
-    JRecord r = new JRecord(fqn, flist);
-    recTab.put(fqn, r);
-    {if (true) return r;}
-    throw new Error("Missing return statement in function");
-  }
-
-  final public JField<JType> Field() throws ParseException {
-    JType jt;
-    Token t;
-    jt = Type();
-    t = jj_consume_token(IDENT_TKN);
-    {if (true) return new JField<JType>(t.image, jt);}
-    throw new Error("Missing return statement in function");
-  }
-
-  final public JType Type() throws ParseException {
-    JType jt;
-    Token t;
-    String rname;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-    case MAP_TKN:
-      jt = Map();
-      {if (true) return jt;}
-      break;
-    case VECTOR_TKN:
-      jt = Vector();
-      {if (true) return jt;}
-      break;
-    case BYTE_TKN:
-      jj_consume_token(BYTE_TKN);
-      {if (true) return new JByte();}
-      break;
-    case BOOLEAN_TKN:
-      jj_consume_token(BOOLEAN_TKN);
-      {if (true) return new JBoolean();}
-      break;
-    case INT_TKN:
-      jj_consume_token(INT_TKN);
-      {if (true) return new JInt();}
-      break;
-    case LONG_TKN:
-      jj_consume_token(LONG_TKN);
-      {if (true) return new JLong();}
-      break;
-    case FLOAT_TKN:
-      jj_consume_token(FLOAT_TKN);
-      {if (true) return new JFloat();}
-      break;
-    case DOUBLE_TKN:
-      jj_consume_token(DOUBLE_TKN);
-      {if (true) return new JDouble();}
-      break;
-    case USTRING_TKN:
-      jj_consume_token(USTRING_TKN);
-      {if (true) return new JString();}
-      break;
-    case BUFFER_TKN:
-      jj_consume_token(BUFFER_TKN);
-      {if (true) return new JBuffer();}
-      break;
-    case IDENT_TKN:
-      rname = ModuleName();
-      if (rname.indexOf('.', 0) < 0) {
-        rname = curModuleName + "." + rname;
-      }
-      JRecord r = recTab.get(rname);
-      if (r == null) {
-        System.out.println("Type " + rname + " not known. Exiting.");
-        System.exit(1);
-      }
-      {if (true) return r;}
-      break;
-    default:
-      jj_la1[5] = jj_gen;
-      jj_consume_token(-1);
-      throw new ParseException();
-    }
-    throw new Error("Missing return statement in function");
-  }
-
-  final public JMap Map() throws ParseException {
-    JType jt1;
-    JType jt2;
-    jj_consume_token(MAP_TKN);
-    jj_consume_token(LT_TKN);
-    jt1 = Type();
-    jj_consume_token(COMMA_TKN);
-    jt2 = Type();
-    jj_consume_token(GT_TKN);
-    {if (true) return new JMap(jt1, jt2);}
-    throw new Error("Missing return statement in function");
-  }
-
-  final public JVector Vector() throws ParseException {
-    JType jt;
-    jj_consume_token(VECTOR_TKN);
-    jj_consume_token(LT_TKN);
-    jt = Type();
-    jj_consume_token(GT_TKN);
-    {if (true) return new JVector(jt);}
-    throw new Error("Missing return statement in function");
-  }
-
-  public RccTokenManager token_source;
-  SimpleCharStream jj_input_stream;
-  public Token token, jj_nt;
-  private int jj_ntk;
-  private int jj_gen;
-  final private int[] jj_la1 = new int[6];
-  static private int[] jj_la1_0;
-  static private int[] jj_la1_1;
-  static {
-    jj_la1_0();
-    jj_la1_1();
-  }
-  private static void jj_la1_0() {
-    jj_la1_0 = new int[] {0x2800, 0x2800, 0x40000000, 0x1000, 0xffc000, 0xffc000,};
-  }
-  private static void jj_la1_1() {
-    jj_la1_1 = new int[] {0x0, 0x0, 0x0, 0x0, 0x1, 0x1,};
-  }
-
-  public Rcc(java.io.InputStream stream) {
-    this(stream, null);
-  }
-  public Rcc(java.io.InputStream stream, String encoding) {
-    try { jj_input_stream = new SimpleCharStream(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); }
-    token_source = new RccTokenManager(jj_input_stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 6; i++) jj_la1[i] = -1;
-  }
-
-  public void ReInit(java.io.InputStream stream) {
-    ReInit(stream, null);
-  }
-  public void ReInit(java.io.InputStream stream, String encoding) {
-    try { jj_input_stream.ReInit(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); }
-    token_source.ReInit(jj_input_stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 6; i++) jj_la1[i] = -1;
-  }
-
-  public Rcc(java.io.Reader stream) {
-    jj_input_stream = new SimpleCharStream(stream, 1, 1);
-    token_source = new RccTokenManager(jj_input_stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 6; i++) jj_la1[i] = -1;
-  }
-
-  public void ReInit(java.io.Reader stream) {
-    jj_input_stream.ReInit(stream, 1, 1);
-    token_source.ReInit(jj_input_stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 6; i++) jj_la1[i] = -1;
-  }
-
-  public Rcc(RccTokenManager tm) {
-    token_source = tm;
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 6; i++) jj_la1[i] = -1;
-  }
-
-  public void ReInit(RccTokenManager tm) {
-    token_source = tm;
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 6; i++) jj_la1[i] = -1;
-  }
-
-  final private Token jj_consume_token(int kind) throws ParseException {
-    Token oldToken;
-    if ((oldToken = token).next != null) token = token.next;
-    else token = token.next = token_source.getNextToken();
-    jj_ntk = -1;
-    if (token.kind == kind) {
-      jj_gen++;
-      return token;
-    }
-    token = oldToken;
-    jj_kind = kind;
-    throw generateParseException();
-  }
-
-  final public Token getNextToken() {
-    if (token.next != null) token = token.next;
-    else token = token.next = token_source.getNextToken();
-    jj_ntk = -1;
-    jj_gen++;
-    return token;
-  }
-
-  final public Token getToken(int index) {
-    Token t = token;
-    for (int i = 0; i < index; i++) {
-      if (t.next != null) t = t.next;
-      else t = t.next = token_source.getNextToken();
-    }
-    return t;
-  }
-
-  final private int jj_ntk() {
-    if ((jj_nt=token.next) == null)
-      return (jj_ntk = (token.next=token_source.getNextToken()).kind);
-    else
-      return (jj_ntk = jj_nt.kind);
-  }
-
-  private java.util.Vector<int[]> jj_expentries = new java.util.Vector<int[]>();
-  private int[] jj_expentry;
-  private int jj_kind = -1;
-
-  public ParseException generateParseException() {
-    jj_expentries.removeAllElements();
-    boolean[] la1tokens = new boolean[33];
-    for (int i = 0; i < 33; i++) {
-      la1tokens[i] = false;
-    }
-    if (jj_kind >= 0) {
-      la1tokens[jj_kind] = true;
-      jj_kind = -1;
-    }
-    for (int i = 0; i < 6; i++) {
-      if (jj_la1[i] == jj_gen) {
-        for (int j = 0; j < 32; j++) {
-          if ((jj_la1_0[i] & (1<<j)) != 0) {
-            la1tokens[j] = true;
-          }
-          if ((jj_la1_1[i] & (1<<j)) != 0) {
-            la1tokens[32+j] = true;
-          }
-        }
-      }
-    }
-    for (int i = 0; i < 33; i++) {
-      if (la1tokens[i]) {
-        jj_expentry = new int[1];
-        jj_expentry[0] = i;
-        jj_expentries.addElement(jj_expentry);
-      }
-    }
-    int[][] exptokseq = new int[jj_expentries.size()][];
-    for (int i = 0; i < jj_expentries.size(); i++) {
-      exptokseq[i] = jj_expentries.elementAt(i);
-    }
-    return new ParseException(token, exptokseq, tokenImage);
-  }
-
-  final public void enable_tracing() {
-  }
-
-  final public void disable_tracing() {
-  }
-
-}
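
The parser deleted above accepts the record DDL whose shape is visible in Module(), Record() and Field(): a module block containing class declarations whose field types come from Type() (byte, boolean, int, long, float, double, ustring, buffer, vector<T>, map<K,V>, or the name of another record). Below is a sketch of feeding such a file through the same Rcc.driver() entry point used by the rcc script and the Ant task; the DDL text and the "links.jr" file name are invented examples.

    import java.io.File;
    import java.io.FileWriter;
    import org.apache.hadoop.record.compiler.generated.Rcc;

    public class DdlSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical record definition in the grammar parsed by Rcc.
        String ddl =
            "module example.links {\n" +
            "  class Link {\n" +
            "    ustring url;\n" +
            "    int weight;\n" +
            "    map<ustring, ustring> attrs;\n" +
            "  }\n" +
            "}\n";
        File jr = new File("links.jr");
        try (FileWriter w = new FileWriter(jr)) {
          w.write(ddl);
        }
        // Roughly equivalent to: rcc --language java links.jr
        System.exit(Rcc.driver(
            new String[] { "--language", "java", "--destdir", ".", jr.getPath() }));
      }
    }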

+ 0 - 97
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/RccConstants.java

@@ -1,97 +0,0 @@
-/* Generated By:JavaCC: Do not edit this line. RccConstants.java */
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler.generated;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public interface RccConstants {
-
-  int EOF = 0;
-  int MODULE_TKN = 11;
-  int RECORD_TKN = 12;
-  int INCLUDE_TKN = 13;
-  int BYTE_TKN = 14;
-  int BOOLEAN_TKN = 15;
-  int INT_TKN = 16;
-  int LONG_TKN = 17;
-  int FLOAT_TKN = 18;
-  int DOUBLE_TKN = 19;
-  int USTRING_TKN = 20;
-  int BUFFER_TKN = 21;
-  int VECTOR_TKN = 22;
-  int MAP_TKN = 23;
-  int LBRACE_TKN = 24;
-  int RBRACE_TKN = 25;
-  int LT_TKN = 26;
-  int GT_TKN = 27;
-  int SEMICOLON_TKN = 28;
-  int COMMA_TKN = 29;
-  int DOT_TKN = 30;
-  int CSTRING_TKN = 31;
-  int IDENT_TKN = 32;
-
-  int DEFAULT = 0;
-  int WithinOneLineComment = 1;
-  int WithinMultiLineComment = 2;
-
-  String[] tokenImage = {
-    "<EOF>",
-    "\" \"",
-    "\"\\t\"",
-    "\"\\n\"",
-    "\"\\r\"",
-    "\"//\"",
-    "<token of kind 6>",
-    "<token of kind 7>",
-    "\"/*\"",
-    "\"*/\"",
-    "<token of kind 10>",
-    "\"module\"",
-    "\"class\"",
-    "\"include\"",
-    "\"byte\"",
-    "\"boolean\"",
-    "\"int\"",
-    "\"long\"",
-    "\"float\"",
-    "\"double\"",
-    "\"ustring\"",
-    "\"buffer\"",
-    "\"vector\"",
-    "\"map\"",
-    "\"{\"",
-    "\"}\"",
-    "\"<\"",
-    "\">\"",
-    "\";\"",
-    "\",\"",
-    "\".\"",
-    "<CSTRING_TKN>",
-    "<IDENT_TKN>",
-  };
-
-}

+ 0 - 833
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/RccTokenManager.java

@@ -1,833 +0,0 @@
-/* Generated By:JavaCC: Do not edit this line. RccTokenManager.java */
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler.generated;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class RccTokenManager implements RccConstants
-{
-  public  java.io.PrintStream debugStream = System.out;
-  public  void setDebugStream(java.io.PrintStream ds) { debugStream = ds; }
-  private final int jjMoveStringLiteralDfa0_1()
-  {
-    return jjMoveNfa_1(0, 0);
-  }
-  private final void jjCheckNAdd(int state)
-  {
-    if (jjrounds[state] != jjround)
-      {
-        jjstateSet[jjnewStateCnt++] = state;
-        jjrounds[state] = jjround;
-      }
-  }
-  private final void jjAddStates(int start, int end)
-  {
-    do {
-      jjstateSet[jjnewStateCnt++] = jjnextStates[start];
-    } while (start++ != end);
-  }
-  private final void jjCheckNAddTwoStates(int state1, int state2)
-  {
-    jjCheckNAdd(state1);
-    jjCheckNAdd(state2);
-  }
-  private final void jjCheckNAddStates(int start, int end)
-  {
-    do {
-      jjCheckNAdd(jjnextStates[start]);
-    } while (start++ != end);
-  }
-  private final void jjCheckNAddStates(int start)
-  {
-    jjCheckNAdd(jjnextStates[start]);
-    jjCheckNAdd(jjnextStates[start + 1]);
-  }
-  private final int jjMoveNfa_1(int startState, int curPos)
-  {
-    int[] nextStates;
-    int startsAt = 0;
-    jjnewStateCnt = 3;
-    int i = 1;
-    jjstateSet[0] = startState;
-    int j, kind = 0x7fffffff;
-    for (;;)
-      {
-        if (++jjround == 0x7fffffff)
-          ReInitRounds();
-        if (curChar < 64)
-          {
-            long l = 1L << curChar;
-            MatchLoop: do
-              {
-                switch(jjstateSet[--i])
-                  {
-                  case 0:
-                    if ((0x2400L & l) != 0L)
-                      {
-                        if (kind > 6)
-                          kind = 6;
-                      }
-                    if (curChar == 13)
-                      jjstateSet[jjnewStateCnt++] = 1;
-                    break;
-                  case 1:
-                    if (curChar == 10 && kind > 6)
-                      kind = 6;
-                    break;
-                  case 2:
-                    if (curChar == 13)
-                      jjstateSet[jjnewStateCnt++] = 1;
-                    break;
-                  default : break;
-                  }
-              } while(i != startsAt);
-          }
-        else if (curChar < 128)
-          {
-            long l = 1L << (curChar & 077);
-            MatchLoop: do
-              {
-                switch(jjstateSet[--i])
-                  {
-                  default : break;
-                  }
-              } while(i != startsAt);
-          }
-        else
-          {
-            int i2 = (curChar & 0xff) >> 6;
-            long l2 = 1L << (curChar & 077);
-            MatchLoop: do
-              {
-                switch(jjstateSet[--i])
-                  {
-                  default : break;
-                  }
-              } while(i != startsAt);
-          }
-        if (kind != 0x7fffffff)
-          {
-            jjmatchedKind = kind;
-            jjmatchedPos = curPos;
-            kind = 0x7fffffff;
-          }
-        ++curPos;
-        if ((i = jjnewStateCnt) == (startsAt = 3 - (jjnewStateCnt = startsAt)))
-          return curPos;
-        try { curChar = input_stream.readChar(); }
-        catch(java.io.IOException e) { return curPos; }
-      }
-  }
-  private final int jjStopStringLiteralDfa_0(int pos, long active0)
-  {
-    switch (pos)
-      {
-      case 0:
-        if ((active0 & 0xfff800L) != 0L)
-          {
-            jjmatchedKind = 32;
-            return 4;
-          }
-        return -1;
-      case 1:
-        if ((active0 & 0xfff800L) != 0L)
-          {
-            jjmatchedKind = 32;
-            jjmatchedPos = 1;
-            return 4;
-          }
-        return -1;
-      case 2:
-        if ((active0 & 0x7ef800L) != 0L)
-          {
-            jjmatchedKind = 32;
-            jjmatchedPos = 2;
-            return 4;
-          }
-        if ((active0 & 0x810000L) != 0L)
-          return 4;
-        return -1;
-      case 3:
-        if ((active0 & 0x24000L) != 0L)
-          return 4;
-        if ((active0 & 0x7cb800L) != 0L)
-          {
-            jjmatchedKind = 32;
-            jjmatchedPos = 3;
-            return 4;
-          }
-        return -1;
-      case 4:
-        if ((active0 & 0x41000L) != 0L)
-          return 4;
-        if ((active0 & 0x78a800L) != 0L)
-          {
-            jjmatchedKind = 32;
-            jjmatchedPos = 4;
-            return 4;
-          }
-        return -1;
-      case 5:
-        if ((active0 & 0x680800L) != 0L)
-          return 4;
-        if ((active0 & 0x10a000L) != 0L)
-          {
-            jjmatchedKind = 32;
-            jjmatchedPos = 5;
-            return 4;
-          }
-        return -1;
-      default :
-        return -1;
-      }
-  }
-  private final int jjStartNfa_0(int pos, long active0)
-  {
-    return jjMoveNfa_0(jjStopStringLiteralDfa_0(pos, active0), pos + 1);
-  }
-  private final int jjStopAtPos(int pos, int kind)
-  {
-    jjmatchedKind = kind;
-    jjmatchedPos = pos;
-    return pos + 1;
-  }
-  private final int jjStartNfaWithStates_0(int pos, int kind, int state)
-  {
-    jjmatchedKind = kind;
-    jjmatchedPos = pos;
-    try { curChar = input_stream.readChar(); }
-    catch(java.io.IOException e) { return pos + 1; }
-    return jjMoveNfa_0(state, pos + 1);
-  }
-  private final int jjMoveStringLiteralDfa0_0()
-  {
-    switch(curChar)
-      {
-      case 44:
-        return jjStopAtPos(0, 29);
-      case 46:
-        return jjStopAtPos(0, 30);
-      case 47:
-        return jjMoveStringLiteralDfa1_0(0x120L);
-      case 59:
-        return jjStopAtPos(0, 28);
-      case 60:
-        return jjStopAtPos(0, 26);
-      case 62:
-        return jjStopAtPos(0, 27);
-      case 98:
-        return jjMoveStringLiteralDfa1_0(0x20c000L);
-      case 99:
-        return jjMoveStringLiteralDfa1_0(0x1000L);
-      case 100:
-        return jjMoveStringLiteralDfa1_0(0x80000L);
-      case 102:
-        return jjMoveStringLiteralDfa1_0(0x40000L);
-      case 105:
-        return jjMoveStringLiteralDfa1_0(0x12000L);
-      case 108:
-        return jjMoveStringLiteralDfa1_0(0x20000L);
-      case 109:
-        return jjMoveStringLiteralDfa1_0(0x800800L);
-      case 117:
-        return jjMoveStringLiteralDfa1_0(0x100000L);
-      case 118:
-        return jjMoveStringLiteralDfa1_0(0x400000L);
-      case 123:
-        return jjStopAtPos(0, 24);
-      case 125:
-        return jjStopAtPos(0, 25);
-      default :
-        return jjMoveNfa_0(0, 0);
-      }
-  }
-  private final int jjMoveStringLiteralDfa1_0(long active0)
-  {
-    try { curChar = input_stream.readChar(); }
-    catch(java.io.IOException e) {
-      jjStopStringLiteralDfa_0(0, active0);
-      return 1;
-    }
-    switch(curChar)
-      {
-      case 42:
-        if ((active0 & 0x100L) != 0L)
-          return jjStopAtPos(1, 8);
-        break;
-      case 47:
-        if ((active0 & 0x20L) != 0L)
-          return jjStopAtPos(1, 5);
-        break;
-      case 97:
-        return jjMoveStringLiteralDfa2_0(active0, 0x800000L);
-      case 101:
-        return jjMoveStringLiteralDfa2_0(active0, 0x400000L);
-      case 108:
-        return jjMoveStringLiteralDfa2_0(active0, 0x41000L);
-      case 110:
-        return jjMoveStringLiteralDfa2_0(active0, 0x12000L);
-      case 111:
-        return jjMoveStringLiteralDfa2_0(active0, 0xa8800L);
-      case 115:
-        return jjMoveStringLiteralDfa2_0(active0, 0x100000L);
-      case 117:
-        return jjMoveStringLiteralDfa2_0(active0, 0x200000L);
-      case 121:
-        return jjMoveStringLiteralDfa2_0(active0, 0x4000L);
-      default :
-        break;
-      }
-    return jjStartNfa_0(0, active0);
-  }
-  private final int jjMoveStringLiteralDfa2_0(long old0, long active0)
-  {
-    if (((active0 &= old0)) == 0L)
-      return jjStartNfa_0(0, old0); 
-    try { curChar = input_stream.readChar(); }
-    catch(java.io.IOException e) {
-      jjStopStringLiteralDfa_0(1, active0);
-      return 2;
-    }
-    switch(curChar)
-      {
-      case 97:
-        return jjMoveStringLiteralDfa3_0(active0, 0x1000L);
-      case 99:
-        return jjMoveStringLiteralDfa3_0(active0, 0x402000L);
-      case 100:
-        return jjMoveStringLiteralDfa3_0(active0, 0x800L);
-      case 102:
-        return jjMoveStringLiteralDfa3_0(active0, 0x200000L);
-      case 110:
-        return jjMoveStringLiteralDfa3_0(active0, 0x20000L);
-      case 111:
-        return jjMoveStringLiteralDfa3_0(active0, 0x48000L);
-      case 112:
-        if ((active0 & 0x800000L) != 0L)
-          return jjStartNfaWithStates_0(2, 23, 4);
-        break;
-      case 116:
-        if ((active0 & 0x10000L) != 0L)
-          return jjStartNfaWithStates_0(2, 16, 4);
-        return jjMoveStringLiteralDfa3_0(active0, 0x104000L);
-      case 117:
-        return jjMoveStringLiteralDfa3_0(active0, 0x80000L);
-      default :
-        break;
-      }
-    return jjStartNfa_0(1, active0);
-  }
-  private final int jjMoveStringLiteralDfa3_0(long old0, long active0)
-  {
-    if (((active0 &= old0)) == 0L)
-      return jjStartNfa_0(1, old0); 
-    try { curChar = input_stream.readChar(); }
-    catch(java.io.IOException e) {
-      jjStopStringLiteralDfa_0(2, active0);
-      return 3;
-    }
-    switch(curChar)
-      {
-      case 97:
-        return jjMoveStringLiteralDfa4_0(active0, 0x40000L);
-      case 98:
-        return jjMoveStringLiteralDfa4_0(active0, 0x80000L);
-      case 101:
-        if ((active0 & 0x4000L) != 0L)
-          return jjStartNfaWithStates_0(3, 14, 4);
-        break;
-      case 102:
-        return jjMoveStringLiteralDfa4_0(active0, 0x200000L);
-      case 103:
-        if ((active0 & 0x20000L) != 0L)
-          return jjStartNfaWithStates_0(3, 17, 4);
-        break;
-      case 108:
-        return jjMoveStringLiteralDfa4_0(active0, 0xa000L);
-      case 114:
-        return jjMoveStringLiteralDfa4_0(active0, 0x100000L);
-      case 115:
-        return jjMoveStringLiteralDfa4_0(active0, 0x1000L);
-      case 116:
-        return jjMoveStringLiteralDfa4_0(active0, 0x400000L);
-      case 117:
-        return jjMoveStringLiteralDfa4_0(active0, 0x800L);
-      default :
-        break;
-      }
-    return jjStartNfa_0(2, active0);
-  }
-  private final int jjMoveStringLiteralDfa4_0(long old0, long active0)
-  {
-    if (((active0 &= old0)) == 0L)
-      return jjStartNfa_0(2, old0); 
-    try { curChar = input_stream.readChar(); }
-    catch(java.io.IOException e) {
-      jjStopStringLiteralDfa_0(3, active0);
-      return 4;
-    }
-    switch(curChar)
-      {
-      case 101:
-        return jjMoveStringLiteralDfa5_0(active0, 0x208000L);
-      case 105:
-        return jjMoveStringLiteralDfa5_0(active0, 0x100000L);
-      case 108:
-        return jjMoveStringLiteralDfa5_0(active0, 0x80800L);
-      case 111:
-        return jjMoveStringLiteralDfa5_0(active0, 0x400000L);
-      case 115:
-        if ((active0 & 0x1000L) != 0L)
-          return jjStartNfaWithStates_0(4, 12, 4);
-        break;
-      case 116:
-        if ((active0 & 0x40000L) != 0L)
-          return jjStartNfaWithStates_0(4, 18, 4);
-        break;
-      case 117:
-        return jjMoveStringLiteralDfa5_0(active0, 0x2000L);
-      default :
-        break;
-      }
-    return jjStartNfa_0(3, active0);
-  }
-  private final int jjMoveStringLiteralDfa5_0(long old0, long active0)
-  {
-    if (((active0 &= old0)) == 0L)
-      return jjStartNfa_0(3, old0); 
-    try { curChar = input_stream.readChar(); }
-    catch(java.io.IOException e) {
-      jjStopStringLiteralDfa_0(4, active0);
-      return 5;
-    }
-    switch(curChar)
-      {
-      case 97:
-        return jjMoveStringLiteralDfa6_0(active0, 0x8000L);
-      case 100:
-        return jjMoveStringLiteralDfa6_0(active0, 0x2000L);
-      case 101:
-        if ((active0 & 0x800L) != 0L)
-          return jjStartNfaWithStates_0(5, 11, 4);
-        else if ((active0 & 0x80000L) != 0L)
-          return jjStartNfaWithStates_0(5, 19, 4);
-        break;
-      case 110:
-        return jjMoveStringLiteralDfa6_0(active0, 0x100000L);
-      case 114:
-        if ((active0 & 0x200000L) != 0L)
-          return jjStartNfaWithStates_0(5, 21, 4);
-        else if ((active0 & 0x400000L) != 0L)
-          return jjStartNfaWithStates_0(5, 22, 4);
-        break;
-      default :
-        break;
-      }
-    return jjStartNfa_0(4, active0);
-  }
-  private final int jjMoveStringLiteralDfa6_0(long old0, long active0)
-  {
-    if (((active0 &= old0)) == 0L)
-      return jjStartNfa_0(4, old0); 
-    try { curChar = input_stream.readChar(); }
-    catch(java.io.IOException e) {
-      jjStopStringLiteralDfa_0(5, active0);
-      return 6;
-    }
-    switch(curChar)
-      {
-      case 101:
-        if ((active0 & 0x2000L) != 0L)
-          return jjStartNfaWithStates_0(6, 13, 4);
-        break;
-      case 103:
-        if ((active0 & 0x100000L) != 0L)
-          return jjStartNfaWithStates_0(6, 20, 4);
-        break;
-      case 110:
-        if ((active0 & 0x8000L) != 0L)
-          return jjStartNfaWithStates_0(6, 15, 4);
-        break;
-      default :
-        break;
-      }
-    return jjStartNfa_0(5, active0);
-  }
-  static final long[] jjbitVec0 = {
-    0x0L, 0x0L, 0xffffffffffffffffL, 0xffffffffffffffffL
-  };
-  private final int jjMoveNfa_0(int startState, int curPos)
-  {
-    int[] nextStates;
-    int startsAt = 0;
-    jjnewStateCnt = 5;
-    int i = 1;
-    jjstateSet[0] = startState;
-    int j, kind = 0x7fffffff;
-    for (;;)
-      {
-        if (++jjround == 0x7fffffff)
-          ReInitRounds();
-        if (curChar < 64)
-          {
-            long l = 1L << curChar;
-            MatchLoop: do
-              {
-                switch(jjstateSet[--i])
-                  {
-                  case 0:
-                    if (curChar == 34)
-                      jjCheckNAdd(1);
-                    break;
-                  case 1:
-                    if ((0xfffffffbffffffffL & l) != 0L)
-                      jjCheckNAddTwoStates(1, 2);
-                    break;
-                  case 2:
-                    if (curChar == 34 && kind > 31)
-                      kind = 31;
-                    break;
-                  case 4:
-                    if ((0x3ff000000000000L & l) == 0L)
-                      break;
-                    if (kind > 32)
-                      kind = 32;
-                    jjstateSet[jjnewStateCnt++] = 4;
-                    break;
-                  default : break;
-                  }
-              } while(i != startsAt);
-          }
-        else if (curChar < 128)
-          {
-            long l = 1L << (curChar & 077);
-            MatchLoop: do
-              {
-                switch(jjstateSet[--i])
-                  {
-                  case 0:
-                    if ((0x7fffffe07fffffeL & l) == 0L)
-                      break;
-                    if (kind > 32)
-                      kind = 32;
-                    jjCheckNAdd(4);
-                    break;
-                  case 1:
-                    jjAddStates(0, 1);
-                    break;
-                  case 4:
-                    if ((0x7fffffe87fffffeL & l) == 0L)
-                      break;
-                    if (kind > 32)
-                      kind = 32;
-                    jjCheckNAdd(4);
-                    break;
-                  default : break;
-                  }
-              } while(i != startsAt);
-          }
-        else
-          {
-            int i2 = (curChar & 0xff) >> 6;
-            long l2 = 1L << (curChar & 077);
-            MatchLoop: do
-              {
-                switch(jjstateSet[--i])
-                  {
-                  case 1:
-                    if ((jjbitVec0[i2] & l2) != 0L)
-                      jjAddStates(0, 1);
-                    break;
-                  default : break;
-                  }
-              } while(i != startsAt);
-          }
-        if (kind != 0x7fffffff)
-          {
-            jjmatchedKind = kind;
-            jjmatchedPos = curPos;
-            kind = 0x7fffffff;
-          }
-        ++curPos;
-        if ((i = jjnewStateCnt) == (startsAt = 5 - (jjnewStateCnt = startsAt)))
-          return curPos;
-        try { curChar = input_stream.readChar(); }
-        catch(java.io.IOException e) { return curPos; }
-      }
-  }
-  private final int jjMoveStringLiteralDfa0_2()
-  {
-    switch(curChar)
-      {
-      case 42:
-        return jjMoveStringLiteralDfa1_2(0x200L);
-      default :
-        return 1;
-      }
-  }
-  private final int jjMoveStringLiteralDfa1_2(long active0)
-  {
-    try { curChar = input_stream.readChar(); }
-    catch(java.io.IOException e) {
-      return 1;
-    }
-    switch(curChar)
-      {
-      case 47:
-        if ((active0 & 0x200L) != 0L)
-          return jjStopAtPos(1, 9);
-        break;
-      default :
-        return 2;
-      }
-    return 2;
-  }
-  static final int[] jjnextStates = {
-    1, 2, 
-  };
-  public static final String[] jjstrLiteralImages = {
-    "", null, null, null, null, null, null, null, null, null, null, 
-    "\155\157\144\165\154\145", "\143\154\141\163\163", "\151\156\143\154\165\144\145", "\142\171\164\145", 
-    "\142\157\157\154\145\141\156", "\151\156\164", "\154\157\156\147", "\146\154\157\141\164", 
-    "\144\157\165\142\154\145", "\165\163\164\162\151\156\147", "\142\165\146\146\145\162", 
-    "\166\145\143\164\157\162", "\155\141\160", "\173", "\175", "\74", "\76", "\73", "\54", "\56", null, null, };
-  public static final String[] lexStateNames = {
-    "DEFAULT", 
-    "WithinOneLineComment", 
-    "WithinMultiLineComment", 
-  };
-  public static final int[] jjnewLexState = {
-    -1, -1, -1, -1, -1, 1, 0, -1, 2, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-    -1, -1, -1, -1, -1, -1, -1, -1, 
-  };
-  static final long[] jjtoToken = {
-    0x1fffff801L, 
-  };
-  static final long[] jjtoSkip = {
-    0x37eL, 
-  };
-  static final long[] jjtoSpecial = {
-    0x360L, 
-  };
-  static final long[] jjtoMore = {
-    0x480L, 
-  };
-  protected SimpleCharStream input_stream;
-  private final int[] jjrounds = new int[5];
-  private final int[] jjstateSet = new int[10];
-  StringBuffer image;
-  int jjimageLen;
-  int lengthOfMatch;
-  protected char curChar;
-  public RccTokenManager(SimpleCharStream stream){
-    if (SimpleCharStream.staticFlag)
-      throw new Error("ERROR: Cannot use a static CharStream class with a non-static lexical analyzer.");
-    input_stream = stream;
-  }
-  public RccTokenManager(SimpleCharStream stream, int lexState){
-    this(stream);
-    SwitchTo(lexState);
-  }
-  public void ReInit(SimpleCharStream stream)
-  {
-    jjmatchedPos = jjnewStateCnt = 0;
-    curLexState = defaultLexState;
-    input_stream = stream;
-    ReInitRounds();
-  }
-  private final void ReInitRounds()
-  {
-    int i;
-    jjround = 0x80000001;
-    for (i = 5; i-- > 0;)
-      jjrounds[i] = 0x80000000;
-  }
-  public void ReInit(SimpleCharStream stream, int lexState)
-  {
-    ReInit(stream);
-    SwitchTo(lexState);
-  }
-  public void SwitchTo(int lexState)
-  {
-    if (lexState >= 3 || lexState < 0)
-      throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
-    else
-      curLexState = lexState;
-  }
-
-  protected Token jjFillToken()
-  {
-    Token t = Token.newToken(jjmatchedKind);
-    t.kind = jjmatchedKind;
-    String im = jjstrLiteralImages[jjmatchedKind];
-    t.image = (im == null) ? input_stream.GetImage() : im;
-    t.beginLine = input_stream.getBeginLine();
-    t.beginColumn = input_stream.getBeginColumn();
-    t.endLine = input_stream.getEndLine();
-    t.endColumn = input_stream.getEndColumn();
-    return t;
-  }
-
-  int curLexState = 0;
-  int defaultLexState = 0;
-  int jjnewStateCnt;
-  int jjround;
-  int jjmatchedPos;
-  int jjmatchedKind;
-
-  public Token getNextToken() 
-  {
-    int kind;
-    Token specialToken = null;
-    Token matchedToken;
-    int curPos = 0;
-
-    EOFLoop :
-      for (;;)
-        {   
-          try   
-            {     
-              curChar = input_stream.BeginToken();
-            }     
-          catch(java.io.IOException e)
-            {        
-              jjmatchedKind = 0;
-              matchedToken = jjFillToken();
-              matchedToken.specialToken = specialToken;
-              return matchedToken;
-            }
-          image = null;
-          jjimageLen = 0;
-
-          for (;;)
-            {
-              switch(curLexState)
-                {
-                case 0:
-                  try { input_stream.backup(0);
-                  while (curChar <= 32 && (0x100002600L & (1L << curChar)) != 0L)
-                    curChar = input_stream.BeginToken();
-                  }
-                  catch (java.io.IOException e1) { continue EOFLoop; }
-                  jjmatchedKind = 0x7fffffff;
-                  jjmatchedPos = 0;
-                  curPos = jjMoveStringLiteralDfa0_0();
-                  break;
-                case 1:
-                  jjmatchedKind = 0x7fffffff;
-                  jjmatchedPos = 0;
-                  curPos = jjMoveStringLiteralDfa0_1();
-                  if (jjmatchedPos == 0 && jjmatchedKind > 7)
-                    {
-                      jjmatchedKind = 7;
-                    }
-                  break;
-                case 2:
-                  jjmatchedKind = 0x7fffffff;
-                  jjmatchedPos = 0;
-                  curPos = jjMoveStringLiteralDfa0_2();
-                  if (jjmatchedPos == 0 && jjmatchedKind > 10)
-                    {
-                      jjmatchedKind = 10;
-                    }
-                  break;
-                }
-              if (jjmatchedKind != 0x7fffffff)
-                {
-                  if (jjmatchedPos + 1 < curPos)
-                    input_stream.backup(curPos - jjmatchedPos - 1);
-                  if ((jjtoToken[jjmatchedKind >> 6] & (1L << (jjmatchedKind & 077))) != 0L)
-                    {
-                      matchedToken = jjFillToken();
-                      matchedToken.specialToken = specialToken;
-                      if (jjnewLexState[jjmatchedKind] != -1)
-                        curLexState = jjnewLexState[jjmatchedKind];
-                      return matchedToken;
-                    }
-                  else if ((jjtoSkip[jjmatchedKind >> 6] & (1L << (jjmatchedKind & 077))) != 0L)
-                    {
-                      if ((jjtoSpecial[jjmatchedKind >> 6] & (1L << (jjmatchedKind & 077))) != 0L)
-                        {
-                          matchedToken = jjFillToken();
-                          if (specialToken == null)
-                            specialToken = matchedToken;
-                          else
-                            {
-                              matchedToken.specialToken = specialToken;
-                              specialToken = (specialToken.next = matchedToken);
-                            }
-                          SkipLexicalActions(matchedToken);
-                        }
-                      else 
-                        SkipLexicalActions(null);
-                      if (jjnewLexState[jjmatchedKind] != -1)
-                        curLexState = jjnewLexState[jjmatchedKind];
-                      continue EOFLoop;
-                    }
-                  jjimageLen += jjmatchedPos + 1;
-                  if (jjnewLexState[jjmatchedKind] != -1)
-                    curLexState = jjnewLexState[jjmatchedKind];
-                  curPos = 0;
-                  jjmatchedKind = 0x7fffffff;
-                  try {
-                    curChar = input_stream.readChar();
-                    continue;
-                  }
-                  catch (java.io.IOException e1) { }
-                }
-              int error_line = input_stream.getEndLine();
-              int error_column = input_stream.getEndColumn();
-              String error_after = null;
-              boolean EOFSeen = false;
-              try { input_stream.readChar(); input_stream.backup(1); }
-              catch (java.io.IOException e1) {
-                EOFSeen = true;
-                error_after = curPos <= 1 ? "" : input_stream.GetImage();
-                if (curChar == '\n' || curChar == '\r') {
-                  error_line++;
-                  error_column = 0;
-                }
-                else
-                  error_column++;
-              }
-              if (!EOFSeen) {
-                input_stream.backup(1);
-                error_after = curPos <= 1 ? "" : input_stream.GetImage();
-              }
-              throw new TokenMgrError(EOFSeen, curLexState, error_line, error_column, error_after, curChar, TokenMgrError.LEXICAL_ERROR);
-            }
-        }
-  }
-
-  void SkipLexicalActions(Token matchedToken)
-  {
-    switch(jjmatchedKind)
-      {
-      default :
-        break;
-      }
-  }
-}
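
RccTokenManager, together with the SimpleCharStream it reads from (deleted next), is the lexer underneath Rcc. A small standalone sketch of driving it directly follows; the input fragment is arbitrary and only meant to show the kind/image pairs the DFA above produces.

    import java.io.StringReader;
    import org.apache.hadoop.record.compiler.generated.*;

    public class LexerSketch {
      public static void main(String[] args) {
        SimpleCharStream in =
            new SimpleCharStream(new StringReader("class Link { int weight; }"));
        RccTokenManager lexer = new RccTokenManager(in);
        // Walk the stream until the EOF token (kind 0) comes back.
        for (Token t = lexer.getNextToken();
             t.kind != RccConstants.EOF;
             t = lexer.getNextToken()) {
          System.out.println(RccConstants.tokenImage[t.kind] + " -> " + t.image);
        }
      }
    }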

+ 0 - 446
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/SimpleCharStream.java

@@ -1,446 +0,0 @@
-/* Generated By:JavaCC: Do not edit this line. SimpleCharStream.java Version 4.0 */
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler.generated;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * An implementation of interface CharStream, where the stream is assumed to
- * contain only ASCII characters (without unicode processing).
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class SimpleCharStream
-{
-  public static final boolean staticFlag = false;
-  int bufsize;
-  int available;
-  int tokenBegin;
-  public int bufpos = -1;
-  protected int bufline[];
-  protected int bufcolumn[];
-
-  protected int column = 0;
-  protected int line = 1;
-
-  protected boolean prevCharIsCR = false;
-  protected boolean prevCharIsLF = false;
-
-  protected java.io.Reader inputStream;
-
-  protected char[] buffer;
-  protected int maxNextCharInd = 0;
-  protected int inBuf = 0;
-  protected int tabSize = 8;
-
-  protected void setTabSize(int i) { tabSize = i; }
-  protected int getTabSize(int i) { return tabSize; }
-
-
-  protected void ExpandBuff(boolean wrapAround)
-  {
-    char[] newbuffer = new char[bufsize + 2048];
-    int newbufline[] = new int[bufsize + 2048];
-    int newbufcolumn[] = new int[bufsize + 2048];
-
-    try
-      {
-        if (wrapAround)
-          {
-            System.arraycopy(buffer, tokenBegin, newbuffer, 0, bufsize - tokenBegin);
-            System.arraycopy(buffer, 0, newbuffer,
-                             bufsize - tokenBegin, bufpos);
-            buffer = newbuffer;
-
-            System.arraycopy(bufline, tokenBegin, newbufline, 0, bufsize - tokenBegin);
-            System.arraycopy(bufline, 0, newbufline, bufsize - tokenBegin, bufpos);
-            bufline = newbufline;
-
-            System.arraycopy(bufcolumn, tokenBegin, newbufcolumn, 0, bufsize - tokenBegin);
-            System.arraycopy(bufcolumn, 0, newbufcolumn, bufsize - tokenBegin, bufpos);
-            bufcolumn = newbufcolumn;
-
-            maxNextCharInd = (bufpos += (bufsize - tokenBegin));
-          }
-        else
-          {
-            System.arraycopy(buffer, tokenBegin, newbuffer, 0, bufsize - tokenBegin);
-            buffer = newbuffer;
-
-            System.arraycopy(bufline, tokenBegin, newbufline, 0, bufsize - tokenBegin);
-            bufline = newbufline;
-
-            System.arraycopy(bufcolumn, tokenBegin, newbufcolumn, 0, bufsize - tokenBegin);
-            bufcolumn = newbufcolumn;
-
-            maxNextCharInd = (bufpos -= tokenBegin);
-          }
-      }
-    catch (Throwable t)
-      {
-        throw new Error(t.getMessage());
-      }
-
-
-    bufsize += 2048;
-    available = bufsize;
-    tokenBegin = 0;
-  }
-
-  protected void FillBuff() throws java.io.IOException
-  {
-    if (maxNextCharInd == available)
-      {
-        if (available == bufsize)
-          {
-            if (tokenBegin > 2048)
-              {
-                bufpos = maxNextCharInd = 0;
-                available = tokenBegin;
-              }
-            else if (tokenBegin < 0)
-              bufpos = maxNextCharInd = 0;
-            else
-              ExpandBuff(false);
-          }
-        else if (available > tokenBegin)
-          available = bufsize;
-        else if ((tokenBegin - available) < 2048)
-          ExpandBuff(true);
-        else
-          available = tokenBegin;
-      }
-
-    int i;
-    try {
-      if ((i = inputStream.read(buffer, maxNextCharInd,
-                                available - maxNextCharInd)) == -1)
-        {
-          inputStream.close();
-          throw new java.io.IOException();
-        }
-      else
-        maxNextCharInd += i;
-      return;
-    }
-    catch(java.io.IOException e) {
-      --bufpos;
-      backup(0);
-      if (tokenBegin == -1)
-        tokenBegin = bufpos;
-      throw e;
-    }
-  }
-
-  public char BeginToken() throws java.io.IOException
-  {
-    tokenBegin = -1;
-    char c = readChar();
-    tokenBegin = bufpos;
-
-    return c;
-  }
-
-  protected void UpdateLineColumn(char c)
-  {
-    column++;
-
-    if (prevCharIsLF)
-      {
-        prevCharIsLF = false;
-        line += (column = 1);
-      }
-    else if (prevCharIsCR)
-      {
-        prevCharIsCR = false;
-        if (c == '\n')
-          {
-            prevCharIsLF = true;
-          }
-        else
-          line += (column = 1);
-      }
-
-    switch (c)
-      {
-      case '\r' :
-        prevCharIsCR = true;
-        break;
-      case '\n' :
-        prevCharIsLF = true;
-        break;
-      case '\t' :
-        column--;
-        column += (tabSize - (column % tabSize));
-        break;
-      default :
-        break;
-      }
-
-    bufline[bufpos] = line;
-    bufcolumn[bufpos] = column;
-  }
-
-  public char readChar() throws java.io.IOException
-  {
-    if (inBuf > 0)
-      {
-        --inBuf;
-
-        if (++bufpos == bufsize)
-          bufpos = 0;
-
-        return buffer[bufpos];
-      }
-
-    if (++bufpos >= maxNextCharInd)
-      FillBuff();
-
-    char c = buffer[bufpos];
-
-    UpdateLineColumn(c);
-    return (c);
-  }
-
-  public int getEndColumn() {
-    return bufcolumn[bufpos];
-  }
-
-  public int getEndLine() {
-    return bufline[bufpos];
-  }
-
-  public int getBeginColumn() {
-    return bufcolumn[tokenBegin];
-  }
-
-  public int getBeginLine() {
-    return bufline[tokenBegin];
-  }
-
-  public void backup(int amount) {
-
-    inBuf += amount;
-    if ((bufpos -= amount) < 0)
-      bufpos += bufsize;
-  }
-
-  public SimpleCharStream(java.io.Reader dstream, int startline,
-                          int startcolumn, int buffersize)
-  {
-    inputStream = dstream;
-    line = startline;
-    column = startcolumn - 1;
-
-    available = bufsize = buffersize;
-    buffer = new char[buffersize];
-    bufline = new int[buffersize];
-    bufcolumn = new int[buffersize];
-  }
-
-  public SimpleCharStream(java.io.Reader dstream, int startline,
-                          int startcolumn)
-  {
-    this(dstream, startline, startcolumn, 4096);
-  }
-
-  public SimpleCharStream(java.io.Reader dstream)
-  {
-    this(dstream, 1, 1, 4096);
-  }
-  public void ReInit(java.io.Reader dstream, int startline,
-                     int startcolumn, int buffersize)
-  {
-    inputStream = dstream;
-    line = startline;
-    column = startcolumn - 1;
-
-    if (buffer == null || buffersize != buffer.length)
-      {
-        available = bufsize = buffersize;
-        buffer = new char[buffersize];
-        bufline = new int[buffersize];
-        bufcolumn = new int[buffersize];
-      }
-    prevCharIsLF = prevCharIsCR = false;
-    tokenBegin = inBuf = maxNextCharInd = 0;
-    bufpos = -1;
-  }
-
-  public void ReInit(java.io.Reader dstream, int startline,
-                     int startcolumn)
-  {
-    ReInit(dstream, startline, startcolumn, 4096);
-  }
-
-  public void ReInit(java.io.Reader dstream)
-  {
-    ReInit(dstream, 1, 1, 4096);
-  }
-  public SimpleCharStream(java.io.InputStream dstream, String encoding, int startline,
-                          int startcolumn, int buffersize) throws java.io.UnsupportedEncodingException
-  {
-    this(encoding == null ? new java.io.InputStreamReader(dstream) : new java.io.InputStreamReader(dstream, encoding), startline, startcolumn, buffersize);
-  }
-
-  public SimpleCharStream(java.io.InputStream dstream, int startline,
-                          int startcolumn, int buffersize)
-  {
-    this(new java.io.InputStreamReader(dstream), startline, startcolumn, buffersize);
-  }
-
-  public SimpleCharStream(java.io.InputStream dstream, String encoding, int startline,
-                          int startcolumn) throws java.io.UnsupportedEncodingException
-  {
-    this(dstream, encoding, startline, startcolumn, 4096);
-  }
-
-  public SimpleCharStream(java.io.InputStream dstream, int startline,
-                          int startcolumn)
-  {
-    this(dstream, startline, startcolumn, 4096);
-  }
-
-  public SimpleCharStream(java.io.InputStream dstream, String encoding) throws java.io.UnsupportedEncodingException
-  {
-    this(dstream, encoding, 1, 1, 4096);
-  }
-
-  public SimpleCharStream(java.io.InputStream dstream)
-  {
-    this(dstream, 1, 1, 4096);
-  }
-
-  public void ReInit(java.io.InputStream dstream, String encoding, int startline,
-                     int startcolumn, int buffersize) throws java.io.UnsupportedEncodingException
-  {
-    ReInit(encoding == null ? new java.io.InputStreamReader(dstream) : new java.io.InputStreamReader(dstream, encoding), startline, startcolumn, buffersize);
-  }
-
-  public void ReInit(java.io.InputStream dstream, int startline,
-                     int startcolumn, int buffersize)
-  {
-    ReInit(new java.io.InputStreamReader(dstream), startline, startcolumn, buffersize);
-  }
-
-  public void ReInit(java.io.InputStream dstream, String encoding) throws java.io.UnsupportedEncodingException
-  {
-    ReInit(dstream, encoding, 1, 1, 4096);
-  }
-
-  public void ReInit(java.io.InputStream dstream)
-  {
-    ReInit(dstream, 1, 1, 4096);
-  }
-  public void ReInit(java.io.InputStream dstream, String encoding, int startline,
-                     int startcolumn) throws java.io.UnsupportedEncodingException
-  {
-    ReInit(dstream, encoding, startline, startcolumn, 4096);
-  }
-  public void ReInit(java.io.InputStream dstream, int startline,
-                     int startcolumn)
-  {
-    ReInit(dstream, startline, startcolumn, 4096);
-  }
-  public String GetImage()
-  {
-    if (bufpos >= tokenBegin)
-      return new String(buffer, tokenBegin, bufpos - tokenBegin + 1);
-    else
-      return new String(buffer, tokenBegin, bufsize - tokenBegin) +
-        new String(buffer, 0, bufpos + 1);
-  }
-
-  public char[] GetSuffix(int len)
-  {
-    char[] ret = new char[len];
-
-    if ((bufpos + 1) >= len)
-      System.arraycopy(buffer, bufpos - len + 1, ret, 0, len);
-    else
-      {
-        System.arraycopy(buffer, bufsize - (len - bufpos - 1), ret, 0,
-                         len - bufpos - 1);
-        System.arraycopy(buffer, 0, ret, len - bufpos - 1, bufpos + 1);
-      }
-
-    return ret;
-  }
-
-  public void Done()
-  {
-    buffer = null;
-    bufline = null;
-    bufcolumn = null;
-  }
-
-  /**
-   * Method to adjust line and column numbers for the start of a token.
-   */
-  public void adjustBeginLineColumn(int newLine, int newCol)
-  {
-    int start = tokenBegin;
-    int len;
-
-    if (bufpos >= tokenBegin)
-      {
-        len = bufpos - tokenBegin + inBuf + 1;
-      }
-    else
-      {
-        len = bufsize - tokenBegin + bufpos + 1 + inBuf;
-      }
-
-    int i = 0, j = 0, k = 0;
-    int nextColDiff = 0, columnDiff = 0;
-
-    while (i < len &&
-           bufline[j = start % bufsize] == bufline[k = ++start % bufsize])
-      {
-        bufline[j] = newLine;
-        nextColDiff = columnDiff + bufcolumn[k] - bufcolumn[j];
-        bufcolumn[j] = newCol + columnDiff;
-        columnDiff = nextColDiff;
-        i++;
-      } 
-
-    if (i < len)
-      {
-        bufline[j] = newLine++;
-        bufcolumn[j] = newCol + columnDiff;
-
-        while (i++ < len)
-          {
-            if (bufline[j = start % bufsize] != bufline[++start % bufsize])
-              bufline[j] = newLine++;
-            else
-              bufline[j] = newLine;
-          }
-      }
-
-    line = bufline[j];
-    column = bufcolumn[j];
-  }
-
-}
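
For context, the class above was driven by the generated Rcc token manager; a minimal sketch of its public API, using only the constructors and methods visible in the deleted source (the input string is illustrative):

    import java.io.StringReader;
    import org.apache.hadoop.record.compiler.generated.SimpleCharStream;

    public class CharStreamSketch {
      public static void main(String[] args) throws java.io.IOException {
        // Line/column tracking starts at (1, 1) with the default 4096-char buffer.
        SimpleCharStream in = new SimpleCharStream(new StringReader("module demo {"));

        char c = in.BeginToken();          // marks tokenBegin and returns the first char
        while (c != '{') {
          c = in.readChar();               // advances bufpos and updates line/column
        }
        System.out.println(in.GetImage()); // prints "module demo {"
        System.out.println("ends at line " + in.getEndLine()
            + ", column " + in.getEndColumn());
        in.Done();                         // releases the internal buffers
      }
    }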

+ 0 - 107
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Token.java

@@ -1,107 +0,0 @@
-/* Generated By:JavaCC: Do not edit this line. Token.java Version 3.0 */
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler.generated;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Describes the input token stream.
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class Token {
-
-  /**
-   * An integer that describes the kind of this token.  This numbering
-   * system is determined by JavaCCParser, and a table of these numbers is
-   * stored in the file ...Constants.java.
-   */
-  public int kind;
-
-  /**
-   * beginLine and beginColumn describe the position of the first character
-   * of this token; endLine and endColumn describe the position of the
-   * last character of this token.
-   */
-  public int beginLine, beginColumn, endLine, endColumn;
-
-  /**
-   * The string image of the token.
-   */
-  public String image;
-
-  /**
-   * A reference to the next regular (non-special) token from the input
-   * stream.  If this is the last token from the input stream, or if the
-   * token manager has not read tokens beyond this one, this field is
-   * set to null.  This is true only if this token is also a regular
-   * token.  Otherwise, see below for a description of the contents of
-   * this field.
-   */
-  public Token next;
-
-  /**
-   * This field is used to access special tokens that occur prior to this
-   * token, but after the immediately preceding regular (non-special) token.
-   * If there are no such special tokens, this field is set to null.
-   * When there are more than one such special token, this field refers
-   * to the last of these special tokens, which in turn refers to the next
-   * previous special token through its specialToken field, and so on
-   * until the first special token (whose specialToken field is null).
-   * The next fields of special tokens refer to other special tokens that
-   * immediately follow it (without an intervening regular token).  If there
-   * is no such token, this field is null.
-   */
-  public Token specialToken;
-
-  /**
-   * Returns the image.
-   */
-  @Override
-  public String toString()
-  {
-    return image;
-  }
-
-  /**
-   * Returns a new Token object, by default. However, if you want, you
-   * can create and return subclass objects based on the value of ofKind.
-   * Simply add the cases to the switch for all those special cases.
-   * For example, if you have a subclass of Token called IDToken that
-   * you want to create if ofKind is ID, simply add something like:
-   *
-   *    case MyParserConstants.ID : return new IDToken();
-   *
-   * to the following switch statement. Then you can cast matchedToken
-   * variable to the appropriate type and use it in your lexical actions.
-   */
-  public static final Token newToken(int ofKind)
-  {
-    switch(ofKind)
-      {
-      default : return new Token();
-      }
-  }
-
-}
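
The newToken factory above is the hook JavaCC leaves for returning Token subclasses; its own comment describes the pattern. A hypothetical sketch of that customization (IDToken and MyParserConstants.ID are illustrative names taken from the comment, not part of this codebase):

    import org.apache.hadoop.record.compiler.generated.Token;

    // Hypothetical Token subclass carrying extra lexical state.
    class IDToken extends Token {
      boolean quoted;
    }

    // Inside the generated Token.newToken(int), the switch would then read roughly:
    //   switch (ofKind) {
    //     case MyParserConstants.ID: return new IDToken();
    //     default:                   return new Token();
    //   }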

+ 0 - 161
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/TokenMgrError.java

@@ -1,161 +0,0 @@
-/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 3.0 */
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler.generated;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class TokenMgrError extends Error
-{
-  /*
-   * Ordinals for various reasons why an Error of this type can be thrown.
-   */
-
-  /**
-   * Lexical error occurred.
-   */
-  static final int LEXICAL_ERROR = 0;
-
-  /**
-   * An attempt was made to create a second instance of a static token manager.
-   */
-  static final int STATIC_LEXER_ERROR = 1;
-
-  /**
-   * Tried to change to an invalid lexical state.
-   */
-  static final int INVALID_LEXICAL_STATE = 2;
-
-  /**
-   * Detected (and bailed out of) an infinite loop in the token manager.
-   */
-  static final int LOOP_DETECTED = 3;
-
-  /**
-   * Indicates the reason why the exception is thrown. It will have
-   * one of the above 4 values.
-   */
-  int errorCode;
-
-  /**
-   * Replaces unprintable characters by their escaped (or unicode escaped)
-   * equivalents in the given string
-   */
-  protected static final String addEscapes(String str) {
-    StringBuffer retval = new StringBuffer();
-    char ch;
-    for (int i = 0; i < str.length(); i++) {
-      switch (str.charAt(i))
-        {
-        case 0 :
-          continue;
-        case '\b':
-          retval.append("\\b");
-          continue;
-        case '\t':
-          retval.append("\\t");
-          continue;
-        case '\n':
-          retval.append("\\n");
-          continue;
-        case '\f':
-          retval.append("\\f");
-          continue;
-        case '\r':
-          retval.append("\\r");
-          continue;
-        case '\"':
-          retval.append("\\\"");
-          continue;
-        case '\'':
-          retval.append("\\\'");
-          continue;
-        case '\\':
-          retval.append("\\\\");
-          continue;
-        default:
-          if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) {
-            String s = "0000" + Integer.toString(ch, 16);
-            retval.append("\\u" + s.substring(s.length() - 4, s.length()));
-          } else {
-            retval.append(ch);
-          }
-          continue;
-        }
-    }
-    return retval.toString();
-  }
-
-  /**
-   * Returns a detailed message for the Error when it is thrown by the
-   * token manager to indicate a lexical error.
-   * Parameters : 
-   *    EOFSeen     : indicates if EOF caused the lexical error
-   *    curLexState : lexical state in which this error occurred
-   *    errorLine   : line number when the error occurred
-   *    errorColumn : column number when the error occurred
-   *    errorAfter  : prefix that was seen before this error occurred
-   *    curchar     : the offending character
-   * Note: You can customize the lexical error message by modifying this method.
-   */
-  protected static String LexicalError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar) {
-    return("Lexical error at line " +
-           errorLine + ", column " +
-           errorColumn + ".  Encountered: " +
-           (EOFSeen ? "<EOF> " : ("\"" + addEscapes(String.valueOf(curChar)) + "\"") + " (" + (int)curChar + "), ") +
-           "after : \"" + addEscapes(errorAfter) + "\"");
-  }
-
-  /**
-   * You can also modify the body of this method to customize your error messages.
-   * For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not
-   * of end users' concern, so you can return something like:
-   *
-   *     "Internal Error : Please file a bug report .... "
-   *
-   * from this method for such cases in the release version of your parser.
-   */
-  @Override
-  public String getMessage() {
-    return super.getMessage();
-  }
-
-  /*
-   * Constructors of various flavors follow.
-   */
-
-  public TokenMgrError() {
-  }
-
-  public TokenMgrError(String message, int reason) {
-    super(message);
-    errorCode = reason;
-  }
-
-  public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar, int reason) {
-    this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
-  }
-}
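
Because TokenMgrError extends Error rather than Exception, callers of the generated parser had to catch it explicitly to report lexical problems instead of crashing. A minimal hypothetical sketch (parseDdl is an illustrative stand-in for invoking the generated tokenizer, not a real method in this codebase):

    import org.apache.hadoop.record.compiler.generated.TokenMgrError;

    public class LexErrorSketch {
      public static void main(String[] args) {
        try {
          parseDdl("class \u0001 {}");    // deliberately bad input
        } catch (TokenMgrError e) {
          // The message carries the "Lexical error at line ..., column ..." text.
          System.err.println("lexical error: " + e.getMessage());
        }
      }

      // Illustrative stand-in; the real token manager throws TokenMgrError itself.
      private static void parseDdl(String ddl) {
        throw new TokenMgrError("Lexical error at line 1, column 7.", 0 /* LEXICAL_ERROR */);
      }
    }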

+ 0 - 35
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/package.html

@@ -1,35 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
-<html>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-  <head>
-    <title>Hadoop Record Compiler: Parser</title>
-  </head>
-  <body>
-    <p>
-    (DEPRECATED) This package contains code generated by JavaCC from the
-    Hadoop record syntax file rcc.jj. For details about the
-    record file syntax please @see org.apache.hadoop.record.
-    </p>
-  
-    <p>
-    DEPRECATED: Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
-    </p>
-  </body>
-</html>

+ 0 - 384
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/rcc.jj

@@ -1,384 +0,0 @@
-options {
-STATIC=false;
-}
-
-PARSER_BEGIN(Rcc)
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.compiler.generated;
-
-import org.apache.hadoop.record.compiler.*;
-import java.util.ArrayList;
-import java.util.Hashtable;
-import java.util.Iterator;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-public class Rcc {
-    private static String language = "java";
-    private static String destDir = ".";
-    private static ArrayList<String> recFiles = new ArrayList<String>();
-    private static ArrayList<String> cmdargs = new ArrayList<String>();
-    private static JFile curFile;
-    private static Hashtable<String,JRecord> recTab;
-    private static String curDir = ".";
-    private static String curFileName;
-    private static String curModuleName;
-
-    public static void main(String[] args) {
-        System.exit(driver(args));
-    }
- 
-    public static void usage() {
-        System.err.println("Usage: rcc --language [java|c++] ddl-files");
-    }
-
-    public static int driver(String[] args) {
-        for (int i=0; i<args.length; i++) {
-            if ("-l".equalsIgnoreCase(args[i]) ||
-                "--language".equalsIgnoreCase(args[i])) {
-                language = args[i+1].toLowerCase();
-                i++;
-            } else if ("-d".equalsIgnoreCase(args[i]) ||
-                "--destdir".equalsIgnoreCase(args[i])) {
-                destDir = args[i+1];
-                i++;
-            } else if (args[i].startsWith("-")) {
-              String arg = args[i].substring(1);
-              if (arg.startsWith("-")) {
-                arg = arg.substring(1);
-              }
-              cmdargs.add(arg.toLowerCase());
-            } else {
-                recFiles.add(args[i]);
-            }
-        }
-        if (recFiles.size() == 0) {
-            usage();
-            return 1;
-        }
-        for (int i=0; i<recFiles.size(); i++) {
-            curFileName = recFiles.get(i);
-            File file = new File(curFileName);
-            try {
-                FileReader reader = new FileReader(file);
-                Rcc parser = new Rcc(reader);
-                try {
-                    recTab = new Hashtable<String,JRecord>();
-                    curFile = parser.Input();
-                } catch (ParseException e) {
-                    System.err.println(e.toString());
-                    return 1;
-                }
-                try {
-                    reader.close();
-                } catch (IOException e) {
-                }
-            } catch (FileNotFoundException e) {
-                System.err.println("File " + (String) recFiles.get(i) +
-                    " Not found.");
-                return 1;
-            }
-            try {
-                int retCode = curFile.genCode(language, destDir, cmdargs);
-                if (retCode != 0) { return retCode; }
-            } catch (IOException e) {
-                System.err.println(e.toString());
-                return 1;
-            }
-        }
-        return 0;
-    }
-}
-
-PARSER_END(Rcc)
-
-SKIP :
-{
-  " "
-| "\t"
-| "\n"
-| "\r"
-}
-
-SPECIAL_TOKEN :
-{
-  "//" : WithinOneLineComment
-}
-
-<WithinOneLineComment> SPECIAL_TOKEN :
-{
-  <("\n" | "\r" | "\r\n" )> : DEFAULT
-}
-
-<WithinOneLineComment> MORE :
-{
-  <~[]>
-}
-
-SPECIAL_TOKEN :
-{
-  "/*" : WithinMultiLineComment
-}
-
-<WithinMultiLineComment> SPECIAL_TOKEN :
-{
-  "*/" : DEFAULT
-}
-
-<WithinMultiLineComment> MORE :
-{
-  <~[]>
-}
-
-TOKEN :
-{
-    <MODULE_TKN: "module">
-|   <RECORD_TKN: "class">
-|   <INCLUDE_TKN: "include">
-|   <BYTE_TKN: "byte">
-|   <BOOLEAN_TKN: "boolean">
-|   <INT_TKN: "int">
-|   <LONG_TKN: "long">
-|   <FLOAT_TKN: "float">
-|   <DOUBLE_TKN: "double">
-|   <USTRING_TKN: "ustring">
-|   <BUFFER_TKN: "buffer">
-|   <VECTOR_TKN: "vector">
-|   <MAP_TKN: "map">
-|   <LBRACE_TKN: "{">
-|   <RBRACE_TKN: "}">
-|   <LT_TKN: "<">
-|   <GT_TKN: ">">
-|   <SEMICOLON_TKN: ";">
-|   <COMMA_TKN: ",">
-|   <DOT_TKN: ".">
-|   <CSTRING_TKN: "\"" ( ~["\""] )+ "\"">
-|   <IDENT_TKN: ["A"-"Z","a"-"z"] (["a"-"z","A"-"Z","0"-"9","_"])*>
-}
-
-JFile Input() :
-{
-    ArrayList<JFile> ilist = new ArrayList<JFile>();
-    ArrayList<JRecord> rlist = new ArrayList<JRecord>();
-    JFile i;
-    ArrayList<JRecord> l;
-}
-{
-    (
-        i = Include()
-        { ilist.add(i); }
-    |   l = Module()
-        { rlist.addAll(l); }
-    )+
-    <EOF>
-    { return new JFile(curFileName, ilist, rlist); }
-}
-
-JFile Include() :
-{
-    String fname;
-    Token t;
-}
-{
-    <INCLUDE_TKN>
-    t = <CSTRING_TKN>
-    {
-        JFile ret = null;
-        fname = t.image.replaceAll("^\"", "").replaceAll("\"$","");
-        File file = new File(curDir, fname);
-        String tmpDir = curDir;
-        String tmpFile = curFileName;
-        curDir = file.getParent();
-        curFileName = file.getName();
-        try {
-            FileReader reader = new FileReader(file);
-            Rcc parser = new Rcc(reader);
-            try {
-                ret = parser.Input();
-                System.out.println(fname + " Parsed Successfully");
-            } catch (ParseException e) {
-                System.out.println(e.toString());
-                System.exit(1);
-            }
-            try {
-                reader.close();
-            } catch (IOException e) {
-            }
-        } catch (FileNotFoundException e) {
-            System.out.println("File " + fname +
-                " Not found.");
-            System.exit(1);
-        }
-        curDir = tmpDir;
-        curFileName = tmpFile;
-        return ret;
-    }
-}
-
-ArrayList<JRecord> Module() :
-{
-    String mName;
-    ArrayList<JRecord> rlist;
-}
-{
-    <MODULE_TKN>
-    mName = ModuleName()
-    { curModuleName = mName; }
-    <LBRACE_TKN>
-    rlist = RecordList()
-    <RBRACE_TKN>
-    { return rlist; }
-}
-
-String ModuleName() :
-{
-    String name = "";
-    Token t;
-}
-{
-    t = <IDENT_TKN>
-    { name += t.image; }
-    (
-        <DOT_TKN>
-        t = <IDENT_TKN>
-        { name += "." + t.image; }
-    )*
-    { return name; }
-}
-
-ArrayList<JRecord> RecordList() :
-{
-    ArrayList<JRecord> rlist = new ArrayList<JRecord>();
-    JRecord r;
-}
-{
-    (
-        r = Record()
-        { rlist.add(r); }
-    )+
-    { return rlist; }
-}
-
-JRecord Record() :
-{
-    String rname;
-    ArrayList<JField<JType>> flist = new ArrayList<JField<JType>>();
-    Token t;
-    JField<JType> f;
-}
-{
-    <RECORD_TKN>
-    t = <IDENT_TKN>
-    { rname = t.image; }
-    <LBRACE_TKN>
-    (
-        f = Field()
-        { flist.add(f); }
-        <SEMICOLON_TKN>
-    )+
-    <RBRACE_TKN>
-    {
-        String fqn = curModuleName + "." + rname;
-        JRecord r = new JRecord(fqn, flist);
-        recTab.put(fqn, r);
-        return r;
-    }
-}
-
-JField<JType> Field() :
-{
-    JType jt;
-    Token t;
-}
-{
-    jt = Type()
-    t = <IDENT_TKN>
-    { return new JField<JType>(t.image, jt); }
-}
-
-JType Type() :
-{
-    JType jt;
-    Token t;
-    String rname;
-}
-{
-    jt = Map()
-    { return jt; }
-|   jt = Vector()
-    { return jt; }
-|   <BYTE_TKN>
-    { return new JByte(); }
-|   <BOOLEAN_TKN>
-    { return new JBoolean(); }
-|   <INT_TKN>
-    { return new JInt(); }
-|   <LONG_TKN>
-    { return new JLong(); }
-|   <FLOAT_TKN>
-    { return new JFloat(); }
-|   <DOUBLE_TKN>
-    { return new JDouble(); }
-|   <USTRING_TKN>
-    { return new JString(); }
-|   <BUFFER_TKN>
-    { return new JBuffer(); }
-|   rname = ModuleName()
-    {
-        if (rname.indexOf('.', 0) < 0) {
-            rname = curModuleName + "." + rname;
-        }
-        JRecord r = recTab.get(rname);
-        if (r == null) {
-            System.out.println("Type " + rname + " not known. Exiting.");
-            System.exit(1);
-        }
-        return r;
-    }
-}
-
-JMap Map() :
-{
-    JType jt1;
-    JType jt2;
-}
-{
-    <MAP_TKN>
-    <LT_TKN>
-    jt1 = Type()
-    <COMMA_TKN>
-    jt2 = Type()
-    <GT_TKN>
-    { return new JMap(jt1, jt2); }
-}
-
-JVector Vector() :
-{
-    JType jt;
-}
-{
-    <VECTOR_TKN>
-    <LT_TKN>
-    jt = Type()
-    <GT_TKN>
-    { return new JVector(jt); }
-}
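
The grammar above accepts a small DDL of modules and records built from the primitive and compound types in the TOKEN section. A hedged sketch of driving the compiler programmatically (demo.jr and its contents are hypothetical; the same thing was normally done from the rcc command line):

    import org.apache.hadoop.record.compiler.generated.Rcc;

    public class RccSketch {
      public static void main(String[] args) {
        // demo.jr (hypothetical) would contain DDL such as:
        //   module demo {
        //     class Employee {
        //       ustring name;
        //       int age;
        //       map<ustring, long> scores;
        //     }
        //   }
        int rc = Rcc.driver(new String[] {
            "--language", "java",
            "--destdir", ".",
            "demo.jr"
        });
        System.exit(rc);   // non-zero when parsing or code generation fails
      }
    }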

+ 0 - 37
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/package.html

@@ -1,37 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
-<html>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-  <head>
-    <title>Hadoop Record Compiler</title>
-  </head>
-  <body>
-    <p>
-    (DEPRECATED) This package contains classes needed for code generation
-    from the hadoop record compiler. CppGenerator and JavaGenerator
-    are the main entry points from the parser. There are classes
-    corrsponding to every primitive type and compound type
-    included in Hadoop record I/O syntax.
-    </p>
-    
-    <p>
-    DEPRECATED: Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
-    </p>
-  </body>
-</html>

+ 0 - 107
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/FieldTypeInfo.java

@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.meta;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.record.RecordOutput;
-
-/** 
- * Represents a type information for a field, which is made up of its 
- * ID (name) and its type (a TypeID object).
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class FieldTypeInfo
-{
-
-  private String fieldID;
-  private TypeID typeID;
-
-  /**
-   * Construct a FieldTypeInfo with the given field name and type
-   */
-  FieldTypeInfo(String fieldID, TypeID typeID) {
-    this.fieldID = fieldID;
-    this.typeID = typeID;
-  }
-
-  /**
-   * get the field's TypeID object
-   */
-  public TypeID getTypeID() {
-    return typeID;
-  }
-  
-  /**
-   * get the field's id (name)
-   */
-  public String getFieldID() {
-    return fieldID;
-  }
-  
-  void write(RecordOutput rout, String tag) throws IOException {
-    rout.writeString(fieldID, tag);
-    typeID.write(rout, tag);
-  }
-  
-  /**
-   * Two FieldTypeInfos are equal if each of their fields matches
-   */
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) 
-      return true;
-    if (!(o instanceof FieldTypeInfo))
-      return false;
-    FieldTypeInfo fti = (FieldTypeInfo) o;
-    // first check if fieldID matches
-    if (!this.fieldID.equals(fti.fieldID)) {
-      return false;
-    }
-    // now see if typeID matches
-    return (this.typeID.equals(fti.typeID));
-  }
-  
-  /**
-   * We use a basic hashcode implementation, since this class will likely not
-   * be used as a hashmap key 
-   */
-  @Override
-  public int hashCode() {
-    return 37*17+typeID.hashCode() + 37*17+fieldID.hashCode();
-  }
-  
-
-  public boolean equals(FieldTypeInfo ti) {
-    // first check if fieldID matches
-    if (!this.fieldID.equals(ti.fieldID)) {
-      return false;
-    }
-    // now see if typeID matches
-    return (this.typeID.equals(ti.typeID));
-  }
-
-}
-

+ 0 - 90
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/MapTypeID.java

@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.meta;
-
-import java.io.IOException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.record.RecordOutput;
-
-/** 
- * Represents typeID for a Map 
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class MapTypeID extends TypeID {
-  
-  private TypeID typeIDKey; 
-  private TypeID typeIDValue; 
-  
-  public MapTypeID(TypeID typeIDKey, TypeID typeIDValue) {
-    super(RIOType.MAP);
-    this.typeIDKey = typeIDKey;
-    this.typeIDValue = typeIDValue;
-  }
-  
-  /**
-   * get the TypeID of the map's key element
-   */
-  public TypeID getKeyTypeID() {
-    return this.typeIDKey;
-  }
-  
-  /**
-   * get the TypeID of the map's value element
-   */
-  public TypeID getValueTypeID() {
-    return this.typeIDValue;
-  }
-  
-  @Override
-  void write(RecordOutput rout, String tag) throws IOException {
-    rout.writeByte(typeVal, tag);
-    typeIDKey.write(rout, tag);
-    typeIDValue.write(rout, tag);
-  }
-  
-  /**
-   * Two map  typeIDs are equal if their constituent elements have the 
-   * same type
-   */
-  @Override
-  public boolean equals(Object o) {
-    if (!super.equals(o))
-      return false;
-
-    MapTypeID mti = (MapTypeID) o;
-
-    return this.typeIDKey.equals(mti.typeIDKey) &&
-           this.typeIDValue.equals(mti.typeIDValue);
-  }
-  
-  /**
-   * We use a basic hashcode implementation, since this class will likely not
-   * be used as a hashmap key 
-   */
-  @Override
-  public int hashCode() {
-    return 37*17+typeIDKey.hashCode() + 37*17+typeIDValue.hashCode();
-  }
-  
-}

+ 0 - 161
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/RecordTypeInfo.java

@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.meta;
-
-import java.io.IOException;
-import java.util.*;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.record.RecordInput;
-import org.apache.hadoop.record.RecordOutput;
-
-
-/** 
- * A record's Type Information object which can read/write itself. 
- * 
- * Type information for a record comprises metadata about the record, 
- * as well as a collection of type information for each field in the record. 
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class RecordTypeInfo extends org.apache.hadoop.record.Record 
-{
-
-  private String name;
-  // A RecordTypeInfo is really just a wrapper around StructTypeID
-  StructTypeID sTid;
-   // A RecordTypeInfo object is just a collection of TypeInfo objects for each of its fields.  
-  //private ArrayList<FieldTypeInfo> typeInfos = new ArrayList<FieldTypeInfo>();
-  // we keep a hashmap of struct/record names and their type information, as we need it to 
-  // set filters when reading nested structs. This map is used during deserialization.
-  //private Map<String, RecordTypeInfo> structRTIs = new HashMap<String, RecordTypeInfo>();
-
-  /**
-   * Create an empty RecordTypeInfo object.
-   */
-  public RecordTypeInfo() {
-    sTid = new StructTypeID();
-  }
-
-  /**
-   * Create a RecordTypeInfo object representing a record with the given name
-   * @param name Name of the record
-   */
-  public RecordTypeInfo(String name) {
-    this.name = name;
-    sTid = new StructTypeID();
-  }
-
-  /*
-   * private constructor
-   */
-  private RecordTypeInfo(String name, StructTypeID stid) {
-    this.sTid = stid;
-    this.name = name;
-  }
-  
-  /**
-   * return the name of the record
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * set the name of the record
-   */
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  /**
-   * Add a field. 
-   * @param fieldName Name of the field
-   * @param tid Type ID of the field
-   */
-  public void addField(String fieldName, TypeID tid) {
-    sTid.getFieldTypeInfos().add(new FieldTypeInfo(fieldName, tid));
-  }
-  
-  private void addAll(Collection<FieldTypeInfo> tis) {
-    sTid.getFieldTypeInfos().addAll(tis);
-  }
-
-  /**
-   * Return a collection of field type infos
-   */
-  public Collection<FieldTypeInfo> getFieldTypeInfos() {
-    return sTid.getFieldTypeInfos();
-  }
-  
-  /**
-   * Return the type info of a nested record. We only consider nesting 
-   * to one level. 
-   * @param name Name of the nested record
-   */
-  public RecordTypeInfo getNestedStructTypeInfo(String name) {
-    StructTypeID stid = sTid.findStruct(name);
-    if (null == stid) return null;
-    return new RecordTypeInfo(name, stid);
-  }
-
-  /**
-   * Serialize the type information for a record
-   */
-  @Override
-  public void serialize(RecordOutput rout, String tag) throws IOException {
-    // write out any header, version info, here
-    rout.startRecord(this, tag);
-    rout.writeString(name, tag);
-    sTid.writeRest(rout, tag);
-    rout.endRecord(this, tag);
-  }
-
-  /**
-   * Deserialize the type information for a record
-   */
-  @Override
-  public void deserialize(RecordInput rin, String tag) throws IOException {
-    // read in any header, version info 
-    rin.startRecord(tag);
-    // name
-    this.name = rin.readString(tag);
-    sTid.read(rin, tag);
-    rin.endRecord(tag);
-  }
-  
-  /**
-   * This class doesn't implement Comparable as it's not meant to be used 
-   * for anything besides de/serializing.
-   * So we always throw an exception.
-   * Not implemented. Always returns 0 if another RecordTypeInfo is passed in. 
-   */
-  @Override
-  public int compareTo (final Object peer_) throws ClassCastException {
-    if (!(peer_ instanceof RecordTypeInfo)) {
-      throw new ClassCastException("Comparing different types of records.");
-    }
-    throw new UnsupportedOperationException("compareTo() is not supported");
-  }
-}
-
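
A minimal sketch of the API above, assuming the rest of the deprecated org.apache.hadoop.record package (here BinaryRecordOutput) is still on the classpath; the record and field names are illustrative:

    import java.io.ByteArrayOutputStream;
    import org.apache.hadoop.record.BinaryRecordOutput;
    import org.apache.hadoop.record.meta.RecordTypeInfo;
    import org.apache.hadoop.record.meta.TypeID;

    public class RtiSketch {
      public static void main(String[] args) throws Exception {
        // Describe a record with two fields, the way generated code did.
        RecordTypeInfo rti = new RecordTypeInfo("demo.Employee");
        rti.addField("name", TypeID.StringTypeID);
        rti.addField("age", TypeID.IntTypeID);

        // Serialize the type information itself, as readers and writers did
        // when exchanging schemas for versioning.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        rti.serialize(new BinaryRecordOutput(out), "");
        System.out.println("serialized type info: " + out.size() + " bytes");
      }
    }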

+ 0 - 166
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/StructTypeID.java

@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.meta;
-
-import java.io.IOException;
-import java.util.*;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.record.RecordInput;
-import org.apache.hadoop.record.RecordOutput;
-
-/** 
- * Represents typeID for a struct 
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class StructTypeID extends TypeID {
-  private ArrayList<FieldTypeInfo> typeInfos = new ArrayList<FieldTypeInfo>();
-  
-  StructTypeID() {
-    super(RIOType.STRUCT);
-  }
-  
-  /**
-   * Create a StructTypeID based on the RecordTypeInfo of some record
-   */
-  public StructTypeID(RecordTypeInfo rti) {
-    super(RIOType.STRUCT);
-    typeInfos.addAll(rti.getFieldTypeInfos());
-  }
-
-  void add (FieldTypeInfo ti) {
-    typeInfos.add(ti);
-  }
-  
-  public Collection<FieldTypeInfo> getFieldTypeInfos() {
-    return typeInfos;
-  }
-  
-  /* 
-   * return the StructTypeID, if any, of the given field
-   */
-  StructTypeID findStruct(String name) {
-    // walk through the list, searching. Not the most efficient way, but this
-    // is intended to be used rarely, so we keep it simple. 
-    // As an optimization, we can keep a hashmap of record name to its RTI, for later.
-    for (FieldTypeInfo ti : typeInfos) {
-      if ((0 == ti.getFieldID().compareTo(name)) && (ti.getTypeID().getTypeVal() == RIOType.STRUCT)) {
-        return (StructTypeID) ti.getTypeID();
-      }
-    }
-    return null;
-  }
-  
-  @Override
-  void write(RecordOutput rout, String tag) throws IOException {
-    rout.writeByte(typeVal, tag);
-    writeRest(rout, tag);
-  }
-
-  /* 
-   * Writes rest of the struct (excluding type value).
-   * As an optimization, this method is directly called by RTI 
-   * for the top level record so that we don't write out the byte
-   * indicating that this is a struct (since top level records are
-   * always structs).
-   */
-  void writeRest(RecordOutput rout, String tag) throws IOException {
-    rout.writeInt(typeInfos.size(), tag);
-    for (FieldTypeInfo ti : typeInfos) {
-      ti.write(rout, tag);
-    }
-  }
-
-  /* 
-   * deserialize ourselves. Called by RTI. 
-   */
-  void read(RecordInput rin, String tag) throws IOException {
-    // number of elements
-    int numElems = rin.readInt(tag);
-    for (int i=0; i<numElems; i++) {
-      typeInfos.add(genericReadTypeInfo(rin, tag));
-    }
-  }
-  
-  // generic reader: reads the next TypeInfo object from stream and returns it
-  private FieldTypeInfo genericReadTypeInfo(RecordInput rin, String tag) throws IOException {
-    String fieldName = rin.readString(tag);
-    TypeID id = genericReadTypeID(rin, tag);
-    return new FieldTypeInfo(fieldName, id);
-  }
-  
-  // generic reader: reads the next TypeID object from stream and returns it
-  private TypeID genericReadTypeID(RecordInput rin, String tag) throws IOException {
-    byte typeVal = rin.readByte(tag);
-    switch (typeVal) {
-    case TypeID.RIOType.BOOL: 
-      return TypeID.BoolTypeID;
-    case TypeID.RIOType.BUFFER: 
-      return TypeID.BufferTypeID;
-    case TypeID.RIOType.BYTE:
-      return TypeID.ByteTypeID;
-    case TypeID.RIOType.DOUBLE:
-      return TypeID.DoubleTypeID;
-    case TypeID.RIOType.FLOAT:
-      return TypeID.FloatTypeID;
-    case TypeID.RIOType.INT: 
-      return TypeID.IntTypeID;
-    case TypeID.RIOType.LONG:
-      return TypeID.LongTypeID;
-    case TypeID.RIOType.MAP:
-    {
-      TypeID tIDKey = genericReadTypeID(rin, tag);
-      TypeID tIDValue = genericReadTypeID(rin, tag);
-      return new MapTypeID(tIDKey, tIDValue);
-    }
-    case TypeID.RIOType.STRING: 
-      return TypeID.StringTypeID;
-    case TypeID.RIOType.STRUCT: 
-    {
-      StructTypeID stID = new StructTypeID();
-      int numElems = rin.readInt(tag);
-      for (int i=0; i<numElems; i++) {
-        stID.add(genericReadTypeInfo(rin, tag));
-      }
-      return stID;
-    }
-    case TypeID.RIOType.VECTOR: 
-    {
-      TypeID tID = genericReadTypeID(rin, tag);
-      return new VectorTypeID(tID);
-    }
-    default:
-      // shouldn't be here
-      throw new IOException("Unknown type read");
-    }
-  }
-  
-  @Override
-  public boolean equals(Object o) {
-    return super.equals(o);
-  }
-  
-  @Override
-  public int hashCode() { return super.hashCode(); }
-}
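
StructTypeID is what backs RecordTypeInfo.getNestedStructTypeInfo (removed above); a short sketch under the same assumptions as the RecordTypeInfo example earlier, with illustrative names:

    import org.apache.hadoop.record.meta.RecordTypeInfo;
    import org.apache.hadoop.record.meta.StructTypeID;
    import org.apache.hadoop.record.meta.TypeID;

    public class NestedRtiSketch {
      public static void main(String[] args) {
        RecordTypeInfo address = new RecordTypeInfo("demo.Address");
        address.addField("city", TypeID.StringTypeID);

        // Nest the address record as a struct-typed field of employee.
        RecordTypeInfo employee = new RecordTypeInfo("demo.Employee");
        employee.addField("address", new StructTypeID(address));

        // Resolves because the field is named "address" and its type is STRUCT.
        RecordTypeInfo nested = employee.getNestedStructTypeInfo("address");
        System.out.println(nested != null);   // prints true
      }
    }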

+ 0 - 117
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/TypeID.java

@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.meta;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.record.RecordOutput;
-
-/** 
- * Represents typeID for basic types.
- *  
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class TypeID {
-
-  /**
-   * constants representing the IDL types we support
-   */
-  public static final class RIOType {
-    public static final byte BOOL   = 1;
-    public static final byte BUFFER = 2;
-    public static final byte BYTE   = 3;
-    public static final byte DOUBLE = 4;
-    public static final byte FLOAT  = 5;
-    public static final byte INT    = 6;
-    public static final byte LONG   = 7;
-    public static final byte MAP    = 8;
-    public static final byte STRING = 9;
-    public static final byte STRUCT = 10;
-    public static final byte VECTOR = 11;
-  }
-
-  /**
-   * Constant classes for the basic types, so we can share them.
-   */
-  public static final TypeID BoolTypeID = new TypeID(RIOType.BOOL);
-  public static final TypeID BufferTypeID = new TypeID(RIOType.BUFFER);
-  public static final TypeID ByteTypeID = new TypeID(RIOType.BYTE);
-  public static final TypeID DoubleTypeID = new TypeID(RIOType.DOUBLE);
-  public static final TypeID FloatTypeID = new TypeID(RIOType.FLOAT);
-  public static final TypeID IntTypeID = new TypeID(RIOType.INT);
-  public static final TypeID LongTypeID = new TypeID(RIOType.LONG);
-  public static final TypeID StringTypeID = new TypeID(RIOType.STRING);
-  
-  protected byte typeVal;
-
-  /**
-   * Create a TypeID object 
-   */
-  TypeID(byte typeVal) {
-    this.typeVal = typeVal;
-  }
-
-  /**
-   * Get the type value. One of the constants in RIOType.
-   */
-  public byte getTypeVal() {
-    return typeVal;
-  }
-
-  /**
-   * Serialize the TypeID object
-   */
-  void write(RecordOutput rout, String tag) throws IOException {
-    rout.writeByte(typeVal, tag);
-  }
-  
-  /**
-   * Two base typeIDs are equal if they refer to the same type
-   */
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) 
-      return true;
-
-    if (o == null)
-      return false;
-
-    if (this.getClass() != o.getClass())
-      return false;
-
-    TypeID oTypeID = (TypeID) o;
-    return (this.typeVal == oTypeID.typeVal);
-  }
-  
-  /**
-   * We use a basic hashcode implementation, since this class will likely not
-   * be used as a hashmap key 
-   */
-  @Override
-  public int hashCode() {
-    // See 'Effective Java' by Joshua Bloch
-    return 37*17+(int)typeVal;
-  }
-}
-
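
The constants above compose with the container typeIDs removed elsewhere in this change; a small sketch, assuming MapTypeID and VectorTypeID as shown in this diff:

    import org.apache.hadoop.record.meta.MapTypeID;
    import org.apache.hadoop.record.meta.TypeID;
    import org.apache.hadoop.record.meta.VectorTypeID;

    public class TypeIdSketch {
      public static void main(String[] args) {
        // map<ustring, vector<long>> expressed as a TypeID tree
        TypeID scores = new MapTypeID(TypeID.StringTypeID,
                                      new VectorTypeID(TypeID.LongTypeID));
        System.out.println(scores.getTypeVal());     // 8, i.e. RIOType.MAP
        System.out.println(scores.equals(
            new MapTypeID(TypeID.StringTypeID,
                          new VectorTypeID(TypeID.LongTypeID))));  // true
      }
    }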

+ 0 - 104
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/Utils.java

@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.meta;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.record.RecordInput;
-
-/**
- * Various utility functions for the Hadoop record I/O platform.
- * 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class Utils {
-  
-  /** Cannot create a new instance of Utils */
-  private Utils() {
-  }
-  
-  /**
-   * read/skip bytes from stream based on a type
-   */
-  public static void skip(RecordInput rin, String tag, TypeID typeID) throws IOException {
-    switch (typeID.typeVal) {
-    case TypeID.RIOType.BOOL: 
-      rin.readBool(tag);
-      break;
-    case TypeID.RIOType.BUFFER: 
-      rin.readBuffer(tag);
-      break;
-    case TypeID.RIOType.BYTE: 
-      rin.readByte(tag);
-      break;
-    case TypeID.RIOType.DOUBLE: 
-      rin.readDouble(tag);
-      break;
-    case TypeID.RIOType.FLOAT: 
-      rin.readFloat(tag);
-      break;
-    case TypeID.RIOType.INT: 
-      rin.readInt(tag);
-      break;
-    case TypeID.RIOType.LONG: 
-      rin.readLong(tag);
-      break;
-    case TypeID.RIOType.MAP: 
-      org.apache.hadoop.record.Index midx1 = rin.startMap(tag);
-      MapTypeID mtID = (MapTypeID) typeID;
-      for (; !midx1.done(); midx1.incr()) {
-        skip(rin, tag, mtID.getKeyTypeID());
-        skip(rin, tag, mtID.getValueTypeID());
-      }
-      rin.endMap(tag);
-      break;
-    case TypeID.RIOType.STRING: 
-      rin.readString(tag);
-      break;
-    case TypeID.RIOType.STRUCT:
-      rin.startRecord(tag);
-      // read past each field in the struct
-      StructTypeID stID = (StructTypeID) typeID;
-      Iterator<FieldTypeInfo> it = stID.getFieldTypeInfos().iterator();
-      while (it.hasNext()) {
-        FieldTypeInfo tInfo = it.next();
-        skip(rin, tag, tInfo.getTypeID());
-      }
-      rin.endRecord(tag);
-      break;
-    case TypeID.RIOType.VECTOR: 
-      org.apache.hadoop.record.Index vidx1 = rin.startVector(tag);
-      VectorTypeID vtID = (VectorTypeID) typeID;
-      for (; !vidx1.done(); vidx1.incr()) {
-        skip(rin, tag, vtID.getElementTypeID());
-      }
-      rin.endVector(tag);
-      break;
-    default: 
-      // shouldn't be here
-      throw new IOException("Unknown typeID when skipping bytes");
-    }
-  }
-}
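
Utils.skip was how generated deserializers stepped over fields written by a newer version of a record. A hedged sketch, assuming a BinaryRecordInput(InputStream) constructor from the same deprecated package; the one-byte payload is illustrative:

    import java.io.ByteArrayInputStream;
    import org.apache.hadoop.record.BinaryRecordInput;
    import org.apache.hadoop.record.meta.TypeID;
    import org.apache.hadoop.record.meta.Utils;

    public class SkipSketch {
      public static void main(String[] args) throws Exception {
        byte[] payload = {42};   // assumed: one serialized byte-typed field
        BinaryRecordInput rin =
            new BinaryRecordInput(new ByteArrayInputStream(payload));

        // Step over a field this reader does not know about, guided only by its TypeID.
        Utils.skip(rin, "unknownField", TypeID.ByteTypeID);
        System.out.println("skipped one byte field");
      }
    }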

+ 0 - 74
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/VectorTypeID.java

@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record.meta;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.record.RecordOutput;
-
-/** 
- * Represents typeID for vector. 
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class VectorTypeID extends TypeID {
-  private TypeID typeIDElement; 
-  
-  public VectorTypeID(TypeID typeIDElement) {
-    super(RIOType.VECTOR);
-    this.typeIDElement = typeIDElement;
-  }
-  
-  public TypeID getElementTypeID() {
-    return this.typeIDElement;
-  }
-  
-  @Override
-  void write(RecordOutput rout, String tag) throws IOException {
-    rout.writeByte(typeVal, tag);
-    typeIDElement.write(rout, tag);
-  }
-  
-  /**
-   * Two vector typeIDs are equal if their constituent elements have the 
-   * same type
-   */
-  @Override
-  public boolean equals(Object o) {
-    if (!super.equals (o))
-      return false;
-
-    VectorTypeID vti = (VectorTypeID) o;
-    return this.typeIDElement.equals(vti.typeIDElement);
-  }
-  
-  /**
-   * We use a basic hashcode implementation, since this class will likely not
-   * be used as a hashmap key 
-   */
-  @Override
-  public int hashCode() {
-    return 37*17+typeIDElement.hashCode();
-  }
-  
-}

+ 0 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java

@@ -31,7 +31,6 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

+ 0 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java

@@ -251,7 +251,6 @@ public class LdapGroupsMapping
     return groups;
   }
 
-  @SuppressWarnings("deprecation")
   DirContext getDirContext() throws NamingException {
     if (ctx == null) {
       // Set up the initial environment for LDAP connectivity

+ 0 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java

@@ -39,9 +39,6 @@ import org.apache.commons.logging.LogFactory;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 public class NetgroupCache {
-
-  private static final Log LOG = LogFactory.getLog(NetgroupCache.class);
-
   private static boolean netgroupToUsersMapUpdated = true;
   private static Map<String, Set<String>> netgroupToUsersMap =
     new ConcurrentHashMap<String, Set<String>>();

+ 0 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java

@@ -30,7 +30,6 @@ import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.TreeMap;
 
 import javax.security.auth.callback.Callback;
 import javax.security.auth.callback.CallbackHandler;
@@ -57,7 +56,6 @@ import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.StringUtils;
 
 /**
  * A utility class for dealing with SASL on RPC server
@@ -67,10 +65,7 @@ import org.apache.hadoop.util.StringUtils;
 public class SaslRpcServer {
   public static final Log LOG = LogFactory.getLog(SaslRpcServer.class);
   public static final String SASL_DEFAULT_REALM = "default";
-  public static final Map<String, String> SASL_PROPS = 
-      new TreeMap<String, String>();
   private static SaslServerFactory saslFactory;
-  private static SaslPropertiesResolver resolver;
 
   public static enum QualityOfProtection {
     AUTHENTICATION("auth"),

+ 18 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.security;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
+import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 import java.io.File;
 import java.io.IOException;
@@ -30,6 +31,7 @@ import java.security.Principal;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedActionException;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -45,9 +47,9 @@ import javax.security.auth.kerberos.KerberosKey;
 import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.kerberos.KerberosTicket;
 import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
 import javax.security.auth.login.LoginContext;
 import javax.security.auth.login.LoginException;
-import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
 import javax.security.auth.spi.LoginModule;
 
 import org.apache.commons.logging.Log;
@@ -68,7 +70,6 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Time;
-import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -990,7 +991,9 @@ public class UserGroupInformation {
     // register most recent relogin attempt
     user.setLastLogin(now);
     try {
-      LOG.info("Initiating logout for " + getUserName());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Initiating logout for " + getUserName());
+      }
       synchronized (UserGroupInformation.class) {
         // clear up the kerberos state. But the tokens are not cleared! As per
         // the Java kerberos login module code, only the kerberos credentials
@@ -1001,7 +1004,9 @@ public class UserGroupInformation {
         login = newLoginContext(
             HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, getSubject(),
             new HadoopConfiguration());
-        LOG.info("Initiating re-login for " + keytabPrincipal);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Initiating re-login for " + keytabPrincipal);
+        }
         start = Time.now();
         login.login();
         metrics.loginSuccess.add(Time.now() - start);
@@ -1042,7 +1047,9 @@ public class UserGroupInformation {
     // register most recent relogin attempt
     user.setLastLogin(now);
     try {
-      LOG.info("Initiating logout for " + getUserName());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Initiating logout for " + getUserName());
+      }
       //clear up the kerberos state. But the tokens are not cleared! As per 
       //the Java kerberos login module code, only the kerberos credentials
       //are cleared
@@ -1052,7 +1059,9 @@ public class UserGroupInformation {
       login = 
         newLoginContext(HadoopConfiguration.USER_KERBEROS_CONFIG_NAME, 
             getSubject(), new HadoopConfiguration());
-      LOG.info("Initiating re-login for " + getUserName());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Initiating re-login for " + getUserName());
+      }
       login.login();
       setLogin(login);
     } catch (LoginException le) {
@@ -1407,7 +1416,7 @@ public class UserGroupInformation {
   public synchronized
   Collection<Token<? extends TokenIdentifier>> getTokens() {
     return Collections.unmodifiableCollection(
-        getCredentialsInternal().getAllTokens());
+        new ArrayList<Token<?>>(getCredentialsInternal().getAllTokens()));
   }
 
   /**

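A note on the getTokens() hunk above: the method now hands back an unmodifiable copy rather than a live view of the UGI's credentials, so callers can iterate safely while tokens are being added elsewhere. A minimal sketch of the call site (assumes a UGI is already logged in; what it prints depends on the tokens actually present):

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    public class TokenSnapshotExample {
      public static void main(String[] args) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // Iteration runs over a copied snapshot, so concurrent additions to
        // the UGI's credentials cannot invalidate this loop.
        for (Token<? extends TokenIdentifier> t : ugi.getTokens()) {
          System.out.println(t.getKind());
        }
      }
    }
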
+ 25 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java

@@ -19,10 +19,12 @@
 package org.apache.hadoop.security.authorize;
 
 import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Map.Entry;
 
@@ -41,12 +43,16 @@ public class ProxyUsers {
   public static final String CONF_GROUPS = ".groups";
   public static final String CONF_HADOOP_PROXYUSER = "hadoop.proxyuser.";
   public static final String CONF_HADOOP_PROXYUSER_RE = "hadoop\\.proxyuser\\.";
+  public static final String CONF_HADOOP_PROXYSERVERS = "hadoop.proxyservers";
+  
   private static boolean init = false;
   // list of groups and hosts per proxyuser
   private static Map<String, Collection<String>> proxyGroups = 
     new HashMap<String, Collection<String>>();
   private static Map<String, Collection<String>> proxyHosts = 
     new HashMap<String, Collection<String>>();
+  private static Collection<String> proxyServers =
+    new HashSet<String>();
 
   /**
    * reread the conf and get new values for "hadoop.proxyuser.*.groups/hosts"
@@ -62,15 +68,16 @@ public class ProxyUsers {
    */
   public static synchronized void refreshSuperUserGroupsConfiguration(Configuration conf) {
     
-    // remove alle existing stuff
+    // remove all existing stuff
     proxyGroups.clear();
     proxyHosts.clear();
+    proxyServers.clear();
 
     // get all the new keys for groups
     String regex = CONF_HADOOP_PROXYUSER_RE+"[^.]*\\"+CONF_GROUPS;
     Map<String,String> allMatchKeys = conf.getValByRegex(regex);
     for(Entry<String, String> entry : allMatchKeys.entrySet()) {
-      Collection<String> groups = StringUtils.getStringCollection(entry.getValue());
+      Collection<String> groups = StringUtils.getTrimmedStringCollection(entry.getValue());
       proxyGroups.put(entry.getKey(), groups );
       //cache the groups. This is needed for NetGroups
       Groups.getUserToGroupsMappingService(conf).cacheGroupsAdd(
@@ -82,12 +89,26 @@ public class ProxyUsers {
     allMatchKeys = conf.getValByRegex(regex);
     for(Entry<String, String> entry : allMatchKeys.entrySet()) {
       proxyHosts.put(entry.getKey(),
-          StringUtils.getStringCollection(entry.getValue()));
+          StringUtils.getTrimmedStringCollection(entry.getValue()));
     }
     
+    // trusted proxy servers such as http proxies
+    for (String host : conf.getTrimmedStrings(CONF_HADOOP_PROXYSERVERS)) {
+      InetSocketAddress addr = new InetSocketAddress(host, 0);
+      if (!addr.isUnresolved()) {
+        proxyServers.add(addr.getAddress().getHostAddress());
+      }
+    }
     init = true;
   }
 
+  public static synchronized boolean isProxyServer(String remoteAddr) { 
+    if(!init) {
+      refreshSuperUserGroupsConfiguration(); 
+    }
+    return proxyServers.contains(remoteAddr);
+  }
+
   /**
    * Returns configuration key for effective user groups allowed for a superuser
    * 
@@ -113,11 +134,10 @@ public class ProxyUsers {
    * 
    * @param user ugi of the effective or proxy user which contains a real user
    * @param remoteAddress the ip address of client
-   * @param newConf configuration
    * @throws AuthorizationException
    */
   public static synchronized void authorize(UserGroupInformation user, 
-      String remoteAddress, Configuration newConf) throws AuthorizationException {
+      String remoteAddress) throws AuthorizationException {
 
     if(!init) {
       refreshSuperUserGroupsConfiguration(); 

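The ProxyUsers hunk introduces a hadoop.proxyservers key and an isProxyServer(String) check; configured host names are resolved to IP addresses when the configuration is refreshed. A rough usage sketch under the assumption that the listed hosts resolve on the machine running it (host names and the tested address are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.authorize.ProxyUsers;

    public class ProxyServerCheckExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Trusted HTTP proxies, comma separated (hypothetical host names).
        conf.set(ProxyUsers.CONF_HADOOP_PROXYSERVERS,
            "gateway1.example.com,gateway2.example.com");
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

        // Later, an incoming request's remote address can be tested against
        // the resolved proxy-server set.
        boolean trusted = ProxyUsers.isProxyServer("203.0.113.10");
        System.out.println("request came from a trusted proxy: " + trusted);
      }
    }
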
+ 6 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java

@@ -127,10 +127,14 @@ public class SSLFactory implements ConnectionConfigurator {
   }
 
   private HostnameVerifier getHostnameVerifier(Configuration conf)
+      throws GeneralSecurityException, IOException {
+    return getHostnameVerifier(conf.get(SSL_HOSTNAME_VERIFIER_KEY, "DEFAULT").
+        trim().toUpperCase());
+  }
+
+  public static HostnameVerifier getHostnameVerifier(String verifier)
     throws GeneralSecurityException, IOException {
     HostnameVerifier hostnameVerifier;
-    String verifier =
-      conf.get(SSL_HOSTNAME_VERIFIER_KEY, "DEFAULT").trim().toUpperCase();
     if (verifier.equals("DEFAULT")) {
       hostnameVerifier = SSLHostnameVerifier.DEFAULT;
     } else if (verifier.equals("DEFAULT_AND_LOCALHOST")) {

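The SSLFactory change splits hostname-verifier lookup into a public static overload keyed by name, so a verifier can be resolved without going through an SSL configuration object. A small sketch; "DEFAULT" is one of the names handled in the code above, and the name must already be upper case when calling the static overload:

    import javax.net.ssl.HostnameVerifier;
    import org.apache.hadoop.security.ssl.SSLFactory;

    public class VerifierLookupExample {
      public static void main(String[] args) throws Exception {
        // Resolve a verifier directly by name instead of reading it from
        // the ssl-client configuration.
        HostnameVerifier verifier = SSLFactory.getHostnameVerifier("DEFAULT");
        System.out.println(verifier.getClass().getName());
      }
    }
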
+ 12 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java

@@ -31,9 +31,6 @@
 
 package org.apache.hadoop.security.ssl;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.security.cert.Certificate;
@@ -44,6 +41,7 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Set;
 import java.util.StringTokenizer;
 import java.util.TreeSet;
 
@@ -52,6 +50,9 @@ import javax.net.ssl.SSLPeerUnverifiedException;
 import javax.net.ssl.SSLSession;
 import javax.net.ssl.SSLSocket;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  ************************************************************************
  * Copied from the not-yet-commons-ssl project at
@@ -224,7 +225,6 @@ public interface SSLHostnameVerifier extends javax.net.ssl.HostnameVerifier {
             public final String toString() { return "ALLOW_ALL"; }
         };
 
-    @SuppressWarnings("unchecked")
     abstract class AbstractVerifier implements SSLHostnameVerifier {
 
         /**
@@ -378,7 +378,7 @@ public interface SSLHostnameVerifier extends javax.net.ssl.HostnameVerifier {
             // STRICT implementations of the HostnameVerifier only use the
             // first CN provided.  All other CNs are ignored.
             // (Firefox, wget, curl, Sun Java 1.4, 5, 6 all work this way).
-            TreeSet names = new TreeSet();
+            final Set<String> names = new TreeSet<String>();
             if (cns != null && cns.length > 0 && cns[0] != null) {
                 names.add(cns[0]);
                 if (ie6) {
@@ -404,10 +404,9 @@ public interface SSLHostnameVerifier extends javax.net.ssl.HostnameVerifier {
 
             boolean match = false;
             out:
-            for (Iterator it = names.iterator(); it.hasNext();) {
+            for (Iterator<String> it = names.iterator(); it.hasNext();) {
                 // Don't trim the CN, though!
-                String cn = (String) it.next();
-                cn = cn.toLowerCase();
+                final String cn = it.next().toLowerCase();
                 // Store CN in StringBuffer in case we need to report an error.
                 buf.append(" <");
                 buf.append(cn);
@@ -508,10 +507,9 @@ public interface SSLHostnameVerifier extends javax.net.ssl.HostnameVerifier {
         }
     }
 
-    @SuppressWarnings("unchecked")
     static class Certificates {
       public static String[] getCNs(X509Certificate cert) {
-        LinkedList cnList = new LinkedList();
+        final List<String> cnList = new LinkedList<String>();
         /*
           Sebastian Hauer's original StrictSSLProtocolSocketFactory used
           getName() and had the following comment:
@@ -568,8 +566,8 @@ public interface SSLHostnameVerifier extends javax.net.ssl.HostnameVerifier {
        * @return Array of SubjectALT DNS names stored in the certificate.
        */
       public static String[] getDNSSubjectAlts(X509Certificate cert) {
-          LinkedList subjectAltList = new LinkedList();
-          Collection c = null;
+          final List<String> subjectAltList = new LinkedList<String>();
+          Collection<List<?>> c = null;
           try {
               c = cert.getSubjectAlternativeNames();
           }
@@ -578,9 +576,9 @@ public interface SSLHostnameVerifier extends javax.net.ssl.HostnameVerifier {
               cpe.printStackTrace();
           }
           if (c != null) {
-              Iterator it = c.iterator();
+              Iterator<List<?>> it = c.iterator();
               while (it.hasNext()) {
-                  List list = (List) it.next();
+                  List<?> list = it.next();
                   int type = ((Integer) list.get(0)).intValue();
                   // If type is 2, then we've got a dNSName
                   if (type == 2) {

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java

@@ -162,7 +162,7 @@ public class Token<T extends TokenIdentifier> implements Writable {
 
   /**
    * Set the token kind. This is only intended to be used by services that
-   * wrap another service's token, such as HFTP wrapping HDFS.
+   * wrap another service's token.
    * @param newKind
    */
   @InterfaceAudience.Private

+ 6 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java

@@ -28,9 +28,11 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Date;
 import java.util.Iterator;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Set;
 import java.util.StringTokenizer;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -351,12 +353,15 @@ public class StringUtils {
 
   /**
    * Splits a comma separated value <code>String</code>, trimming leading and trailing whitespace on each value.
+   * Duplicate and empty values are removed.
    * @param str a comma separated <String> with values
    * @return a <code>Collection</code> of <code>String</code> values
    */
   public static Collection<String> getTrimmedStringCollection(String str){
-    return new ArrayList<String>(
+    Set<String> set = new LinkedHashSet<String>(
       Arrays.asList(getTrimmedStrings(str)));
+    set.remove("");
+    return set;
   }
   
   /**

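getTrimmedStringCollection now backs its result with a LinkedHashSet, so trimming, de-duplication, and removal of empty entries happen in one place while insertion order is preserved. A quick illustration of the resulting behaviour:

    import java.util.Collection;
    import org.apache.hadoop.util.StringUtils;

    public class TrimmedCollectionExample {
      public static void main(String[] args) {
        Collection<String> hosts =
            StringUtils.getTrimmedStringCollection(" host1 , host2,host1, ,host3 ");
        // Duplicates and the empty entry are dropped; order of first
        // appearance is kept: prints [host1, host2, host3]
        System.out.println(hosts);
      }
    }
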
+ 6 - 0
hadoop-common-project/hadoop-common/src/main/native/native.vcxproj

@@ -58,6 +58,7 @@
     <SnappyInclude Condition="Exists('$(CustomSnappyInclude)') And '$(SnappyInclude)' == ''">$(CustomSnappyInclude)</SnappyInclude>
     <SnappyEnabled Condition="'$(SnappyLib)' != '' And '$(SnappyInclude)' != ''">true</SnappyEnabled>
     <IncludePath Condition="'$(SnappyEnabled)' == 'true'">$(SnappyInclude);$(IncludePath)</IncludePath>
+    <IncludePath Condition="Exists('$(ZLIB_HOME)')">$(ZLIB_HOME);$(IncludePath)</IncludePath>
   </PropertyGroup>
   <Target Name="CheckRequireSnappy">
     <Error
@@ -92,6 +93,8 @@
     <ClCompile Include="src\org\apache\hadoop\io\compress\snappy\SnappyDecompressor.c" Condition="'$(SnappyEnabled)' == 'true'">
       <AdditionalOptions>/D HADOOP_SNAPPY_LIBRARY=L\"snappy.dll\"</AdditionalOptions>
     </ClCompile>
+    <ClCompile Include="src\org\apache\hadoop\io\compress\zlib\ZlibCompressor.c" Condition="Exists('$(ZLIB_HOME)')" />
+    <ClCompile Include="src\org\apache\hadoop\io\compress\zlib\ZlibDecompressor.c" Condition="Exists('$(ZLIB_HOME)')" />
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\lz4.c" />
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\lz4hc.c" />
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\Lz4Compressor.c" />
@@ -109,6 +112,9 @@
     <ClInclude Include="..\src\org\apache\hadoop\util\crc32c_tables.h" />
     <ClInclude Include="..\src\org\apache\hadoop\util\crc32_zlib_polynomial_tables.h" />
     <ClInclude Include="src\org\apache\hadoop\io\compress\snappy\org_apache_hadoop_io_compress_snappy.h" />
+    <ClInclude Include="src\org\apache\hadoop\io\compress\zlib\org_apache_hadoop_io_compress_zlib_ZlibCompressor.h" />
+    <ClInclude Include="src\org\apache\hadoop\io\compress\zlib\org_apache_hadoop_io_compress_zlib_ZlibDecompressor.h" />
+    <ClInclude Include="src\org\apache\hadoop\io\compress\zlib\org_apache_hadoop_io_compress_zlib.h" />
     <ClInclude Include="src\org\apache\hadoop\io\nativeio\file_descriptor.h" />
     <ClInclude Include="src\org\apache\hadoop\util\bulk_crc32.h" />
     <ClInclude Include="src\org\apache\hadoop\util\crc32c_tables.h" />

+ 11 - 1
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c

@@ -47,6 +47,7 @@ static int (*dlsym_deflateEnd)(z_streamp);
 #endif
 
 #ifdef WINDOWS
+#include "winutils.h"
 #include <Strsafe.h>
 typedef int (__cdecl *__dlsym_deflateInit2_) (z_streamp, int, int, int, int, int, const char *, int);
 typedef int (__cdecl *__dlsym_deflate) (z_streamp, int);
@@ -379,7 +380,16 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_getLibraryName(JNIEnv *en
     }
   }
 #endif
-  return (*env)->NewStringUTF(env, HADOOP_ZLIB_LIBRARY);
+
+#ifdef WINDOWS
+  LPWSTR filename = NULL;
+  GetLibraryName(dlsym_deflateInit2_, &filename);
+  if (filename != NULL) {
+    return (*env)->NewString(env, filename, (jsize) wcslen(filename));
+  } else {
+    return (*env)->NewStringUTF(env, "Unavailable");
+  }
+#endif
 }
 
 /**

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_user_info.c

@@ -178,6 +178,11 @@ int hadoop_user_info_getgroups(struct hadoop_user_info *uinfo)
       return ret;
     }
     return 0;
+  } else if (ret != -1) {
+    // Any return code that is not -1 is considered as error.
+    // Since the user lookup was successful, there should be at least one
+    // group for this user.
+    return EIO;
   }
   ngids = realloc(uinfo->gids, sizeof(uinfo->gids[0]) * ngroups);
   if (!ngids) {

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -452,6 +452,11 @@
   <description>The AbstractFileSystem for file: uris.</description>
 </property>
 
+<property>
+  <name>fs.AbstractFileSystem.har.impl</name>
+  <value>org.apache.hadoop.fs.HarFs</value>
+  <description>The AbstractFileSystem for har: uris.</description>
+</property> 
 
 <property>
   <name>fs.AbstractFileSystem.hdfs.impl</name>

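The new fs.AbstractFileSystem.har.impl default wires HarFs in as the AbstractFileSystem implementation for har: URIs, so FileContext-based callers can read Hadoop archives much as FileSystem-based callers already could. A hedged sketch only; the archive path below is hypothetical and must point at an existing .har archive on the default file system:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;

    public class HarFileContextExample {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(new Configuration());
        // Hypothetical archive location; har: URIs now resolve through HarFs.
        Path inHar = new Path("har:///user/alice/logs.har/2014/04");
        for (FileStatus st : fc.util().listStatus(inHar)) {
          System.out.println(st.getPath());
        }
      }
    }
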
+ 111 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProvider.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.crypto.key;
 
+import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
 
 import org.apache.hadoop.fs.Path;
@@ -24,16 +25,21 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.net.URI;
+import java.security.NoSuchAlgorithmException;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.Date;
+import java.util.List;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertArrayEquals;
 
 public class TestKeyProvider {
 
+  private static final String CIPHER = "AES";
+
   @Test
   public void testBuildVersionName() throws Exception {
     assertEquals("/a/b@3", KeyProvider.buildVersionName("/a/b", 3));
@@ -62,23 +68,47 @@ public class TestKeyProvider {
 
   @Test
   public void testMetadata() throws Exception {
+    //Metadata without description
     DateFormat format = new SimpleDateFormat("y/m/d");
     Date date = format.parse("2013/12/25");
-    KeyProvider.Metadata meta = new KeyProvider.Metadata("myCipher", 100,
+    KeyProvider.Metadata meta = new KeyProvider.Metadata("myCipher", 100, null,
         date, 123);
     assertEquals("myCipher", meta.getCipher());
     assertEquals(100, meta.getBitLength());
+    assertNull(meta.getDescription());
     assertEquals(date, meta.getCreated());
     assertEquals(123, meta.getVersions());
     KeyProvider.Metadata second = new KeyProvider.Metadata(meta.serialize());
     assertEquals(meta.getCipher(), second.getCipher());
     assertEquals(meta.getBitLength(), second.getBitLength());
+    assertNull(second.getDescription());
     assertEquals(meta.getCreated(), second.getCreated());
     assertEquals(meta.getVersions(), second.getVersions());
     int newVersion = second.addVersion();
     assertEquals(123, newVersion);
     assertEquals(124, second.getVersions());
     assertEquals(123, meta.getVersions());
+
+    //Metadata with description
+    format = new SimpleDateFormat("y/m/d");
+    date = format.parse("2013/12/25");
+    meta = new KeyProvider.Metadata("myCipher", 100,
+        "description", date, 123);
+    assertEquals("myCipher", meta.getCipher());
+    assertEquals(100, meta.getBitLength());
+    assertEquals("description", meta.getDescription());
+    assertEquals(date, meta.getCreated());
+    assertEquals(123, meta.getVersions());
+    second = new KeyProvider.Metadata(meta.serialize());
+    assertEquals(meta.getCipher(), second.getCipher());
+    assertEquals(meta.getBitLength(), second.getBitLength());
+    assertEquals(meta.getDescription(), second.getDescription());
+    assertEquals(meta.getCreated(), second.getCreated());
+    assertEquals(meta.getVersions(), second.getVersions());
+    newVersion = second.addVersion();
+    assertEquals(123, newVersion);
+    assertEquals(124, second.getVersions());
+    assertEquals(123, meta.getVersions());
   }
 
   @Test
@@ -90,9 +120,11 @@ public class TestKeyProvider {
     assertEquals("myCipher", options.getCipher());
     assertEquals(512, options.getBitLength());
     options.setCipher("yourCipher");
+    options.setDescription("description");
     options.setBitLength(128);
     assertEquals("yourCipher", options.getCipher());
     assertEquals(128, options.getBitLength());
+    assertEquals("description", options.getDescription());
     options = KeyProvider.options(new Configuration());
     assertEquals(KeyProvider.DEFAULT_CIPHER, options.getCipher());
     assertEquals(KeyProvider.DEFAULT_BITLENGTH, options.getBitLength());
@@ -109,4 +141,82 @@ public class TestKeyProvider {
     assertEquals(new Path("user:///"),
         KeyProvider.unnestUri(new URI("outer://user/")));
   }
+
+  private static class MyKeyProvider extends KeyProvider {
+    private String algorithm;
+    private int size;
+    private byte[] material;
+
+    @Override
+    public KeyVersion getKeyVersion(String versionName)
+        throws IOException {
+      return null;
+    }
+
+    @Override
+    public List<String> getKeys() throws IOException {
+      return null;
+    }
+
+    @Override
+    public List<KeyVersion> getKeyVersions(String name)
+        throws IOException {
+      return null;
+    }
+
+    @Override
+    public Metadata getMetadata(String name) throws IOException {
+      return new Metadata(CIPHER, 128, "description", new Date(), 0);
+    }
+
+    @Override
+    public KeyVersion createKey(String name, byte[] material,
+        Options options) throws IOException {
+      this.material = material;
+      return null;
+    }
+
+    @Override
+    public void deleteKey(String name) throws IOException {
+
+    }
+
+    @Override
+    public KeyVersion rollNewVersion(String name, byte[] material)
+        throws IOException {
+      this.material = material;
+      return null;
+    }
+
+    @Override
+    public void flush() throws IOException {
+
+    }
+
+    @Override
+    protected byte[] generateKey(int size, String algorithm)
+        throws NoSuchAlgorithmException {
+      this.size = size;
+      this.algorithm = algorithm;
+      return super.generateKey(size, algorithm);
+    }
+  }
+
+  @Test
+  public void testMaterialGeneration() throws Exception {
+    MyKeyProvider kp = new MyKeyProvider();
+    KeyProvider.Options options = new KeyProvider.Options(new Configuration());
+    options.setCipher(CIPHER);
+    options.setBitLength(128);
+    kp.createKey("hello", options);
+    Assert.assertEquals(128, kp.size);
+    Assert.assertEquals(CIPHER, kp.algorithm);
+    Assert.assertNotNull(kp.material);
+
+    kp = new MyKeyProvider();
+    kp.rollNewVersion("hello");
+    Assert.assertEquals(128, kp.size);
+    Assert.assertEquals(CIPHER, kp.algorithm);
+    Assert.assertNotNull(kp.material);
+  }
 }

+ 94 - 2
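The TestKeyProvider additions exercise two new behaviours: Metadata and Options carry an optional description, and createKey/rollNewVersion can generate key material when none is supplied. A condensed sketch against the same API; the keystore path and key name are placeholders, and writing to /tmp is assumed to be possible:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class DescribedKeyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical keystore location backing a JavaKeyStoreProvider.
        conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
            "jceks://file/tmp/example.jceks");
        KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);

        KeyProvider.Options options = KeyProvider.options(conf);
        options.setCipher("AES");
        options.setBitLength(128);
        options.setDescription("example key with a human-readable description");

        // No material is passed, so the provider generates it itself
        // (the generateKey path covered by testMaterialGeneration above).
        provider.createKey("example-key", options);
        provider.flush();

        System.out.println(provider.getMetadata("example-key").getDescription());
      }
    }
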
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java

@@ -19,12 +19,21 @@ package org.apache.hadoop.crypto.key;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.URI;
 import java.util.List;
+import java.util.UUID;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
 import static org.junit.Assert.assertArrayEquals;
@@ -33,8 +42,14 @@ import static org.junit.Assert.assertTrue;
 
 public class TestKeyProviderFactory {
 
-  private static final File tmpDir =
-      new File(System.getProperty("test.build.data", "/tmp"), "key");
+  private static File tmpDir;
+
+  @Before
+  public void setup() {
+    tmpDir = new File(System.getProperty("test.build.data", "target"),
+        UUID.randomUUID().toString());
+    tmpDir.mkdirs();
+  }
 
   @Test
   public void testFactory() throws Exception {
@@ -193,10 +208,87 @@ public class TestKeyProviderFactory {
     Configuration conf = new Configuration();
     final String ourUrl =
         JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks";
+
     File file = new File(tmpDir, "test.jks");
     file.delete();
     conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
     checkSpecificProvider(conf, ourUrl);
+    Path path = KeyProvider.unnestUri(new URI(ourUrl));
+    FileSystem fs = path.getFileSystem(conf);
+    FileStatus s = fs.getFileStatus(path);
+    assertTrue(s.getPermission().toString().equals("rwx------"));
     assertTrue(file + " should exist", file.isFile());
+
+    // check permission retention after explicit change
+    fs.setPermission(path, new FsPermission("777"));
+    checkPermissionRetention(conf, ourUrl, path);
+  }
+
+  public void checkPermissionRetention(Configuration conf, String ourUrl, Path path) throws Exception {
+    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
+    // let's add a new key and flush and check that permissions are still set to 777
+    byte[] key = new byte[32];
+    for(int i =0; i < key.length; ++i) {
+      key[i] = (byte) i;
+    }
+    // create a new key
+    try {
+      provider.createKey("key5", key, KeyProvider.options(conf));
+    } catch (Exception e) {
+      e.printStackTrace();
+      throw e;
+    }
+    provider.flush();
+    // get a new instance of the provider to ensure it was saved correctly
+    provider = KeyProviderFactory.getProviders(conf).get(0);
+    assertArrayEquals(key, provider.getCurrentKey("key5").getMaterial());
+
+    FileSystem fs = path.getFileSystem(conf);
+    FileStatus s = fs.getFileStatus(path);
+    assertTrue("Permissions should have been retained from the preexisting keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
   }
+
+  @Test
+  public void testJksProviderPasswordViaConfig() throws Exception {
+    Configuration conf = new Configuration();
+    final String ourUrl =
+        JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks";
+    File file = new File(tmpDir, "test.jks");
+    file.delete();
+    try {
+      conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
+      conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,
+          "javakeystoreprovider.password");
+      KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
+      provider.createKey("key3", new byte[32], KeyProvider.options(conf));
+      provider.flush();
+    } catch (Exception ex) {
+      Assert.fail("could not create keystore with password file");
+    }
+    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
+    Assert.assertNotNull(provider.getCurrentKey("key3"));
+
+    try {
+      conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY, "bar");
+      KeyProviderFactory.getProviders(conf).get(0);
+      Assert.fail("using non existing password file, it should fail");
+    } catch (IOException ex) {
+      //NOP
+    }
+    try {
+      conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY, "core-site.xml");
+      KeyProviderFactory.getProviders(conf).get(0);
+      Assert.fail("using different password file, it should fail");
+    } catch (IOException ex) {
+      //NOP
+    }
+    try {
+      conf.unset(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY);
+      KeyProviderFactory.getProviders(conf).get(0);
+      Assert.fail("No password file property, env not set, it should fail");
+    } catch (IOException ex) {
+      //NOP
+    }
+  }
+
 }

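The new TestKeyProviderFactory cases cover two JavaKeyStoreProvider behaviours from this commit: a freshly created keystore gets rwx------ permissions (and an explicit change is retained across writes), and the keystore password can come from a file named by a configuration key. A small sketch of the password-file route; the keystore path is a placeholder and the named password file must be resolvable by the provider (the test above ships it as a classpath resource):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class PasswordFileProviderExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
            "jceks://file/tmp/example.jks");              // placeholder path
        conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,
            "javakeystoreprovider.password");             // classpath resource
        KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
        provider.createKey("key3", new byte[32], KeyProvider.options(conf));
        provider.flush();
        System.out.println(
            provider.getCurrentKey("key3").getMaterial().length);
      }
    }
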
+ 33 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java

@@ -22,23 +22,42 @@ import static org.junit.Assert.*;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.PrintStream;
+import java.util.UUID;
 
 import org.apache.hadoop.conf.Configuration;
+import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestKeyShell {
   private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
   private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
-  private static final File tmpDir =
-      new File(System.getProperty("test.build.data", "/tmp"), "key");
-  
+
+  private static File tmpDir;
+
+  private PrintStream initialStdOut;
+  private PrintStream initialStdErr;
+
   @Before
   public void setup() throws Exception {
+    outContent.reset();
+    errContent.reset();
+    tmpDir = new File(System.getProperty("test.build.data", "target"),
+        UUID.randomUUID().toString());
+    tmpDir.mkdirs();
+    initialStdOut = System.out;
+    initialStdErr = System.err;
     System.setOut(new PrintStream(outContent));
     System.setErr(new PrintStream(errContent));
   }
-  
+
+  @After
+  public void cleanUp() throws Exception {
+    System.setOut(initialStdOut);
+    System.setErr(initialStdErr);
+  }
+
   @Test
   public void testKeySuccessfulKeyLifecycle() throws Exception {
     outContent.reset();
@@ -53,12 +72,21 @@ public class TestKeyShell {
     		"created."));
 
     outContent.reset();
-    String[] args2 = {"list", "--provider", 
+    String[] args2 = {"list", "--provider",
         "jceks://file" + tmpDir + "/keystore.jceks"};
     rc = ks.run(args2);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains("key1"));
 
+    outContent.reset();
+    String[] args2a = {"list", "--metadata", "--provider",
+                      "jceks://file" + tmpDir + "/keystore.jceks"};
+    rc = ks.run(args2a);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("key1"));
+    assertTrue(outContent.toString().contains("description"));
+    assertTrue(outContent.toString().contains("created"));
+
     outContent.reset();
     String[] args3 = {"roll", "key1", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};

+ 4 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java

@@ -90,6 +90,10 @@ public abstract class FSMainOperationsBaseTest extends FileSystemTestHelper {
   public FSMainOperationsBaseTest() {
   }
   
+  public FSMainOperationsBaseTest(String testRootDir) {
+      super(testRootDir);
+  }
+  
   @Before
   public void setUp() throws Exception {
     fSys = createFileSystem();

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java

@@ -49,7 +49,7 @@ public final class FileContextTestHelper {
   /**
    * Create a context with the given test root
    */
-  private FileContextTestHelper(String testRootDir) {
+  public FileContextTestHelper(String testRootDir) {
     this.testRootDir = testRootDir;
   }
   

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java

@@ -52,7 +52,7 @@ public class FileSystemTestHelper {
   /**
    * Create helper with the specified test root dir
    */
-  private FileSystemTestHelper(String testRootDir) {
+  public FileSystemTestHelper(String testRootDir) {
       this.testRootDir = testRootDir;
   }
 

+ 7 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java

@@ -25,7 +25,6 @@ import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.StringReader;
-import java.util.EnumSet;
 import java.util.Random;
 
 import org.apache.hadoop.test.GenericTestUtils;
@@ -48,16 +47,19 @@ public class TestDFVariations {
   }
 
   @Test(timeout=5000)
-  public void testMountAndFileSystem() throws Exception {
+  public void testMount() throws Exception {
     XXDF df = new XXDF();
     String expectedMount =
         Shell.WINDOWS ? df.getDirPath().substring(0, 2) : "/foo/bar";
-    String expectedFileSystem =
-        Shell.WINDOWS ? df.getDirPath().substring(0, 2) : "/dev/sda3";
-
     assertEquals("Invalid mount point",
         expectedMount, df.getMount());
+  }
 
+  @Test(timeout=5000)
+  public void testFileSystem() throws Exception {
+    XXDF df = new XXDF();
+    String expectedFileSystem =
+        Shell.WINDOWS ? df.getDirPath().substring(0, 2) : "/dev/sda3";
     assertEquals("Invalid filesystem",
         expectedFileSystem, df.getFilesystem());
   }

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java

@@ -589,7 +589,7 @@ public class TestFileUtil {
         // should never happen since that method never throws InterruptedException.      
         assertNull(ie);  
       }
-      assertFalse(notADirectory.canRead());
+      assertFalse(FileUtil.canRead(notADirectory));
       final long du3 = FileUtil.getDU(partitioned);
       assertEquals(expected, du3);
 
@@ -600,7 +600,7 @@ public class TestFileUtil {
         // should never happen since that method never throws InterruptedException.      
         assertNull(ie);  
       }
-      assertFalse(partitioned.canRead());
+      assertFalse(FileUtil.canRead(partitioned));
       final long du4 = FileUtil.getDU(partitioned);
       assertEquals(0, du4);
     } finally {

+ 103 - 71
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.io;
 
+import static org.mockito.Mockito.*;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -31,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionInputStream;
@@ -63,39 +66,44 @@ public class TestBloomMapFile extends TestCase {
     FileSystem fs = FileSystem.getLocal(conf);
     Path qualifiedDirName = fs.makeQualified(TEST_DIR);
     conf.setInt("io.mapfile.bloom.size", 2048);
-    BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, fs,
-        qualifiedDirName.toString(), IntWritable.class, Text.class);
-    IntWritable key = new IntWritable();
-    Text value = new Text();
-    for (int i = 0; i < 2000; i += 2) {
-      key.set(i);
-      value.set("00" + i);
-      writer.append(key, value);
-    }
-    writer.close();
+    BloomMapFile.Writer writer = null;
+    BloomMapFile.Reader reader = null;
+    try {
+      writer = new BloomMapFile.Writer(conf, fs, qualifiedDirName.toString(),
+        IntWritable.class, Text.class);
+      IntWritable key = new IntWritable();
+      Text value = new Text();
+      for (int i = 0; i < 2000; i += 2) {
+        key.set(i);
+        value.set("00" + i);
+        writer.append(key, value);
+      }
+      writer.close();
 
-    BloomMapFile.Reader reader = new BloomMapFile.Reader(fs,
-        qualifiedDirName.toString(), conf);
-    // check false positives rate
-    int falsePos = 0;
-    int falseNeg = 0;
-    for (int i = 0; i < 2000; i++) {
-      key.set(i);
-      boolean exists = reader.probablyHasKey(key);
-      if (i % 2 == 0) {
-        if (!exists)
-          falseNeg++;
-      } else {
-        if (exists)
-          falsePos++;
+      reader = new BloomMapFile.Reader(fs, qualifiedDirName.toString(), conf);
+      // check false positives rate
+      int falsePos = 0;
+      int falseNeg = 0;
+      for (int i = 0; i < 2000; i++) {
+        key.set(i);
+        boolean exists = reader.probablyHasKey(key);
+        if (i % 2 == 0) {
+          if (!exists)
+            falseNeg++;
+        } else {
+          if (exists)
+            falsePos++;
+        }
       }
+      reader.close();
+      fs.delete(qualifiedDirName, true);
+      System.out.println("False negatives: " + falseNeg);
+      assertEquals(0, falseNeg);
+      System.out.println("False positives: " + falsePos);
+      assertTrue(falsePos < 2);
+    } finally {
+      IOUtils.cleanup(null, writer, reader);
     }
-    reader.close();
-    fs.delete(qualifiedDirName, true);
-    System.out.println("False negatives: " + falseNeg);
-    assertEquals(0, falseNeg);
-    System.out.println("False positives: " + falsePos);
-    assertTrue(falsePos < 2);
   }
 
   @SuppressWarnings("deprecation")
@@ -103,23 +111,28 @@ public class TestBloomMapFile extends TestCase {
       throws Exception {
     FileSystem fs = FileSystem.getLocal(conf);
     Path qualifiedDirName = fs.makeQualified(TEST_DIR);
-    BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, fs,
-        qualifiedDirName.toString(), Text.class, NullWritable.class);
-    for (Text key : keys) {
-      writer.append(key, NullWritable.get());
-    }
-    writer.close();
+    BloomMapFile.Writer writer = null;
+    BloomMapFile.Reader reader = null;
+    try {
+      writer = new BloomMapFile.Writer(conf, fs, qualifiedDirName.toString(),
+        Text.class, NullWritable.class);
+      for (Text key : keys) {
+        writer.append(key, NullWritable.get());
+      }
+      writer.close();
 
-    // will check for membership in the opposite order of how keys were inserted
-    BloomMapFile.Reader reader = new BloomMapFile.Reader(fs,
-        qualifiedDirName.toString(), conf);
-    Collections.reverse(keys);
-    for (Text key : keys) {
-      assertTrue("False negative for existing key " + key,
+      // will check for membership in opposite order of how keys were inserted
+      reader = new BloomMapFile.Reader(fs, qualifiedDirName.toString(), conf);
+      Collections.reverse(keys);
+      for (Text key : keys) {
+        assertTrue("False negative for existing key " + key,
           reader.probablyHasKey(key));
+      }
+      reader.close();
+      fs.delete(qualifiedDirName, true);
+    } finally {
+      IOUtils.cleanup(null, writer, reader);
     }
-    reader.close();
-    fs.delete(qualifiedDirName, true);
   }
 
   public void testMembershipVaryingSizedKeysTest1() throws Exception {
@@ -140,15 +153,19 @@ public class TestBloomMapFile extends TestCase {
    * test {@code BloomMapFile.delete()} method
    */
   public void testDeleteFile() {
+    BloomMapFile.Writer writer = null;
     try {
       FileSystem fs = FileSystem.getLocal(conf);
-      BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, TEST_FILE,
+      writer = new BloomMapFile.Writer(conf, TEST_FILE,
           MapFile.Writer.keyClass(IntWritable.class),
           MapFile.Writer.valueClass(Text.class));
       assertNotNull("testDeleteFile error !!!", writer);
-      BloomMapFile.delete(fs, "." + TEST_FILE);
+      writer.close();
+      BloomMapFile.delete(fs, TEST_FILE.toString());
     } catch (Exception ex) {
       fail("unexpect ex in testDeleteFile !!!");
+    } finally {
+      IOUtils.cleanup(null, writer);
     }
   }
   
@@ -157,24 +174,26 @@ public class TestBloomMapFile extends TestCase {
    * IOException
    */
   public void testIOExceptionInWriterConstructor() {
-    Path dirNameSpy = org.mockito.Mockito.spy(TEST_FILE);
+    Path dirNameSpy = spy(TEST_FILE);
+    BloomMapFile.Reader reader = null;
+    BloomMapFile.Writer writer = null;
     try {
-      BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, TEST_FILE,
+      writer = new BloomMapFile.Writer(conf, TEST_FILE,
           MapFile.Writer.keyClass(IntWritable.class),
           MapFile.Writer.valueClass(Text.class));
       writer.append(new IntWritable(1), new Text("123124142"));
       writer.close();
 
-      org.mockito.Mockito.when(dirNameSpy.getFileSystem(conf)).thenThrow(
-          new IOException());
-      BloomMapFile.Reader reader = new BloomMapFile.Reader(dirNameSpy, conf,
+      when(dirNameSpy.getFileSystem(conf)).thenThrow(new IOException());
+      reader = new BloomMapFile.Reader(dirNameSpy, conf,
           MapFile.Reader.comparator(new WritableComparator(IntWritable.class)));
 
       assertNull("testIOExceptionInWriterConstructor error !!!",
           reader.getBloomFilter());
-      reader.close();
     } catch (Exception ex) {
       fail("unexpect ex in testIOExceptionInWriterConstructor !!!");
+    } finally {
+      IOUtils.cleanup(null, writer, reader);
     }
   }
 
@@ -183,8 +202,10 @@ public class TestBloomMapFile extends TestCase {
    */
   public void testGetBloomMapFile() {
     int SIZE = 10;
+    BloomMapFile.Reader reader = null;
+    BloomMapFile.Writer writer = null;
     try {
-      BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, TEST_FILE,
+      writer = new BloomMapFile.Writer(conf, TEST_FILE,
           MapFile.Writer.keyClass(IntWritable.class),
           MapFile.Writer.valueClass(Text.class));
 
@@ -193,7 +214,7 @@ public class TestBloomMapFile extends TestCase {
       }
       writer.close();
 
-      BloomMapFile.Reader reader = new BloomMapFile.Reader(TEST_FILE, conf,
+      reader = new BloomMapFile.Reader(TEST_FILE, conf,
           MapFile.Reader.comparator(new WritableComparator(IntWritable.class)));
 
       for (int i = 0; i < SIZE; i++) {
@@ -203,9 +224,10 @@ public class TestBloomMapFile extends TestCase {
             
       assertNull("testGetBloomMapFile error !!!",
           reader.get(new IntWritable(SIZE + 5), new Text()));
-      reader.close();
     } catch (Exception ex) {
       fail("unexpect ex in testGetBloomMapFile !!!");
+    } finally {
+      IOUtils.cleanup(null, writer, reader);
     }
   }
 
@@ -214,36 +236,46 @@ public class TestBloomMapFile extends TestCase {
    */
   @SuppressWarnings("deprecation")
   public void testBloomMapFileConstructors() {
+    BloomMapFile.Writer writer = null;
     try {
       FileSystem ts = FileSystem.get(conf);
       String testFileName = TEST_FILE.toString();
-      BloomMapFile.Writer writer1 = new BloomMapFile.Writer(conf, ts,
+      writer = new BloomMapFile.Writer(conf, ts,
           testFileName, IntWritable.class, Text.class, CompressionType.BLOCK,
           defaultCodec, defaultProgress);
-      assertNotNull("testBloomMapFileConstructors error !!!", writer1);
-      BloomMapFile.Writer writer2 = new BloomMapFile.Writer(conf, ts,
+      assertNotNull("testBloomMapFileConstructors error !!!", writer);
+      writer.close();
+      writer = new BloomMapFile.Writer(conf, ts,
           testFileName, IntWritable.class, Text.class, CompressionType.BLOCK,
           defaultProgress);
-      assertNotNull("testBloomMapFileConstructors error !!!", writer2);
-      BloomMapFile.Writer writer3 = new BloomMapFile.Writer(conf, ts,
+      assertNotNull("testBloomMapFileConstructors error !!!", writer);
+      writer.close();
+      writer = new BloomMapFile.Writer(conf, ts,
           testFileName, IntWritable.class, Text.class, CompressionType.BLOCK);
-      assertNotNull("testBloomMapFileConstructors error !!!", writer3);
-      BloomMapFile.Writer writer4 = new BloomMapFile.Writer(conf, ts,
+      assertNotNull("testBloomMapFileConstructors error !!!", writer);
+      writer.close();
+      writer = new BloomMapFile.Writer(conf, ts,
           testFileName, IntWritable.class, Text.class, CompressionType.RECORD,
           defaultCodec, defaultProgress);
-      assertNotNull("testBloomMapFileConstructors error !!!", writer4);
-      BloomMapFile.Writer writer5 = new BloomMapFile.Writer(conf, ts,
+      assertNotNull("testBloomMapFileConstructors error !!!", writer);
+      writer.close();
+      writer = new BloomMapFile.Writer(conf, ts,
           testFileName, IntWritable.class, Text.class, CompressionType.RECORD,
           defaultProgress);
-      assertNotNull("testBloomMapFileConstructors error !!!", writer5);
-      BloomMapFile.Writer writer6 = new BloomMapFile.Writer(conf, ts,
+      assertNotNull("testBloomMapFileConstructors error !!!", writer);
+      writer.close();
+      writer = new BloomMapFile.Writer(conf, ts,
           testFileName, IntWritable.class, Text.class, CompressionType.RECORD);
-      assertNotNull("testBloomMapFileConstructors error !!!", writer6);
-      BloomMapFile.Writer writer7 = new BloomMapFile.Writer(conf, ts,
+      assertNotNull("testBloomMapFileConstructors error !!!", writer);
+      writer.close();
+      writer = new BloomMapFile.Writer(conf, ts,
           testFileName, WritableComparator.get(Text.class), Text.class);
-      assertNotNull("testBloomMapFileConstructors error !!!", writer7);
+      assertNotNull("testBloomMapFileConstructors error !!!", writer);
+      writer.close();
     } catch (Exception ex) {
       fail("testBloomMapFileConstructors error !!!");
+    } finally {
+      IOUtils.cleanup(null, writer);
     }
   }
 
@@ -272,13 +304,13 @@ public class TestBloomMapFile extends TestCase {
     @Override
     public CompressionOutputStream createOutputStream(OutputStream out,
         Compressor compressor) throws IOException {
-      return null;
+      return mock(CompressionOutputStream.class);
     }
 
     @Override
     public CompressionOutputStream createOutputStream(OutputStream out)
         throws IOException {
-      return null;
+      return mock(CompressionOutputStream.class);
     }
 
     @Override

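The BloomMapFile and MapFile test rewrites in this commit converge on one resource-handling idiom: declare the writer and reader outside the try block and let IOUtils.cleanup close whatever was actually opened. A minimal standalone sketch of that pattern (the file location is a placeholder; cleanup tolerates a second close of the writer, mirroring the tests above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.MapFile;
    import org.apache.hadoop.io.Text;

    public class CleanupIdiomExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        Path file = new Path("/tmp/example.mapfile");   // placeholder location
        MapFile.Writer writer = null;
        MapFile.Reader reader = null;
        try {
          writer = new MapFile.Writer(conf, file,
              MapFile.Writer.keyClass(IntWritable.class),
              MapFile.Writer.valueClass(Text.class));
          writer.append(new IntWritable(1), new Text("one"));
          writer.close();

          reader = new MapFile.Reader(file, conf);
          Text value = new Text();
          reader.get(new IntWritable(1), value);
          System.out.println(value);
        } finally {
          // Closes only the non-null arguments and logs close() failures,
          // so an early exception never leaks open handles.
          IOUtils.cleanup(null, writer, reader);
          fs.delete(file, true);
        }
      }
    }
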
+ 184 - 147
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionInputStream;
@@ -68,13 +69,13 @@ public class TestMapFile {
     @Override
     public CompressionOutputStream createOutputStream(OutputStream out)
         throws IOException {
-      return null;
+      return mock(CompressionOutputStream.class);
     }
 
     @Override
     public CompressionOutputStream createOutputStream(OutputStream out,
         Compressor compressor) throws IOException {
-      return null;
+      return mock(CompressionOutputStream.class);
     }
 
     @Override
@@ -138,46 +139,52 @@ public class TestMapFile {
   @Test
   public void testGetClosestOnCurrentApi() throws Exception {
     final String TEST_PREFIX = "testGetClosestOnCurrentApi.mapfile";
-    MapFile.Writer writer = createWriter(TEST_PREFIX, Text.class, Text.class);
-    int FIRST_KEY = 1;
-    // Test keys: 11,21,31,...,91
-    for (int i = FIRST_KEY; i < 100; i += 10) {      
-      Text t = new Text(Integer.toString(i));
-      writer.append(t, t);
-    }
-    writer.close();
+    MapFile.Writer writer = null;
+    MapFile.Reader reader = null;
+    try {
+      writer = createWriter(TEST_PREFIX, Text.class, Text.class);
+      int FIRST_KEY = 1;
+      // Test keys: 11,21,31,...,91
+      for (int i = FIRST_KEY; i < 100; i += 10) {      
+        Text t = new Text(Integer.toString(i));
+        writer.append(t, t);
+      }
+      writer.close();
 
-    MapFile.Reader reader = createReader(TEST_PREFIX, Text.class);
-    Text key = new Text("55");
-    Text value = new Text();
-
-    // Test get closest with step forward
-    Text closest = (Text) reader.getClosest(key, value);
-    assertEquals(new Text("61"), closest);
-
-    // Test get closest with step back
-    closest = (Text) reader.getClosest(key, value, true);
-    assertEquals(new Text("51"), closest);
-
-    // Test get closest when we pass explicit key
-    final Text explicitKey = new Text("21");
-    closest = (Text) reader.getClosest(explicitKey, value);
-    assertEquals(new Text("21"), explicitKey);
-
-    // Test what happens at boundaries. Assert if searching a key that is
-    // less than first key in the mapfile, that the first key is returned.
-    key = new Text("00");
-    closest = (Text) reader.getClosest(key, value);
-    assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
-
-    // Assert that null is returned if key is > last entry in mapfile.
-    key = new Text("92");
-    closest = (Text) reader.getClosest(key, value);
-    assertNull("Not null key in testGetClosestWithNewCode", closest);
-
-    // If we were looking for the key before, we should get the last key
-    closest = (Text) reader.getClosest(key, value, true);
-    assertEquals(new Text("91"), closest);
+      reader = createReader(TEST_PREFIX, Text.class);
+      Text key = new Text("55");
+      Text value = new Text();
+
+      // Test get closest with step forward
+      Text closest = (Text) reader.getClosest(key, value);
+      assertEquals(new Text("61"), closest);
+
+      // Test get closest with step back
+      closest = (Text) reader.getClosest(key, value, true);
+      assertEquals(new Text("51"), closest);
+
+      // Test get closest when we pass explicit key
+      final Text explicitKey = new Text("21");
+      closest = (Text) reader.getClosest(explicitKey, value);
+      assertEquals(new Text("21"), explicitKey);
+
+      // Test what happens at boundaries. Assert if searching a key that is
+      // less than first key in the mapfile, that the first key is returned.
+      key = new Text("00");
+      closest = (Text) reader.getClosest(key, value);
+      assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
+
+      // Assert that null is returned if key is > last entry in mapfile.
+      key = new Text("92");
+      closest = (Text) reader.getClosest(key, value);
+      assertNull("Not null key in testGetClosestWithNewCode", closest);
+
+      // If we were looking for the key before, we should get the last key
+      closest = (Text) reader.getClosest(key, value, true);
+      assertEquals(new Text("91"), closest);
+    } finally {
+      IOUtils.cleanup(null, writer, reader);
+    }
   }
   
   /**
@@ -187,16 +194,21 @@ public class TestMapFile {
   public void testMidKeyOnCurrentApi() throws Exception {
     // Write a mapfile of simple data: keys are
     final String TEST_PREFIX = "testMidKeyOnCurrentApi.mapfile";
-    MapFile.Writer writer = createWriter(TEST_PREFIX, IntWritable.class,
-        IntWritable.class);
-    // 0,1,....9
-    int SIZE = 10;
-    for (int i = 0; i < SIZE; i++)
-      writer.append(new IntWritable(i), new IntWritable(i));
-    writer.close();
+    MapFile.Writer writer = null;
+    MapFile.Reader reader = null;
+    try {
+      writer = createWriter(TEST_PREFIX, IntWritable.class, IntWritable.class);
+      // 0,1,....9
+      int SIZE = 10;
+      for (int i = 0; i < SIZE; i++)
+        writer.append(new IntWritable(i), new IntWritable(i));
+      writer.close();
 
-    MapFile.Reader reader = createReader(TEST_PREFIX, IntWritable.class);
-    assertEquals(new IntWritable((SIZE - 1) / 2), reader.midKey());
+      reader = createReader(TEST_PREFIX, IntWritable.class);
+      assertEquals(new IntWritable((SIZE - 1) / 2), reader.midKey());
+    } finally {
+      IOUtils.cleanup(null, writer, reader);
+    }
   }
   
   /**
@@ -206,16 +218,18 @@ public class TestMapFile {
   public void testRename() {
     final String NEW_FILE_NAME = "test-new.mapfile";
     final String OLD_FILE_NAME = "test-old.mapfile";
+    MapFile.Writer writer = null;
     try {
       FileSystem fs = FileSystem.getLocal(conf);
-      MapFile.Writer writer = createWriter(OLD_FILE_NAME, IntWritable.class,
-          IntWritable.class);
+      writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
       writer.close();
       MapFile.rename(fs, new Path(TEST_DIR, OLD_FILE_NAME).toString(), 
           new Path(TEST_DIR, NEW_FILE_NAME).toString());
       MapFile.delete(fs, new Path(TEST_DIR, NEW_FILE_NAME).toString());
     } catch (IOException ex) {
       fail("testRename error " + ex);
+    } finally {
+      IOUtils.cleanup(null, writer);
     }
   }
   
@@ -228,12 +242,12 @@ public class TestMapFile {
     final String ERROR_MESSAGE = "Can't rename file";
     final String NEW_FILE_NAME = "test-new.mapfile";
     final String OLD_FILE_NAME = "test-old.mapfile";
+    MapFile.Writer writer = null;
     try {
       FileSystem fs = FileSystem.getLocal(conf);
       FileSystem spyFs = spy(fs);
 
-      MapFile.Writer writer = createWriter(OLD_FILE_NAME, IntWritable.class,
-          IntWritable.class);
+      writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
       writer.close();
 
       Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
@@ -246,6 +260,8 @@ public class TestMapFile {
     } catch (IOException ex) {
       assertEquals("testRenameWithException invalid IOExceptionMessage !!!",
           ex.getMessage(), ERROR_MESSAGE);
+    } finally {
+      IOUtils.cleanup(null, writer);
     }
   }
 
@@ -254,12 +270,12 @@ public class TestMapFile {
     final String ERROR_MESSAGE = "Could not rename";
     final String NEW_FILE_NAME = "test-new.mapfile";
     final String OLD_FILE_NAME = "test-old.mapfile";
+    MapFile.Writer writer = null;
     try {
       FileSystem fs = FileSystem.getLocal(conf);
       FileSystem spyFs = spy(fs);
 
-      MapFile.Writer writer = createWriter(OLD_FILE_NAME, IntWritable.class,
-          IntWritable.class);
+      writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
       writer.close();
 
       Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
@@ -271,6 +287,8 @@ public class TestMapFile {
     } catch (IOException ex) {
       assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!", ex
           .getMessage().startsWith(ERROR_MESSAGE));
+    } finally {
+      IOUtils.cleanup(null, writer);
     }
   }
   
@@ -297,11 +315,7 @@ public class TestMapFile {
       assertTrue("testWriteWithFailDirCreation ex error !!!", ex.getMessage()
           .startsWith(ERROR_MESSAGE));
     } finally {
-      if (writer != null)
-        try {
-          writer.close();
-        } catch (IOException e) {
-        }
+      IOUtils.cleanup(null, writer);
     }
   }
 
@@ -312,20 +326,24 @@ public class TestMapFile {
   public void testOnFinalKey() {
     final String TEST_METHOD_KEY = "testOnFinalKey.mapfile";
     int SIZE = 10;
+    MapFile.Writer writer = null;
+    MapFile.Reader reader = null;
     try {
-      MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
-          IntWritable.class);
+      writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
+        IntWritable.class);
       for (int i = 0; i < SIZE; i++)
         writer.append(new IntWritable(i), new IntWritable(i));
       writer.close();
 
-      MapFile.Reader reader = createReader(TEST_METHOD_KEY, IntWritable.class);
+      reader = createReader(TEST_METHOD_KEY, IntWritable.class);
       IntWritable expectedKey = new IntWritable(0);
       reader.finalKey(expectedKey);
       assertEquals("testOnFinalKey not same !!!", expectedKey, new IntWritable(
           9));
     } catch (IOException ex) {
       fail("testOnFinalKey error !!!");
+    } finally {
+      IOUtils.cleanup(null, writer, reader);
     }
   }
   
@@ -338,7 +356,8 @@ public class TestMapFile {
     Class<? extends WritableComparable<?>> keyClass = IntWritable.class;
     Class<?> valueClass = Text.class;
     try {
-      createWriter("testKeyValueClasses.mapfile", IntWritable.class, Text.class);
+      createWriter("testKeyValueClasses.mapfile", IntWritable.class, Text.class)
+        .close();
       assertNotNull("writer key class null error !!!",
           MapFile.Writer.keyClass(keyClass));
       assertNotNull("writer value class null error !!!",
@@ -354,19 +373,22 @@ public class TestMapFile {
   @Test
   public void testReaderGetClosest() throws Exception {
     final String TEST_METHOD_KEY = "testReaderWithWrongKeyClass.mapfile";
+    MapFile.Writer writer = null;
+    MapFile.Reader reader = null;
     try {
-      MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
-          Text.class);
+      writer = createWriter(TEST_METHOD_KEY, IntWritable.class, Text.class);
 
       for (int i = 0; i < 10; i++)
         writer.append(new IntWritable(i), new Text("value" + i));
       writer.close();
 
-      MapFile.Reader reader = createReader(TEST_METHOD_KEY, Text.class);
+      reader = createReader(TEST_METHOD_KEY, Text.class);
       reader.getClosest(new Text("2"), new Text(""));
       fail("no excepted exception in testReaderWithWrongKeyClass !!!");
     } catch (IOException ex) {
       /* Should be thrown to pass the test */
+    } finally {
+      IOUtils.cleanup(null, writer, reader);
     }
   }
   
@@ -376,13 +398,15 @@ public class TestMapFile {
   @Test
   public void testReaderWithWrongValueClass() {
     final String TEST_METHOD_KEY = "testReaderWithWrongValueClass.mapfile";
+    MapFile.Writer writer = null;
     try {
-      MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
-          Text.class);
+      writer = createWriter(TEST_METHOD_KEY, IntWritable.class, Text.class);
       writer.append(new IntWritable(0), new IntWritable(0));
       fail("no excepted exception in testReaderWithWrongKeyClass !!!");
     } catch (IOException ex) {
       /* Should be thrown to pass the test */
+    } finally {
+      IOUtils.cleanup(null, writer);
     }
   }
   
@@ -394,15 +418,16 @@ public class TestMapFile {
     final String TEST_METHOD_KEY = "testReaderKeyIteration.mapfile";
     int SIZE = 10;
     int ITERATIONS = 5;
+    MapFile.Writer writer = null;
+    MapFile.Reader reader = null;
     try {
-      MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
-          Text.class);
+      writer = createWriter(TEST_METHOD_KEY, IntWritable.class, Text.class);
       int start = 0;
       for (int i = 0; i < SIZE; i++)
         writer.append(new IntWritable(i), new Text("Value:" + i));
       writer.close();
 
-      MapFile.Reader reader = createReader(TEST_METHOD_KEY, IntWritable.class);
+      reader = createReader(TEST_METHOD_KEY, IntWritable.class);
       // test iteration
       Writable startValue = new Text("Value:" + start);
       int i = 0;
@@ -421,6 +446,8 @@ public class TestMapFile {
           reader.seek(new IntWritable(SIZE * 2)));
     } catch (IOException ex) {
       fail("reader seek error !!!");
+    } finally {
+      IOUtils.cleanup(null, writer, reader);
     }
   }
 
@@ -431,11 +458,11 @@ public class TestMapFile {
   public void testFix() {
     final String INDEX_LESS_MAP_FILE = "testFix.mapfile";
     int PAIR_SIZE = 20;
+    MapFile.Writer writer = null;
     try {
       FileSystem fs = FileSystem.getLocal(conf);
       Path dir = new Path(TEST_DIR, INDEX_LESS_MAP_FILE);
-      MapFile.Writer writer = createWriter(INDEX_LESS_MAP_FILE,
-          IntWritable.class, Text.class);
+      writer = createWriter(INDEX_LESS_MAP_FILE, IntWritable.class, Text.class);
       for (int i = 0; i < PAIR_SIZE; i++)
         writer.append(new IntWritable(0), new Text("value"));
       writer.close();
@@ -450,6 +477,8 @@ public class TestMapFile {
             MapFile.fix(fs, dir, IntWritable.class, Text.class, true, conf) == PAIR_SIZE);
     } catch (Exception ex) {
       fail("testFix error !!!");
+    } finally {
+      IOUtils.cleanup(null, writer);
     }
   }
   /**
@@ -459,38 +488,46 @@ public class TestMapFile {
   @SuppressWarnings("deprecation")
   public void testDeprecatedConstructors() {
     String path = new Path(TEST_DIR, "writes.mapfile").toString();
+    MapFile.Writer writer = null;
+    MapFile.Reader reader = null;
     try {
       FileSystem fs = FileSystem.getLocal(conf);
-      MapFile.Writer writer = new MapFile.Writer(conf, fs, path,
+      writer = new MapFile.Writer(conf, fs, path,
           IntWritable.class, Text.class, CompressionType.RECORD);
       assertNotNull(writer);
+      writer.close();
       writer = new MapFile.Writer(conf, fs, path, IntWritable.class,
           Text.class, CompressionType.RECORD, defaultProgressable);
       assertNotNull(writer);
+      writer.close();
       writer = new MapFile.Writer(conf, fs, path, IntWritable.class,
           Text.class, CompressionType.RECORD, defaultCodec, defaultProgressable);
       assertNotNull(writer);
+      writer.close();
       writer = new MapFile.Writer(conf, fs, path,
           WritableComparator.get(Text.class), Text.class);
       assertNotNull(writer);
+      writer.close();
       writer = new MapFile.Writer(conf, fs, path,
           WritableComparator.get(Text.class), Text.class,
           SequenceFile.CompressionType.RECORD);
       assertNotNull(writer);
+      writer.close();
       writer = new MapFile.Writer(conf, fs, path,
           WritableComparator.get(Text.class), Text.class,
           CompressionType.RECORD, defaultProgressable);
       assertNotNull(writer);
       writer.close();
 
-      MapFile.Reader reader = new MapFile.Reader(fs, path,
+      reader = new MapFile.Reader(fs, path,
           WritableComparator.get(IntWritable.class), conf);
       assertNotNull(reader);
       assertNotNull("reader key is null !!!", reader.getKeyClass());
       assertNotNull("reader value in null", reader.getValueClass());
-
     } catch (IOException e) {
       fail(e.getMessage());
+    } finally {
+      IOUtils.cleanup(null, writer, reader);
     }
   }
   
@@ -509,11 +546,7 @@ public class TestMapFile {
     } catch (Exception e) {
       fail("fail in testKeyLessWriterCreation. Other ex !!!");
     } finally {
-      if (writer != null)
-        try {
-          writer.close();
-        } catch (IOException e) {
-        }
+      IOUtils.cleanup(null, writer);
     }
   }
   /**
@@ -542,11 +575,7 @@ public class TestMapFile {
     } catch (Exception e) {
       fail("fail in testPathExplosionWriterCreation. Other ex !!!");
     } finally {
-      if (writer != null)
-        try {
-          writer.close();
-        } catch (IOException e) {
-        }
+      IOUtils.cleanup(null, writer);
     }
   }
 
@@ -555,9 +584,9 @@ public class TestMapFile {
    */
   @Test
   public void testDescOrderWithThrowExceptionWriterAppend() {
+    MapFile.Writer writer = null;
     try {
-      MapFile.Writer writer = createWriter(".mapfile", IntWritable.class,
-          Text.class);
+      writer = createWriter(".mapfile", IntWritable.class, Text.class);
       writer.append(new IntWritable(2), new Text("value: " + 1));
       writer.append(new IntWritable(2), new Text("value: " + 2));
       writer.append(new IntWritable(2), new Text("value: " + 4));
@@ -566,6 +595,8 @@ public class TestMapFile {
     } catch (IOException ex) {
     } catch (Exception e) {
       fail("testDescOrderWithThrowExceptionWriterAppend other ex throw !!!");
+    } finally {
+      IOUtils.cleanup(null, writer);
     }
   }
 
@@ -575,15 +606,17 @@ public class TestMapFile {
     String inFile = "mainMethodMapFile.mapfile";
     String outFile = "mainMethodMapFile.mapfile";
     String[] args = { path, outFile };
+    MapFile.Writer writer = null;
     try {
-      MapFile.Writer writer = createWriter(inFile, IntWritable.class,
-          Text.class);
+      writer = createWriter(inFile, IntWritable.class, Text.class);
       writer.append(new IntWritable(1), new Text("test_text1"));
       writer.append(new IntWritable(2), new Text("test_text2"));
       writer.close();
       MapFile.main(args);
     } catch (Exception ex) {
       fail("testMainMethodMapFile error !!!");
+    } finally {
+      IOUtils.cleanup(null, writer);
     }
   }
 
@@ -601,56 +634,58 @@ public class TestMapFile {
     Path qualifiedDirName = fs.makeQualified(dirName);
     // Make an index entry for every third insertion.
     MapFile.Writer.setIndexInterval(conf, 3);
-    MapFile.Writer writer = new MapFile.Writer(conf, fs,
-        qualifiedDirName.toString(), Text.class, Text.class);
-    // Assert that the index interval is 1
-    assertEquals(3, writer.getIndexInterval());
-    // Add entries up to 100 in intervals of ten.
-    final int FIRST_KEY = 10;
-    for (int i = FIRST_KEY; i < 100; i += 10) {
-      String iStr = Integer.toString(i);
-      Text t = new Text("00".substring(iStr.length()) + iStr);
-      writer.append(t, t);
-    }
-    writer.close();
-    // Now do getClosest on created mapfile.
-    MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
+    MapFile.Writer writer = null;
+    MapFile.Reader reader = null;
     try {
-    Text key = new Text("55");
-    Text value = new Text();
-    Text closest = (Text) reader.getClosest(key, value);
-    // Assert that closest after 55 is 60
-    assertEquals(new Text("60"), closest);
-    // Get closest that falls before the passed key: 50
-    closest = (Text) reader.getClosest(key, value, true);
-    assertEquals(new Text("50"), closest);
-    // Test get closest when we pass explicit key
-    final Text TWENTY = new Text("20");
-    closest = (Text) reader.getClosest(TWENTY, value);
-    assertEquals(TWENTY, closest);
-    closest = (Text) reader.getClosest(TWENTY, value, true);
-    assertEquals(TWENTY, closest);
-    // Test what happens at boundaries. Assert if searching a key that is
-    // less than first key in the mapfile, that the first key is returned.
-    key = new Text("00");
-    closest = (Text) reader.getClosest(key, value);
-    assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
-
-    // If we're looking for the first key before, and we pass in a key before
-    // the first key in the file, we should get null
-    closest = (Text) reader.getClosest(key, value, true);
-    assertNull(closest);
-
-    // Assert that null is returned if key is > last entry in mapfile.
-    key = new Text("99");
-    closest = (Text) reader.getClosest(key, value);
-    assertNull(closest);
-
-    // If we were looking for the key before, we should get the last key
-    closest = (Text) reader.getClosest(key, value, true);
-    assertEquals(new Text("90"), closest);
+      writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(),
+        Text.class, Text.class);
+      // Assert that the index interval is 3
+      assertEquals(3, writer.getIndexInterval());
+      // Add entries up to 100 in intervals of ten.
+      final int FIRST_KEY = 10;
+      for (int i = FIRST_KEY; i < 100; i += 10) {
+        String iStr = Integer.toString(i);
+        Text t = new Text("00".substring(iStr.length()) + iStr);
+        writer.append(t, t);
+      }
+      writer.close();
+      // Now do getClosest on created mapfile.
+      reader = new MapFile.Reader(qualifiedDirName, conf);
+      Text key = new Text("55");
+      Text value = new Text();
+      Text closest = (Text) reader.getClosest(key, value);
+      // Assert that closest after 55 is 60
+      assertEquals(new Text("60"), closest);
+      // Get closest that falls before the passed key: 50
+      closest = (Text) reader.getClosest(key, value, true);
+      assertEquals(new Text("50"), closest);
+      // Test get closest when we pass explicit key
+      final Text TWENTY = new Text("20");
+      closest = (Text) reader.getClosest(TWENTY, value);
+      assertEquals(TWENTY, closest);
+      closest = (Text) reader.getClosest(TWENTY, value, true);
+      assertEquals(TWENTY, closest);
+      // Test what happens at boundaries. Assert if searching a key that is
+      // less than first key in the mapfile, that the first key is returned.
+      key = new Text("00");
+      closest = (Text) reader.getClosest(key, value);
+      assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
+
+      // If we're looking for the first key before, and we pass in a key before
+      // the first key in the file, we should get null
+      closest = (Text) reader.getClosest(key, value, true);
+      assertNull(closest);
+
+      // Assert that null is returned if key is > last entry in mapfile.
+      key = new Text("99");
+      closest = (Text) reader.getClosest(key, value);
+      assertNull(closest);
+
+      // If we were looking for the key before, we should get the last key
+      closest = (Text) reader.getClosest(key, value, true);
+      assertEquals(new Text("90"), closest);
     } finally {
-      reader.close();
+      IOUtils.cleanup(null, writer, reader);
     }
   }
 
@@ -662,16 +697,18 @@ public class TestMapFile {
     FileSystem fs = FileSystem.getLocal(conf);
     Path qualifiedDirName = fs.makeQualified(dirName);
 
-    MapFile.Writer writer = new MapFile.Writer(conf, fs,
-        qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
-    writer.append(new IntWritable(1), new IntWritable(1));
-    writer.close();
-    // Now do getClosest on created mapfile.
-    MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
+    MapFile.Writer writer = null;
+    MapFile.Reader reader = null;
     try {
+      writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(),
+        IntWritable.class, IntWritable.class);
+      writer.append(new IntWritable(1), new IntWritable(1));
+      writer.close();
+      // Now do getClosest on created mapfile.
+      reader = new MapFile.Reader(qualifiedDirName, conf);
       assertEquals(new IntWritable(1), reader.midKey());
     } finally {
-      reader.close();
+      IOUtils.cleanup(null, writer, reader);
     }
   }
 

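Editor's note: the TestMapFile hunks above repeatedly replace hand-rolled "if (writer != null) try { writer.close(); } catch (IOException e) {}" blocks with a single IOUtils.cleanup(null, writer, reader) call in finally. A minimal sketch of that idiom follows, assuming hadoop-common is on the classpath; the open(...) helper is a hypothetical stand-in for MapFile.Writer/Reader creation.

    import java.io.Closeable;
    import java.io.IOException;

    import org.apache.hadoop.io.IOUtils;

    public class CleanupSketch {
      public static void main(String[] args) throws IOException {
        Closeable writer = null;
        Closeable reader = null;
        try {
          writer = open("writer");
          reader = open("reader");
          // ... use the resources ...
        } finally {
          // Closes every non-null argument in order and swallows (optionally
          // logging) any IOException, so cleanup cannot mask an earlier failure.
          IOUtils.cleanup(null, writer, reader);
        }
      }

      // Hypothetical stand-in for opening a MapFile.Writer or MapFile.Reader.
      private static Closeable open(final String name) {
        return new Closeable() {
          @Override
          public void close() throws IOException {
            System.out.println("closed " + name);
          }
        };
      }
    }
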
+ 11 - 6
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java

@@ -19,7 +19,6 @@
 package org.apache.hadoop.ipc;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -49,7 +48,7 @@ public class TestCallQueueManager {
     public volatile int callsAdded = 0; // How many calls we added, accurate unless interrupted
     private final int maxCalls;
 
-    private boolean isRunning = true;
+    private volatile boolean isRunning = true;
 
     public Putter(CallQueueManager<FakeCall> aCq, int maxCalls, int tag) {
       this.maxCalls = maxCalls;
@@ -201,16 +200,22 @@ public class TestCallQueueManager {
 
     // Ensure no calls were dropped
     long totalCallsCreated = 0;
-    long totalCallsConsumed = 0;
-
     for (Putter p : producers) {
-      totalCallsCreated += p.callsAdded;
       threads.get(p).interrupt();
     }
+    for (Putter p : producers) {
+      threads.get(p).join();
+      totalCallsCreated += p.callsAdded;
+    }
+    
+    long totalCallsConsumed = 0;
     for (Taker t : consumers) {
-      totalCallsConsumed += t.callsTaken;
       threads.get(t).interrupt();
     }
+    for (Taker t : consumers) {
+      threads.get(t).join();
+      totalCallsConsumed += t.callsTaken;
+    }
 
     assertEquals(totalCallsConsumed, totalCallsCreated);
   }

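Editor's note: the TestCallQueueManager change fixes two races. The isRunning flag is made volatile so worker threads actually see the stop signal, and the test now joins each producer/consumer thread after interrupting it before summing callsAdded and callsTaken, so the totals are read only once the threads have fully stopped. A stripped-down, JDK-only sketch of the interrupt-then-join-then-read pattern (class and field names here are illustrative only):

    public class JoinBeforeRead {
      static class Putter implements Runnable {
        // Written by the worker thread; read by main() only after join().
        volatile long callsAdded = 0;
        // volatile so a stop request from another thread becomes visible.
        private volatile boolean isRunning = true;

        @Override
        public void run() {
          while (isRunning && !Thread.currentThread().isInterrupted()) {
            callsAdded++;
          }
        }
      }

      public static void main(String[] args) throws InterruptedException {
        Putter p = new Putter();
        Thread t = new Thread(p);
        t.start();
        Thread.sleep(100);
        t.interrupt();                     // ask the worker to stop
        t.join();                          // wait until it has really finished
        System.out.println(p.callsAdded);  // only now is the count stable
      }
    }
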
+ 7 - 6
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java

@@ -60,11 +60,12 @@ public class TestGangliaMetrics {
   @Test
   public void testTagsForPrefix() throws Exception {
     ConfigBuilder cb = new ConfigBuilder()
-      .add("test.sink.ganglia.tagsForPrefix.all", "*")
-      .add("test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, NumActiveSources")
-      .add("test.sink.ganglia.tagsForPrefix.none", "");
+      .add("Test.sink.ganglia.tagsForPrefix.all", "*")
+      .add("Test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " +
+              "NumActiveSources")
+      .add("Test.sink.ganglia.tagsForPrefix.none", "");
     GangliaSink30 sink = new GangliaSink30();
-    sink.init(cb.subset("test.sink.ganglia"));
+    sink.init(cb.subset("Test.sink.ganglia"));
 
     List<MetricsTag> tags = new ArrayList<MetricsTag>();
     tags.add(new MetricsTag(MsInfo.Context, "all"));
@@ -97,8 +98,8 @@ public class TestGangliaMetrics {
   
   @Test public void testGangliaMetrics2() throws Exception {
     ConfigBuilder cb = new ConfigBuilder().add("default.period", 10)
-        .add("test.sink.gsink30.context", "test") // filter out only "test"
-        .add("test.sink.gsink31.context", "test") // filter out only "test"
+        .add("Test.sink.gsink30.context", "test") // filter out only "test"
+        .add("Test.sink.gsink31.context", "test") // filter out only "test"
         .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
 
     MetricsSystemImpl ms = new MetricsSystemImpl("Test");

+ 17 - 16
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java

@@ -88,11 +88,11 @@ public class TestMetricsSystemImpl {
     DefaultMetricsSystem.shutdown();
     new ConfigBuilder().add("*.period", 8)
         //.add("test.sink.plugin.urls", getPluginUrlsAsString())
-        .add("test.sink.test.class", TestSink.class.getName())
-        .add("test.*.source.filter.exclude", "s0")
-        .add("test.source.s1.metric.filter.exclude", "X*")
-        .add("test.sink.sink1.metric.filter.exclude", "Y*")
-        .add("test.sink.sink2.metric.filter.exclude", "Y*")
+        .add("Test.sink.test.class", TestSink.class.getName())
+        .add("Test.*.source.filter.exclude", "s0")
+        .add("Test.source.s1.metric.filter.exclude", "X*")
+        .add("Test.sink.sink1.metric.filter.exclude", "Y*")
+        .add("Test.sink.sink2.metric.filter.exclude", "Y*")
         .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
     MetricsSystemImpl ms = new MetricsSystemImpl("Test");
     ms.start();
@@ -130,11 +130,11 @@ public class TestMetricsSystemImpl {
     DefaultMetricsSystem.shutdown(); 
     new ConfigBuilder().add("*.period", 8)
         //.add("test.sink.plugin.urls", getPluginUrlsAsString())
-        .add("test.sink.test.class", TestSink.class.getName())
-        .add("test.*.source.filter.exclude", "s0")
-        .add("test.source.s1.metric.filter.exclude", "X*")
-        .add("test.sink.sink1.metric.filter.exclude", "Y*")
-        .add("test.sink.sink2.metric.filter.exclude", "Y*")
+        .add("Test.sink.test.class", TestSink.class.getName())
+        .add("Test.*.source.filter.exclude", "s0")
+        .add("Test.source.s1.metric.filter.exclude", "X*")
+        .add("Test.sink.sink1.metric.filter.exclude", "Y*")
+        .add("Test.sink.sink2.metric.filter.exclude", "Y*")
         .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
     MetricsSystemImpl ms = new MetricsSystemImpl("Test");
     ms.start();
@@ -167,12 +167,13 @@ public class TestMetricsSystemImpl {
   }
   
   @Test public void testMultiThreadedPublish() throws Exception {
+    final int numThreads = 10;
     new ConfigBuilder().add("*.period", 80)
-      .add("test.sink.Collector.queue.capacity", "20")
+      .add("Test.sink.Collector." + MetricsConfig.QUEUE_CAPACITY_KEY,
+              numThreads)
       .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
     final MetricsSystemImpl ms = new MetricsSystemImpl("Test");
     ms.start();
-    final int numThreads = 10;
     final CollectingSink sink = new CollectingSink(numThreads);
     ms.registerSink("Collector",
         "Collector of values from all threads.", sink);
@@ -279,10 +280,10 @@ public class TestMetricsSystemImpl {
 
   @Test public void testHangingSink() {
     new ConfigBuilder().add("*.period", 8)
-      .add("test.sink.test.class", TestSink.class.getName())
-      .add("test.sink.hanging.retry.delay", "1")
-      .add("test.sink.hanging.retry.backoff", "1.01")
-      .add("test.sink.hanging.retry.count", "0")
+      .add("Test.sink.test.class", TestSink.class.getName())
+      .add("Test.sink.hanging.retry.delay", "1")
+      .add("Test.sink.hanging.retry.backoff", "1.01")
+      .add("Test.sink.hanging.retry.count", "0")
       .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
     MetricsSystemImpl ms = new MetricsSystemImpl("Test");
     ms.start();

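Editor's note: in both TestGangliaMetrics and TestMetricsSystemImpl the configuration keys are re-spelled with the "Test" prefix, the exact name handed to new MetricsSystemImpl("Test"), so the sink and filter settings are looked up under that system's own prefix. A minimal sketch of the convention, reusing the test-side ConfigBuilder and TestMetricsConfig helpers these tests already use (test classpath assumed; the chosen keys are illustrative):

    import org.apache.hadoop.metrics2.impl.ConfigBuilder;
    import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
    import org.apache.hadoop.metrics2.impl.TestMetricsConfig;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

    public class MetricsPrefixSketch {
      public static void main(String[] args) {
        DefaultMetricsSystem.shutdown();
        // Keys carry the "Test" prefix -- the same name given to
        // MetricsSystemImpl below -- rather than a lower-case "test".
        new ConfigBuilder()
            .add("*.period", 8)
            .add("Test.sink.sink1.metric.filter.exclude", "Y*")
            .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
        MetricsSystemImpl ms = new MetricsSystemImpl("Test");
        ms.start();
        // ... register sources and sinks, publish metrics ...
        ms.stop();
      }
    }
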
+ 12 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java

@@ -105,11 +105,18 @@ public class TestFileSink {
     ms.publishMetricsNow(); // publish the metrics
     ms.stop();
     ms.shutdown();
-    
-    InputStream is = new FileInputStream(outFile);
-    ByteArrayOutputStream baos = new ByteArrayOutputStream((int)outFile.length());
-    IOUtils.copyBytes(is, baos, 1024, true);
-    String outFileContent = new String(baos.toByteArray(), "UTF-8");
+
+    InputStream is = null;
+    ByteArrayOutputStream baos = null;
+    String outFileContent = null;
+    try {
+      is = new FileInputStream(outFile);
+      baos = new ByteArrayOutputStream((int)outFile.length());
+      IOUtils.copyBytes(is, baos, 1024, true);
+      outFileContent = new String(baos.toByteArray(), "UTF-8");
+    } finally {
+      IOUtils.cleanup(null, baos, is);
+    }
 
     // Check the out file content. Should be something like the following:
     //1360244820087 test1.testRecord1: Context=test1, testTag1=testTagValue1, testTag2=testTagValue2, Hostname=myhost, testMetric1=1, testMetric2=2

+ 6 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java

@@ -74,8 +74,9 @@ public class TestDoAsEffectiveUser {
   }
 
   @Before
-  public void setMasterConf() {
+  public void setMasterConf() throws IOException {
     UserGroupInformation.setConfiguration(masterConf);
+    refreshConf(masterConf);
   }
 
   private void configureSuperUserIPAddresses(Configuration conf,
@@ -297,6 +298,8 @@ public class TestDoAsEffectiveUser {
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
         .setNumHandlers(2).setVerbose(false).build();
 
+    refreshConf(conf);
+
     try {
       server.start();
 
@@ -379,6 +382,8 @@ public class TestDoAsEffectiveUser {
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
         .setNumHandlers(2).setVerbose(false).build();
     
+    refreshConf(conf);
+
     try {
       server.start();
 

+ 48 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.security.authorize;
 
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Collection;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -223,9 +224,54 @@ public class TestProxyUsers {
     assertNotAuthorized(proxyUserUgi, "1.2.3.5");
   }
 
+  @Test
+  public void testWithDuplicateProxyGroups() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(
+      ProxyUsers.getProxySuperuserGroupConfKey(REAL_USER_NAME),
+      StringUtils.join(",", Arrays.asList(GROUP_NAMES,GROUP_NAMES)));
+    conf.set(
+      ProxyUsers.getProxySuperuserIpConfKey(REAL_USER_NAME),
+      PROXY_IP);
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+    
+    Collection<String> groupsToBeProxied = ProxyUsers.getProxyGroups().get(
+        ProxyUsers.getProxySuperuserGroupConfKey(REAL_USER_NAME));
+    
+    assertEquals (1,groupsToBeProxied.size());
+  }
+  
+  @Test
+  public void testWithDuplicateProxyHosts() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(
+      ProxyUsers.getProxySuperuserGroupConfKey(REAL_USER_NAME),
+      StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
+    conf.set(
+      ProxyUsers.getProxySuperuserIpConfKey(REAL_USER_NAME),
+      StringUtils.join(",", Arrays.asList(PROXY_IP,PROXY_IP)));
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+    
+    Collection<String> hosts = ProxyUsers.getProxyHosts().get(
+        ProxyUsers.getProxySuperuserIpConfKey(REAL_USER_NAME));
+    
+    assertEquals (1,hosts.size());
+  }
+
+  @Test
+  public void testProxyServer() {
+    Configuration conf = new Configuration();
+    assertFalse(ProxyUsers.isProxyServer("1.1.1.1"));
+    conf.set(ProxyUsers.CONF_HADOOP_PROXYSERVERS, "2.2.2.2, 3.3.3.3");
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+    assertFalse(ProxyUsers.isProxyServer("1.1.1.1"));
+    assertTrue(ProxyUsers.isProxyServer("2.2.2.2"));
+    assertTrue(ProxyUsers.isProxyServer("3.3.3.3"));
+  }
+
   private void assertNotAuthorized(UserGroupInformation proxyUgi, String host) {
     try {
-      ProxyUsers.authorize(proxyUgi, host, null);
+      ProxyUsers.authorize(proxyUgi, host);
       fail("Allowed authorization of " + proxyUgi + " from " + host);
     } catch (AuthorizationException e) {
       // Expected
@@ -234,7 +280,7 @@ public class TestProxyUsers {
   
   private void assertAuthorized(UserGroupInformation proxyUgi, String host) {
     try {
-      ProxyUsers.authorize(proxyUgi, host, null);
+      ProxyUsers.authorize(proxyUgi, host);
     } catch (AuthorizationException e) {
       fail("Did not allowed authorization of " + proxyUgi + " from " + host);
     }

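Editor's note: the new TestProxyUsers cases cover two behaviours: duplicate entries in the proxy group/host lists collapse to a single value, and a new hadoop.proxyservers setting (ProxyUsers.CONF_HADOOP_PROXYSERVERS) marks addresses as trusted proxy servers, queried through ProxyUsers.isProxyServer(). A small sketch of the proxy-server side, using only the calls that appear in the test above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.authorize.ProxyUsers;

    public class ProxyServerSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Comma-separated addresses to be treated as trusted proxy servers.
        conf.set(ProxyUsers.CONF_HADOOP_PROXYSERVERS, "2.2.2.2, 3.3.3.3");
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

        System.out.println(ProxyUsers.isProxyServer("2.2.2.2")); // true
        System.out.println(ProxyUsers.isProxyServer("1.1.1.1")); // false
      }
    }
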
+ 11 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java

@@ -22,9 +22,12 @@ import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2St
 import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.string2long;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -401,6 +404,14 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
       "begin %foo%_%bar%_%baz% end", pattern, replacements));
   }
 
+  @Test 
+  public void testGetUniqueNonEmptyTrimmedStrings (){
+    final String TO_SPLIT = ",foo, bar,baz,,blah,blah,bar,";
+    Collection<String> col = StringUtils.getTrimmedStringCollection(TO_SPLIT);
+    assertEquals(4, col.size());
+    assertTrue(col.containsAll(Arrays.asList(new String[]{"foo","bar","baz","blah"})));
+  }
+
   // Benchmark for StringUtils split
   public static void main(String []args) {
     final String TO_SPLIT = "foo,bar,baz,blah,blah";

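Editor's note: the new TestStringUtils assertion documents that StringUtils.getTrimmedStringCollection() trims whitespace, drops empty entries, and de-duplicates, which is also what lets the duplicate proxy groups and hosts above collapse to one entry each. A one-file sketch of the behaviour the test asserts:

    import java.util.Collection;

    import org.apache.hadoop.util.StringUtils;

    public class TrimmedStringsSketch {
      public static void main(String[] args) {
        Collection<String> col =
            StringUtils.getTrimmedStringCollection(",foo, bar,baz,,blah,blah,bar,");
        // Expected to print "4" followed by [foo, bar, baz, blah] under the
        // de-duplicating behaviour asserted above.
        System.out.println(col.size() + " " + col);
      }
    }
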
+ 79 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java

@@ -273,6 +273,85 @@ public class TestWinUtils {
     assertTrue(aExe.delete());
   }
 
+  /** Validate behavior of chmod commands on directories on Windows. */
+  @Test (timeout = 30000)
+  public void testBasicChmodOnDir() throws IOException {
+    // Validate that listing a directory with no read permission fails
+    File a = new File(TEST_DIR, "a");
+    File b = new File(a, "b");
+    a.mkdirs();
+    assertTrue(b.createNewFile());
+
+    // Remove read permissions on directory a
+    chmod("300", a);
+    String[] files = a.list();
+    assertTrue("Listing a directory without read permission should fail",
+        null == files);
+
+    // restore permissions
+    chmod("700", a);
+    // validate that the directory can be listed now
+    files = a.list();
+    assertEquals("b", files[0]);
+
+    // Remove write permissions on the directory and validate the
+    // behavior for adding, deleting and renaming files
+    chmod("500", a);
+    File c = new File(a, "c");
+ 
+    try {
+      // Adding a new file will fail as expected because the
+      // FILE_WRITE_DATA/FILE_ADD_FILE privilege is denied on
+      // the dir.
+      c.createNewFile();
+      assertFalse("writeFile should have failed!", true);
+    } catch (IOException ex) {
+      LOG.info("Expected: Failed to create a file when directory "
+          + "permissions are 577");
+    }
+
+    // Deleting a file will succeed even if write permissions are not present
+    // on the parent dir. Check the following link for additional details:
+    // http://support.microsoft.com/kb/238018
+    assertTrue("Special behavior: deleting a file will succeed on Windows "
+        + "even if a user does not have write permissions on the parent dir",
+        b.delete());
+
+    assertFalse("Renaming a file should fail on the dir where a user does "
+        + "not have write permissions", b.renameTo(new File(a, "d")));
+
+    // restore permissions
+    chmod("700", a);
+
+    // Make sure adding new files and rename succeeds now
+    assertTrue(c.createNewFile());
+    File d = new File(a, "d");
+    assertTrue(c.renameTo(d));
+    // at this point in the test, d is the only remaining file in directory a
+
+    // Removing execute permissions does not have the same behavior on
+    // Windows as on Linux. Adding, renaming, deleting and listing files
+    // will still succeed. Windows default behavior is to bypass directory
+    // traverse checking (BYPASS_TRAVERSE_CHECKING privilege) for all users.
+    // See the following link for additional details:
+    // http://msdn.microsoft.com/en-us/library/windows/desktop/aa364399(v=vs.85).aspx
+    chmod("600", a);
+
+    // validate directory listing
+    files = a.list();
+    assertEquals("d", files[0]);
+    // validate delete
+    assertTrue(d.delete());
+    // validate add
+    File e = new File(a, "e");
+    assertTrue(e.createNewFile());
+    // validate rename
+    assertTrue(e.renameTo(new File(a, "f")));
+
+    // restore permissions
+    chmod("700", a);
+  }
+
   @Test (timeout = 30000)
   public void testChmod() throws IOException {
     testChmodInternal("7", "-------rwx");

+ 1 - 0
hadoop-common-project/hadoop-common/src/test/resources/javakeystoreprovider.password

@@ -0,0 +1 @@
+foo

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java

@@ -349,7 +349,7 @@ public class TestHttpFSServer extends HFSTestCase {
     url = new URL(TestJettyHelper.getJettyURL(),
                   "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
     conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
+    Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
                         conn.getResponseCode());
   }
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.nfs.mount;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NFS_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NFS_USER_NAME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY;
 
 import java.io.IOException;
 import java.net.InetAddress;
@@ -89,7 +89,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
     this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
     UserGroupInformation.setConfiguration(config);
     SecurityUtil.login(config, DFS_NFS_KEYTAB_FILE_KEY,
-            DFS_NFS_USER_NAME_KEY);
+            DFS_NFS_KERBEROS_PRINCIPAL_KEY);
     this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
   }
   

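Editor's note: the last hunk swaps DFS_NFS_USER_NAME_KEY for DFS_NFS_KERBEROS_PRINCIPAL_KEY when the NFS mount daemon logs in from its keytab. A hedged sketch of that login call; the keys come from DFSConfigKeys as in the hunk, while the keytab path and principal values are placeholders (real deployments set them in hdfs-site.xml):

    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY;
    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NFS_KEYTAB_FILE_KEY;

    import java.io.IOException;

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.UserGroupInformation;

    public class NfsLoginSketch {
      public static void main(String[] args) throws IOException {
        HdfsConfiguration conf = new HdfsConfiguration();
        // Placeholder values; only the key names are taken from DFSConfigKeys.
        conf.set(DFS_NFS_KEYTAB_FILE_KEY, "/etc/security/keytabs/nfs.keytab");
        conf.set(DFS_NFS_KERBEROS_PRINCIPAL_KEY, "nfs/_HOST@EXAMPLE.COM");
        UserGroupInformation.setConfiguration(conf);
        // Logs in from the keytab named by the first key as the principal
        // named by the second (the renamed dfs.nfs.kerberos.principal key).
        SecurityUtil.login(conf, DFS_NFS_KEYTAB_FILE_KEY,
            DFS_NFS_KERBEROS_PRINCIPAL_KEY);
      }
    }
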
Some files were not shown because too many files changed in this diff