
svn merge -c 1461040 from trunk for HDFS-4598. Fix the default value of ConcatSourcesParam and the WebHDFS doc.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1461042 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 12 years ago
parent commit 17a304973c

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -106,6 +106,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4609. TestAuditLogs should release log handles between tests. 
     (Ivan Mitic via szetszwo)
 
+    HDFS-4598. Fix the default value of ConcatSourcesParam and the WebHDFS doc.
+    (szetszwo)
+
 Release 2.0.4-alpha - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -312,13 +312,14 @@ public class DistributedFileSystem extends FileSystem {
   }
   
   /**
-   * Move blocks from srcs to trg
-   * and delete srcs afterwards
-   * RESTRICTION: all blocks should be the same size
+   * Move blocks from srcs to trg and delete srcs afterwards.
+   * The file block sizes must be the same.
+   * 
    * @param trg existing file to append to
    * @param psrcs list of files (same block size, same replication)
    * @throws IOException
    */
+  @Override
   public void concat(Path trg, Path [] psrcs) throws IOException {
     String [] srcs = new String [psrcs.length];
     for(int i=0; i<psrcs.length; i++) {
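
For context, a minimal caller sketch of the API documented above. The cluster URI and file paths are illustrative assumptions, not part of this commit:

    // ConcatDemo.java - hypothetical caller; URI and paths are assumptions.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ConcatDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), new Configuration());
        Path trg = new Path("/data/part-0");   // existing file to append to
        Path[] srcs = { new Path("/data/part-1"), new Path("/data/part-2") };
        fs.concat(trg, srcs);   // moves the source blocks into trg, then deletes the sources;
                                // per the javadoc, all files must share the same block size
      }
    }

The added @Override compiles only because FileSystem itself declares concat, so a call like the one above dispatches to whichever implementation backs the URI scheme.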

+ 28 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -30,7 +30,6 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -64,7 +63,33 @@ import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
-import org.apache.hadoop.hdfs.web.resources.*;
+import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
+import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
+import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
+import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;
+import org.apache.hadoop.hdfs.web.resources.CreateParentParam;
+import org.apache.hadoop.hdfs.web.resources.DelegationParam;
+import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
+import org.apache.hadoop.hdfs.web.resources.DestinationParam;
+import org.apache.hadoop.hdfs.web.resources.DoAsParam;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.GroupParam;
+import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
+import org.apache.hadoop.hdfs.web.resources.LengthParam;
+import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
+import org.apache.hadoop.hdfs.web.resources.OffsetParam;
+import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
+import org.apache.hadoop.hdfs.web.resources.OwnerParam;
+import org.apache.hadoop.hdfs.web.resources.Param;
+import org.apache.hadoop.hdfs.web.resources.PermissionParam;
+import org.apache.hadoop.hdfs.web.resources.PostOpParam;
+import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
+import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
+import org.apache.hadoop.hdfs.web.resources.RenewerParam;
+import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
+import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryUtils;
@@ -82,7 +107,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.StringUtils;
 import org.mortbay.util.ajax.JSON;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -729,17 +753,10 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   @Override
-  public void concat(final Path trg, final Path [] psrcs) throws IOException {
+  public void concat(final Path trg, final Path [] srcs) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PostOpParam.Op.CONCAT;
 
-    List<String> strPaths = new ArrayList<String>(psrcs.length);
-    for(Path psrc : psrcs) {
-       strPaths.add(psrc.toUri().getPath());
-    }
-
-    String srcs = StringUtils.join(",", strPaths);
-
     ConcatSourcesParam param = new ConcatSourcesParam(srcs);
     run(op, trg, param);
   }
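
With the joining moved into ConcatSourcesParam, this method now forwards the Path array untouched. A hedged sketch of what a WebHDFS client call produces on the wire; the host, port, and paths are illustrative assumptions, and the URL shape follows the WebHDFS doc below:

    // WebHdfsConcatDemo.java - hypothetical client; endpoint and paths are assumptions.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsConcatDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:50070/"), new Configuration());
        fs.concat(new Path("/data/part-0"),
            new Path[] { new Path("/data/part-1"), new Path("/data/part-2") });
        // Sends: POST http://namenode:50070/webhdfs/v1/data/part-0
        //              ?op=CONCAT&sources=/data/part-1,/data/part-2
      }
    }

Centralizing the comma-joining in the parameter class keeps the wire format in one place instead of duplicating it in every caller.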

+ 18 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java

@@ -18,15 +18,28 @@
 
 package org.apache.hadoop.hdfs.web.resources;
 
+import org.apache.hadoop.fs.Path;
+
 /** The concat source paths parameter. */
 public class ConcatSourcesParam extends StringParam {
   /** Parameter name. */
   public static final String NAME = "sources";
 
-  public static final String DEFAULT = NULL;
+  public static final String DEFAULT = "";
 
   private static final Domain DOMAIN = new Domain(NAME, null);
 
+  private static String paths2String(Path[] paths) {
+    if (paths == null || paths.length == 0) {
+      return "";
+    }
+    final StringBuilder b = new StringBuilder(paths[0].toUri().getPath());
+    for(int i = 1; i < paths.length; i++) {
+      b.append(',').append(paths[i].toUri().getPath());
+    }
+    return b.toString();
+  }
+
   /**
    * Constructor.
    * @param str a string representation of the parameter value.
@@ -35,6 +48,10 @@ public class ConcatSourcesParam extends StringParam {
     super(DOMAIN, str);
   }
 
+  public ConcatSourcesParam(Path[] paths) {
+    this(paths2String(paths));
+  }
+
   @Override
   public String getName() {
     return NAME;
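
A small illustration of the new constructor's joining behavior; it mirrors what the TestParam case below asserts, and the paths are examples only:

    // ConcatSourcesParamDemo.java - illustrative; paths are assumptions.
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;

    public class ConcatSourcesParamDemo {
      public static void main(String[] args) {
        Path[] paths = { new Path("/a"), new Path("/b"), new Path("/c") };
        System.out.println(new ConcatSourcesParam(paths).getValue());       // /a,/b,/c
        System.out.println(new ConcatSourcesParam(new Path[0]).getValue()); // "" (the new DEFAULT)
      }
    }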

+ 4 - 11
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm

@@ -109,7 +109,7 @@ WebHDFS REST API
     * {{{Append to a File}<<<APPEND>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append)
 
-    * {{{Concat File(s)}<<<CONCAT>>>}}
+    * {{{Concatenate Files}<<<CONCAT>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat)
 
   * HTTP DELETE
@@ -307,7 +307,7 @@ Content-Length: 0
   * Submit a HTTP POST request.
 
 +---------------------------------
-curl -i -X POST "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CONCAT&sources=<SOURCES>"
+curl -i -X POST "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CONCAT&sources=<PATHS>"
 +---------------------------------
 
   The client receives a response with zero content length:
@@ -319,10 +319,6 @@ Content-Length: 0
 
   []
 
-  This REST API call is available as of Hadoop version 2.0.3.
-  Please note that <SOURCES> is a comma seperated list of absolute paths.
-  (Example: sources=/test/file1,/test/file2,/test/file3)
-
   See also:
   {{{Sources}<<<sources>>>}},
    {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat
@@ -1761,7 +1757,7 @@ var tokenProperties =
 *----------------+-------------------------------------------------------------------+
 || Name          | <<<sources>>> |
 *----------------+-------------------------------------------------------------------+
-|| Description   | The comma seperated absolute paths used for concatenation. |
+|| Description   | A list of source paths. |
 *----------------+-------------------------------------------------------------------+
 || Type          | String |
 *----------------+-------------------------------------------------------------------+
@@ -1769,12 +1765,9 @@ var tokenProperties =
 *----------------+-------------------------------------------------------------------+
 || Valid Values  | A list of comma separated absolute FileSystem paths without scheme and authority. |
 *----------------+-------------------------------------------------------------------+
-|| Syntax        | See the note in {{Delegation}}. |
+|| Syntax        | Any string. |
 *----------------+-------------------------------------------------------------------+
 
-  <<Note>> that sources are absolute FileSystem paths.
-
-
   See also:
   {{{Concat File(s)}<<<CONCAT>>>}}
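
For completeness, the documented request expressed with plain JDK HTTP rather than curl; the endpoint and paths are illustrative assumptions, and the URL shape follows the example above:

    // WebHdfsConcatRest.java - hypothetical; endpoint and paths are assumptions.
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WebHdfsConcatRest {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://namenode:50070/webhdfs/v1/data/part-0"
            + "?op=CONCAT&sources=/data/part-1,/data/part-2");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        System.out.println(conn.getResponseCode());  // 200 with zero content length on success
        conn.disconnect();
      }
    }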
 

+ 23 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java

@@ -17,18 +17,22 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.util.Arrays;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
 public class TestParam {
   public static final Log LOG = LogFactory.getLog(TestParam.class);
 
@@ -265,4 +269,20 @@ public class TestParam {
     UserParam userParam = new UserParam("a$");
     assertNotNull(userParam.getValue());
   }
+  
+  @Test
+  public void testConcatSourcesParam() {
+    final String[] strings = {"/", "/foo", "/bar"};
+    for(int n = 0; n < strings.length; n++) {
+      final String[] sub = new String[n]; 
+      final Path[] paths = new Path[n];
+      for(int i = 0; i < paths.length; i++) {
+        paths[i] = new Path(sub[i] = strings[i]);
+      }
+
+      final String expected = StringUtils.join(",", Arrays.asList(sub));
+      final ConcatSourcesParam computed = new ConcatSourcesParam(paths);
+      Assert.assertEquals(expected, computed.getValue());
+    }
+  }
 }