
HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1227165 13f79535-47bb-0310-9956-ffa450edef68
Harsh J 13 years ago
parent
commit
075122690c

+ 5 - 2
hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml

@@ -628,8 +628,11 @@
                   <tr>
                     <td>conf/hdfs-site.xml</td>
                     <td>dfs.blocksize</td>
-                    <td>134217728</td>
-                    <td>HDFS blocksize of 128MB for large file-systems.</td>
+                    <td>128m</td>
+                    <td>
+                        HDFS blocksize of 128 MB for large file-systems. Sizes can be given
+                        with size-indicating suffixes (10k, 128m, 1g, etc.) or simply in bytes (134217728 for 128 MB, etc.).
+                    </td>
                   </tr>
                   <tr>
                     <td>conf/hdfs-site.xml</td>

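For context on what the new documentation describes: Configuration.getLongBytes, the accessor the Java hunks below switch to, parses both suffixed and plain byte values. A minimal standalone sketch, assuming binary (1024-based) suffix semantics; the BlockSizeDemo class name is hypothetical, not part of this commit:

import org.apache.hadoop.conf.Configuration;

public class BlockSizeDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Suffixed form: "128m" should resolve to 128 * 1024 * 1024 bytes.
    conf.set("dfs.blocksize", "128m");
    System.out.println(conf.getLongBytes("dfs.blocksize", 67108864L)); // 134217728

    // A plain byte count keeps working exactly as before.
    conf.set("dfs.blocksize", "134217728");
    System.out.println(conf.getLongBytes("dfs.blocksize", 67108864L)); // 134217728
  }
}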
+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -121,6 +121,8 @@ Trunk (unreleased changes)
 
     HDFS-2729. Update BlockManager's comments regarding the invalid block set (harsh)
 
+    HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh)
+
   OPTIMIZATIONS
     HDFS-2477. Optimize computing the diff between a block report and the
     namenode state. (Tomasz Nykiel via hairong)

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -180,7 +180,7 @@ public class DFSClient implements java.io.Closeable {
       /** dfs.write.packet.size is an internal config variable */
       writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
           DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
-      defaultBlockSize = conf.getLong(DFS_BLOCK_SIZE_KEY,
+      defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
           DFS_BLOCK_SIZE_DEFAULT);
       defaultReplication = (short) conf.getInt(
           DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);

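The one-line swap above matters because Configuration.getLong hands the raw string to Long.parseLong, so a suffixed value such as "1g" would fail with a NumberFormatException, while getLongBytes accepts both forms. A hedged standalone sketch (SuffixDemo is our name, not code from the commit):

import org.apache.hadoop.conf.Configuration;

public class SuffixDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("dfs.blocksize", "1g");

    // getLongBytes understands the binary suffix: 1g -> 1073741824 bytes.
    System.out.println(conf.getLongBytes("dfs.blocksize", 67108864L));

    try {
      // getLong expects a plain number, so the suffixed value fails.
      conf.getLong("dfs.blocksize", 67108864L);
    } catch (NumberFormatException e) {
      System.out.println("getLong cannot parse '1g'");
    }
  }
}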
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java

@@ -119,7 +119,7 @@ class DataXceiverServer implements Runnable {
       conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
                   DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
     
-    this.estimateBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+    this.estimateBlockSize = conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
     
     //set up parameter for cluster balancing

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -528,7 +528,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission));
     
     this.serverDefaults = new FsServerDefaults(
-        conf.getLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
+        conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
         conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
         conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT),
         (short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -529,7 +529,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public long getDefaultBlockSize() {
-    return getConf().getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+    return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
   }
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java

@@ -55,6 +55,6 @@ public class BlockSizeParam extends LongParam {
   /** @return the value or, if it is null, return the default from conf. */
   public long getValue(final Configuration conf) {
     return getValue() != null? getValue()
-        : conf.getLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
+        : conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
   }
 }

+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -329,7 +329,12 @@ creations/deletions), or "all".</description>
 <property>
   <name>dfs.blocksize</name>
   <value>67108864</value>
-  <description>The default block size for new files.</description>
+  <description>
+      The default block size for new files, in bytes.
+      You can use the following suffixes (case insensitive) to specify the size:
+      k (kilo), m (mega), g (giga), t (tera), p (peta), e (exa), as in 128k, 512m, 1g, etc.,
+      or provide the complete size in bytes (such as 134217728 for 128 MB).
+  </description>
 </property>
 
 <property>

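To make the suffix list in the description above concrete: each documented suffix scales the value by a further factor of 1024, assuming the binary-prefix semantics the commit message implies. A small hypothetical loop (SuffixTableDemo is not part of the commit):

import org.apache.hadoop.conf.Configuration;

public class SuffixTableDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Expected: 1k = 1024, 1m = 1048576, ..., 1e = 1152921504606846976
    for (String size : new String[] {"1k", "1m", "1g", "1t", "1p", "1e"}) {
      conf.set("dfs.blocksize", size);
      System.out.println(size + " = " + conf.getLongBytes("dfs.blocksize", 0L) + " bytes");
    }
  }
}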
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java

@@ -51,7 +51,7 @@ public class TestParam {
     final BlockSizeParam p = new BlockSizeParam(BlockSizeParam.DEFAULT);
     Assert.assertEquals(null, p.getValue());
     Assert.assertEquals(
-        conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+        conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
             DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),
         p.getValue(conf));