HADOOP-8422. Deprecate FileSystem#getDefault* and getServerDefaults methods that don't take a Path argument. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1342495 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins, 13 years ago
Commit 22cb0ec82a
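
At a glance, the change deprecates the no-argument FileSystem#getServerDefaults, #getDefaultBlockSize, and #getDefaultReplication in favor of the existing Path-taking overloads, and updates call sites accordingly, so the returned defaults can reflect the filesystem that actually backs a given path. A minimal migration sketch (the class name and path below are hypothetical, not part of this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DefaultsMigration {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/user/example/data.txt"); // hypothetical path

        // Path-taking replacements for the deprecated no-arg calls
        long blockSize = fs.getDefaultBlockSize(p);      // was: fs.getDefaultBlockSize()
        short replication = fs.getDefaultReplication(p); // was: fs.getDefaultReplication()
        System.out.println("blockSize=" + blockSize + " replication=" + replication);
      }
    }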

+ 3 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -173,6 +173,9 @@ Release 2.0.1-alpha - UNRELEASED
 
     HADOOP-8398. Cleanup BlockLocation. (eli)
 
+    HADOOP-8422. Deprecate FileSystem#getDefault* and getServerDefaults
+    methods that don't take a Path argument. (eli)
+
   BUG FIXES
 
     HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname

+ 10 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -615,7 +615,9 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Return a set of server default configuration values
    * @return server default configuration values
    * @throws IOException
+   * @deprecated use {@link #getServerDefaults(Path)} instead
    */
+  @Deprecated
   public FsServerDefaults getServerDefaults() throws IOException {
     Configuration conf = getConf();
     return new FsServerDefaults(getDefaultBlockSize(), 
@@ -1939,8 +1941,12 @@ public abstract class FileSystem extends Configured implements Closeable {
     return getFileStatus(f).getBlockSize();
   }
 
-  /** Return the number of bytes that large input files should be optimally
-   * be split into to minimize i/o time. */
+  /**
+   * Return the number of bytes that large input files should optimally
+   * be split into to minimize i/o time.
+   * @deprecated use {@link #getDefaultBlockSize(Path)} instead
+   */
+  @Deprecated
   public long getDefaultBlockSize() {
     // default to 32MB: large enough to minimize the impact of seeks
     return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024);
@@ -1958,7 +1964,9 @@ public abstract class FileSystem extends Configured implements Closeable {
 
   /**
    * Get the default replication.
+   * @deprecated use {@link #getDefaultReplication(Path)} instead
    */
+  @Deprecated
   public short getDefaultReplication() { return 1; }
 
   /**
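
The server-defaults accessor follows the same pattern: prefer getServerDefaults(Path) and read individual values from the returned FsServerDefaults. A short usage sketch (the class name and path are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsServerDefaults;
    import org.apache.hadoop.fs.Path;

    public class ServerDefaultsExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/user/example"); // hypothetical path
        FsServerDefaults d = fs.getServerDefaults(p); // path-aware variant
        System.out.println("blockSize=" + d.getBlockSize()
            + " replication=" + d.getReplication());
      }
    }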

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -316,7 +316,7 @@ public class RawLocalFileSystem extends FileSystem {
     }
     if (localf.isFile()) {
       return new FileStatus[] {
-        new RawLocalFileStatus(localf, getDefaultBlockSize(), this) };
+        new RawLocalFileStatus(localf, getDefaultBlockSize(f), this) };
     }
 
     String[] names = localf.list();
@@ -444,7 +444,7 @@ public class RawLocalFileSystem extends FileSystem {
   public FileStatus getFileStatus(Path f) throws IOException {
     File path = pathToFile(f);
     if (path.exists()) {
-      return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(), this);
+      return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(f), this);
     } else {
       throw new FileNotFoundException("File " + f + " does not exist");
     }

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java

@@ -113,7 +113,7 @@ public final class FileSystemTestHelper {
 
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
       int blockSize, boolean createParent) throws IOException {
-      return createFile(fSys, path, numBlocks, blockSize, fSys.getDefaultReplication(), true);
+      return createFile(fSys, path, numBlocks, blockSize, fSys.getDefaultReplication(path), true);
   }
 
   public static long createFile(FileSystem fSys, Path path, int numBlocks,

+ 2 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java

@@ -47,11 +47,9 @@ public abstract class S3FileSystemContractBaseTest
   }
   
   public void testBlockSize() throws Exception {
-    
-    long newBlockSize = fs.getDefaultBlockSize() * 2;
-    fs.getConf().setLong("fs.s3.block.size", newBlockSize);
-    
     Path file = path("/test/hadoop/file");
+    long newBlockSize = fs.getDefaultBlockSize(file) * 2;
+    fs.getConf().setLong("fs.s3.block.size", newBlockSize);
     createFile(file);
     assertEquals("Double default block size", newBlockSize,
 	fs.getFileStatus(file).getBlockSize());

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java

@@ -141,11 +141,11 @@ public abstract class NativeS3FileSystemContractBaseTest
   public void testBlockSize() throws Exception {
     Path file = path("/test/hadoop/file");
     createFile(file);
-    assertEquals("Default block size", fs.getDefaultBlockSize(),
+    assertEquals("Default block size", fs.getDefaultBlockSize(file),
     fs.getFileStatus(file).getBlockSize());
 
     // Block size is determined at read time
-    long newBlockSize = fs.getDefaultBlockSize() * 2;
+    long newBlockSize = fs.getDefaultBlockSize(file) * 2;
     fs.getConf().setLong("fs.s3n.block.size", newBlockSize);
     assertEquals("Double default block size", newBlockSize,
     fs.getFileStatus(file).getBlockSize());

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java

@@ -288,10 +288,10 @@ public class FSOperations {
     @Override
     public Void execute(FileSystem fs) throws IOException {
       if (replication == -1) {
-        replication = fs.getDefaultReplication();
+        replication = fs.getDefaultReplication(path);
       }
       if (blockSize == -1) {
-        blockSize = fs.getDefaultBlockSize();
+        blockSize = fs.getDefaultBlockSize(path);
       }
       FsPermission fsPermission = getPermission(permission);
       int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java

@@ -202,7 +202,7 @@ public class TestDFSPermission extends TestCase {
     case CREATE:
       FSDataOutputStream out = fs.create(name, permission, true, 
           conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-          fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
+          fs.getDefaultReplication(name), fs.getDefaultBlockSize(name), null);
       out.close();
       break;
     case MKDIRS:

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java

@@ -875,8 +875,8 @@ public class TestQuota {
       // 6kb block
       // 192kb quota
       final int FILE_SIZE = 1024;
-      final int QUOTA_SIZE = 32 * (int) fs.getDefaultBlockSize();
-      assertEquals(6 * 1024, fs.getDefaultBlockSize());
+      final int QUOTA_SIZE = 32 * (int) fs.getDefaultBlockSize(dir);
+      assertEquals(6 * 1024, fs.getDefaultBlockSize(dir));
       assertEquals(192 * 1024, QUOTA_SIZE);
 
       // Create the dir and set the quota. We need to enable the quota before
@@ -903,7 +903,7 @@ public class TestQuota {
       assertEquals("Invalid space consumed", 59 * FILE_SIZE * 3,
           c.getSpaceConsumed());
       assertEquals("Invalid space consumed", QUOTA_SIZE - (59 * FILE_SIZE * 3),
-          3 * (fs.getDefaultBlockSize() - FILE_SIZE));
+          3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE));
 
       // Now check that trying to create another file violates the quota
       try {

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java

@@ -123,9 +123,10 @@ public class TestPermission extends TestCase {
       checkPermission(fs, "/aa/1/aa/2/aa/3", dirPerm);
 
       FsPermission filePerm = new FsPermission((short)0444);
-      FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm,
+      Path p = new Path("/b1/b2/b3.txt");
+      FSDataOutputStream out = fs.create(p, filePerm,
           true, conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-          fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
+          fs.getDefaultReplication(p), fs.getDefaultBlockSize(p), null);
       out.write(123);
       out.close();
       checkPermission(fs, "/b1", inheritPerm);