浏览代码

HADOOP-1647. FileSystem.getFileStatus returns valid values for "/".
(Dhruba Borthakur via dhruba)



git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@561443 13f79535-47bb-0310-9956-ffa450edef68

Dhruba Borthakur 18 年之前
父节点
当前提交
3d5a75df33
共有 3 个文件被更改,包括 125 次插入2 次删除
  1. 3 0
      CHANGES.txt
  2. 3 2
      src/java/org/apache/hadoop/dfs/FSDirectory.java
  3. 119 0
      src/test/org/apache/hadoop/dfs/TestFileStatus.java

+ 3 - 0
CHANGES.txt

@@ -427,6 +427,9 @@ Branch 0.14 (unreleased changes)
 141. HADOOP-1551.  libhdfs supports setting replication factor and
      retrieving modification time of files.  (Sameer Paranjpye via dhruba)
 
+142. HADOOP-1647.  FileSystem.getFileStatus returns valid values for "/".
+     (Dhruba Borthakur via dhruba)
+
 
 Release 0.13.0 - 2007-06-08
 

+ 3 - 2
src/java/org/apache/hadoop/dfs/FSDirectory.java

@@ -110,6 +110,9 @@ class FSDirectory implements FSConstants {
      * @return the string representation of the absolute path of this file
      */
     String getAbsoluteName() {
+      if (this.parent == null) {    
+        return Path.SEPARATOR;       // root directory is "/"
+      }
       return internalGetAbsolutePathName().toString();
     }
 
@@ -301,8 +304,6 @@ class FSDirectory implements FSConstants {
       return total + 1;
     }
 
-    /**
-     */
     long computeFileLength() {
       long total = 0;
       if (blocks != null) {

+ 119 - 0
src/test/org/apache/hadoop/dfs/TestFileStatus.java

@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.dfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.fs.FSDataOutputStream;
+
+/**
+ * This class tests the FileStatus API.
+ */
+public class TestFileStatus extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 8192;
+  static final int fileSize = 16384;
+
+  private static String TEST_ROOT_DIR =
+    new Path(System.getProperty("test.build.data","/tmp"))
+    .toString().replace(' ', '+');
+  
+  private void writeFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    // create and write a file that contains three blocks of data
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)repl, (long)blockSize);
+    byte[] buffer = new byte[fileSize];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+  }
+
+  private void checkFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    boolean done = false;
+    while (!done) {
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {}
+      done = true;
+      String[][] locations = fileSys.getFileCacheHints(name, 0, fileSize);
+      for (int idx = 0; idx < locations.length; idx++) {
+        if (locations[idx].length < repl) {
+          done = false;
+          break;
+        }
+      }
+    }
+  }
+
+
+  /**
+   * Tests the FileStatus API on DFS.
+   */
+  public void testFileStatus() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    try {
+
+      //
+      // check that / exists
+      //
+      Path path = new Path("/");
+      System.out.println("Path : \"" + path.toString() + "\"");
+      System.out.println(fs.isDirectory(path));
+      System.out.println(fs.getFileStatus(path).isDir()); 
+      assertTrue("/ should be a directory", 
+                 fs.getFileStatus(path).isDir() == true);
+      
+      // create a file in home directory
+      //
+      Path file1 = new Path("filestatus.dat");
+      writeFile(fs, file1, 1);
+      System.out.println("Created file filestatus.dat with one "
+                         + "replica.");
+      checkFile(fs, file1, 1);
+      assertTrue(file1 + " should be a file", 
+                  fs.getFileStatus(file1).isDir() == false);
+      System.out.println("Path : \"" + file1 + "\"");
+
+      // create a directory
+      //
+      Path dir = new Path("/test/mkdirs");
+      assertTrue(fs.mkdirs(dir));
+      assertTrue(fs.exists(dir));
+      assertTrue(dir + " should be a directory", 
+                 fs.getFileStatus(dir).isDir() == true);
+      System.out.println("Dir : \"" + dir + "\"");
+    
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+}