
Improved javadoc, starting the overview and package documentation. Also moved DF from the dfs package to the fs package.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@375662 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 19 years ago
parent
commit
a76b8a871e

+ 5 - 0
src/java/org/apache/hadoop/conf/package.html

@@ -0,0 +1,5 @@
+<html>
+<body>
+Configuration of system parameters.
+</body>
+</html>
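
This package holds the Configuration machinery the rest of the system reads its parameters from. A minimal sketch of typical use; the property name is used purely for illustration, and the second argument to get() is the fallback when the key is unset:

    import org.apache.hadoop.conf.Configuration;

    public class ConfExample {
      public static void main(String[] args) {
        // Configuration loads resource files, then answers simple
        // name/value lookups.
        Configuration conf = new Configuration();
        // "fs.default.name" is assumed here as an illustrative key.
        String fs = conf.get("fs.default.name", "local");
        System.out.println("default filesystem: " + fs);
      }
    }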

+ 9 - 0
src/java/org/apache/hadoop/dfs/package.html

@@ -0,0 +1,9 @@
+<html>
+<body>
+
+<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's <a href="http://labs.google.com/papers/gfs.html">GFS</a>.</p>
+
+</body>
+</html>
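
A short sketch of reaching this implementation through the abstract org.apache.hadoop.fs.FileSystem API. Per the getNamed javadoc touched later in this commit, names are either "local" or a host:port pair; the address below is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class DfsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // A host:port name selects the distributed implementation;
        // "localhost:9000" is a placeholder namenode address.
        FileSystem fs = FileSystem.getNamed("localhost:9000", conf);
        System.out.println("connected to: " + fs.getName());
      }
    }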

+ 1 - 1
src/java/org/apache/hadoop/dfs/DF.java → src/java/org/apache/hadoop/fs/DF.java

@@ -13,7 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.dfs;
+package org.apache.hadoop.fs;
 
 import java.io.IOException;
 import java.io.InputStreamReader;
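
Only the package declaration changes; the class body is untouched. For orientation, a hypothetical sketch of what using DF might look like. The constructor arguments and accessor names below are assumptions, since none of the class body appears in this hunk:

    import org.apache.hadoop.fs.DF;

    public class DfExample {
      public static void main(String[] args) throws Exception {
        // Assumed: DF shells out to the Unix `df` command for a path and
        // caches the parsed output for a refresh interval (milliseconds).
        DF df = new DF("/tmp", 3000);                                  // assumed signature
        System.out.println("capacity bytes:  " + df.getCapacity());   // assumed accessor
        System.out.println("available bytes: " + df.getAvailable());  // assumed accessor
      }
    }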

+ 2 - 7
src/java/org/apache/hadoop/fs/FileSystem.java

@@ -88,7 +88,7 @@ public abstract class FileSystem {
 
     protected Configuration conf;
     /** Returns a name for this filesystem, suitable to pass to {@link
-     * FileSystem#getNamed(String).*/
+     * FileSystem#getNamed(String,Configuration)}.*/
     public abstract String getName();
   
     /** Returns a named filesystem.  Names are either the string "local" or a
@@ -142,8 +142,6 @@ public abstract class FileSystem {
     /**
      * Opens an FSDataInputStream at the indicated File.
      * @param f the file name to open
-     * @param overwrite if a file with this name already exists, then if true,
-     *   the file will be overwritten, and if false an error will be thrown.
      * @param bufferSize the size of the buffer to be used.
      */
     public FSDataInputStream open(File f, int bufferSize) throws IOException {
@@ -152,10 +150,7 @@ public abstract class FileSystem {
     
     /**
      * Opens an FSDataInputStream at the indicated File.
-     * @param f the file name to open
-     * @param overwrite if a file with this name already exists, then if true,
-     *   the file will be overwritten, and if false an error will be thrown.
-     * @param bufferSize the size of the buffer to be used.
+     * @param f the file to open
      */
     public FSDataInputStream open(File f) throws IOException {
       return new FSDataInputStream(this, f, conf);
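
With the tags corrected, the two open overloads and getNamed(String,Configuration) line up as follows; a sketch against the local filesystem, with the file name and buffer size chosen arbitrarily:

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;

    public class OpenExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Names are either "local" or a host:port pair.
        FileSystem fs = FileSystem.getNamed("local", conf);
        // First overload: caller picks the buffer size.
        FSDataInputStream in = fs.open(new File("part-00000"), 4096);
        try {
          System.out.println("first byte: " + in.read());
        } finally {
          in.close();
        }
      }
    }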

+ 2 - 22
src/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -20,9 +20,7 @@ import java.io.*;
 import java.util.*;
 import java.nio.channels.*;
 
-import org.apache.hadoop.dfs.DFSFile;
-import org.apache.hadoop.dfs.DF;
-import org.apache.hadoop.dfs.DFSFileInfo;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.UTF8;
 
@@ -211,25 +209,7 @@ public class LocalFileSystem extends FileSystem {
     /**
      */
     public File[] listFilesRaw(File f) throws IOException {
-        File[] files = f.listFiles();
-        if (files == null) return null;
-        // 20041022, xing, Watch out here:
-        // currently DFSFile.java does not support those methods
-        //    public boolean canRead()
-        //    public boolean canWrite()
-        //    public boolean createNewFile()
-        //    public boolean delete()
-        //    public void deleteOnExit()
-        //    public boolean isHidden()
-        // so you can not rely on returned list for these operations.
-        DFSFile[] nfiles = new DFSFile[files.length];
-        for (int i = 0; i < files.length; i++) {
-            long len = files[i].length();
-            UTF8 name = new UTF8(files[i].toString());
-            DFSFileInfo info = new DFSFileInfo(name, len, len, files[i].isDirectory());
-            nfiles[i] = new DFSFile(info);
-        }
-        return nfiles;
+        return f.listFiles();
     }
 
     /**
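
The removed wrapping had converted each local File into a DFSFile, losing operations like canRead() and delete() along the way, as the deleted comment warned. Returning File.listFiles() directly keeps those operations available. A small sketch of a caller; the directory is illustrative, and listFilesRaw is assumed callable through the FileSystem handle returned by getNamed:

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class ListExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getNamed("local", new Configuration());
        File[] entries = fs.listFilesRaw(new File("."));   // assumed on FileSystem
        if (entries != null) {                             // null when f is not a directory
          for (int i = 0; i < entries.length; i++) {
            // Plain java.io.File objects now, so canRead() etc. behave normally.
            System.out.println(entries[i] + " readable=" + entries[i].canRead());
          }
        }
      }
    }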

+ 1 - 1
src/java/org/apache/hadoop/fs/Seekable.java

@@ -17,7 +17,7 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 
-/* Stream which permits seeking. */
+/** Stream that permits seeking. */
 public interface Seekable {
   /**
    * Seek to the given offset from the start of the file.
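
The hunk shows one method, seek(long), taking an offset from the start of the file. A minimal sketch of an implementation backed by java.io.RandomAccessFile; if the interface declares members beyond what this hunk shows, they would need implementing as well:

    import java.io.IOException;
    import java.io.RandomAccessFile;
    import org.apache.hadoop.fs.Seekable;

    public class RandomAccessSeekable implements Seekable {
      private final RandomAccessFile file;

      public RandomAccessSeekable(RandomAccessFile file) {
        this.file = file;
      }

      // Seek to the given offset from the start of the file.
      public void seek(long pos) throws IOException {
        file.seek(pos);
      }
    }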

+ 5 - 0
src/java/org/apache/hadoop/fs/package.html

@@ -0,0 +1,5 @@
+<html>
+<body>
+An abstract file system API.
+</body>
+</html>

+ 2 - 1
src/java/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java

@@ -37,7 +37,8 @@ public interface TaskUmbilicalProtocol {
 
   /** Report error messages back to parent.  Calls should be sparing, since all
    *  such messages are held in the job tracker.
-   *  @param trace, the stack trace text
+   *  @param taskid the id of the task involved
+   *  @param trace the text to report
    */
   void reportDiagnosticInfo(String taskid, String trace) throws IOException;
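
A sketch of the reporting path from the task side; the helper below is hypothetical, and the umbilical and taskid parameters stand in for references the task runtime would supply:

    import java.io.IOException;
    import java.io.PrintWriter;
    import java.io.StringWriter;
    import org.apache.hadoop.mapred.TaskUmbilicalProtocol;

    public class DiagnosticReporter {
      // Hypothetical helper: renders a throwable as text and reports it upward.
      static void report(TaskUmbilicalProtocol umbilical, String taskid, Throwable t)
          throws IOException {
        StringWriter sw = new StringWriter();
        t.printStackTrace(new PrintWriter(sw, true));
        // Keep calls sparing: the job tracker retains every message sent here.
        umbilical.reportDiagnosticInfo(taskid, sw.toString());
      }
    }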
 

+ 5 - 0
src/java/org/apache/hadoop/mapred/demo/package.html

@@ -0,0 +1,5 @@
+<html>
+<body>
+MapReduce examples.
+</body>
+</html>

+ 4 - 4
src/java/org/apache/hadoop/mapred/package.html

@@ -4,10 +4,10 @@
 <p>A system for scalable, fault-tolerant, distributed computation over
 large data collections.</p>
 
-<p>Applications implement {@link org.apache.nutch.mapReduce.Mapper} and
-{@link org.apache.nutch.mapReduce.Reducer} interfaces.  These are submitted
-as a {@link org.apache.nutch.mapred.JobConf} and are applied to data
-stored in a {@link org.apache.nutch.fs.NutchFileSystem}.</p>
+<p>Applications implement {@link org.apache.hadoop.mapred.Mapper} and
+{@link org.apache.hadoop.mapred.Reducer} interfaces.  These are submitted
+as a {@link org.apache.hadoop.mapred.JobConf} and are applied to data
+stored in a {@link org.apache.hadoop.fs.FileSystem}.</p>
 
 <p>See <a href="http://labs.google.com/papers/mapreduce.html">Google's
 original Map/Reduce paper</a> for background information.</p>
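
A minimal driver sketch of the submission path just described. The identity mapper and reducer classes and the File-based directory setters are assumptions about this era of the API; "input" and "output" are placeholder directories:

    import java.io.File;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.lib.IdentityMapper;    // assumed available
    import org.apache.hadoop.mapred.lib.IdentityReducer;   // assumed available

    public class IdentityJob {
      public static void main(String[] args) throws Exception {
        JobConf job = new JobConf();
        job.setMapperClass(IdentityMapper.class);
        job.setReducerClass(IdentityReducer.class);
        job.setInputDir(new File("input"));     // assumed File-based setter
        job.setOutputDir(new File("output"));   // assumed File-based setter
        JobClient.runJob(job);                  // submit and block until done
      }
    }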

+ 5 - 0
src/java/org/apache/hadoop/util/package.html

@@ -0,0 +1,5 @@
+<html>
+<body>
+Common utilities.
+</body>
+</html>

+ 16 - 0
src/java/overview.html

@@ -0,0 +1,16 @@
+<html>
+<head>
+   <title>Hadoop</title>
+</head>
+<body>
+
+Hadoop is a distributed computing platform.  It primarily consists of
+a distributed filesystem (in <a
+href="org/apache/hadoop/dfs/package-summary.html">org.apache.hadoop.dfs</a>)
+and an implementation of a MapReduce distributed data processor (in <a
+href="org/apache/hadoop/mapred/package-summary.html">org.apache.hadoop.mapred</a>)
+
+<p>
+</body>
+</html>
+