
Add a WritableFactory mechanism to permit using ObjectWritable with non-public classes. Register factories for the DFS implementation classes and make them non-public. This greatly simplifies the javadoc.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@375671 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 19 years ago
parent
commit
95a8baf33e
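
The pattern this commit applies to each DFS class, as one self-contained sketch; PrivateThing is a hypothetical package-private Writable standing in for classes like Block below:

package org.apache.hadoop.dfs;

import java.io.*;
import org.apache.hadoop.io.*;

// Package-private, so ObjectWritable's old (Writable)cls.newInstance()
// path could not construct it from another package; the registered
// factory can, because the anonymous class is created inside this package.
class PrivateThing implements Writable {

    static {                                      // register a ctor
      WritableFactories.setFactory
        (PrivateThing.class,
         new WritableFactory() {
           public Writable newInstance() { return new PrivateThing(); }
         });
    }

    long id;

    public void write(DataOutput out) throws IOException {
      out.writeLong(id);
    }

    public void readFields(DataInput in) throws IOException {
      id = in.readLong();
    }
}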

+ 10 - 1
src/java/org/apache/hadoop/dfs/Block.java

@@ -26,7 +26,16 @@ import java.util.*;
  *
  * @author Mike Cafarella
  **************************************************/
-public class Block implements Writable, Comparable {
+class Block implements Writable, Comparable {
+
+    static {                                      // register a ctor
+      WritableFactories.setFactory
+        (Block.class,
+         new WritableFactory() {
+           public Writable newInstance() { return new Block(); }
+         });
+    }
+
     static Random r = new Random();
 
     /**

+ 10 - 1
src/java/org/apache/hadoop/dfs/BlockCommand.java

@@ -25,7 +25,16 @@ import java.io.*;
  * 
  * @author Michael Cafarella
  ****************************************************/
-public class BlockCommand implements Writable {
+class BlockCommand implements Writable {
+
+    static {                                      // register a ctor
+      WritableFactories.setFactory
+        (BlockCommand.class,
+         new WritableFactory() {
+           public Writable newInstance() { return new BlockCommand(); }
+         });
+    }
+  
     boolean transferBlocks = false;
     boolean invalidateBlocks = false;
     Block blocks[];

+ 1 - 1
src/java/org/apache/hadoop/dfs/ClientProtocol.java

@@ -23,7 +23,7 @@ import java.io.*;
  *
  * @author Mike Cafarella
  **********************************************************************/
-public interface ClientProtocol {
+interface ClientProtocol {
 
     /**
      * Open an existing file.  Get back block and datanode info

+ 1 - 1
src/java/org/apache/hadoop/dfs/DFSClient.java

@@ -31,7 +31,7 @@ import java.util.logging.*;
  * Connects to a namenode daemon.
  * @author Mike Cafarella, Tessa MacDuff
  ********************************************************/
-public class DFSClient implements FSConstants {
+class DFSClient implements FSConstants {
     public static final Logger LOG = LogFormatter.getLogger("org.apache.hadoop.fs.DFSClient");
     static int MAX_BLOCK_ACQUIRE_FAILURES = 10;
     ClientProtocol namenode;

+ 1 - 1
src/java/org/apache/hadoop/dfs/DFSFile.java

@@ -24,7 +24,7 @@ import java.io.*;
  *
  * @author Mike Cafarella
  *****************************************************************/
-public class DFSFile extends File {
+class DFSFile extends File {
     DFSFileInfo info;
 
     /** Separator used in DFS filenames. */

+ 9 - 1
src/java/org/apache/hadoop/dfs/DFSFileInfo.java

@@ -25,7 +25,15 @@ import java.io.*;
  * 
  * @author Mike Cafarella
  ******************************************************/
-public class DFSFileInfo implements Writable {
+class DFSFileInfo implements Writable {
+    static {                                      // register a ctor
+      WritableFactories.setFactory
+        (DFSFileInfo.class,
+         new WritableFactory() {
+           public Writable newInstance() { return new DFSFileInfo(); }
+         });
+    }
+
     UTF8 path;
     long len;
     long contentsLen;

+ 10 - 1
src/java/org/apache/hadoop/dfs/DatanodeInfo.java

@@ -25,7 +25,16 @@ import java.util.*;
  *
  * @author Mike Cafarella
  **************************************************/
-public class DatanodeInfo implements Writable, Comparable {
+class DatanodeInfo implements Writable, Comparable {
+
+    static {                                      // register a ctor
+      WritableFactories.setFactory
+        (DatanodeInfo.class,
+         new WritableFactory() {
+           public Writable newInstance() { return new DatanodeInfo(); }
+         });
+    }
+
     private UTF8 name;
     private long capacityBytes, remainingBytes, lastUpdate;
     private volatile TreeSet blocks;

+ 1 - 1
src/java/org/apache/hadoop/dfs/DatanodeProtocol.java

@@ -24,7 +24,7 @@ import java.io.*;
  *
  * @author Michael Cafarella
  **********************************************************************/
-public interface DatanodeProtocol {
+interface DatanodeProtocol {
 
     public void sendHeartbeat(String sender, long capacity, long remaining) throws IOException;
     public Block[] blockReport(String sender, Block blocks[]) throws IOException;

+ 1 - 1
src/java/org/apache/hadoop/dfs/FSConstants.java

@@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration;
  *
  * @author Mike Cafarella
  ************************************/
-public interface FSConstants {
+interface FSConstants {
     public static int BLOCK_SIZE = 32 * 1000 * 1000;
     //public static int BLOCK_SIZE = 19;
 

+ 1 - 1
src/java/org/apache/hadoop/dfs/FSDataset.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.conf.*;
  *
  * @author Mike Cafarella
  ***************************************************/
-public class FSDataset implements FSConstants {
+class FSDataset implements FSConstants {
     static final double USABLE_DISK_PCT = 0.98;
 
   /**

+ 1 - 1
src/java/org/apache/hadoop/dfs/FSDirectory.java

@@ -30,7 +30,7 @@ import java.util.*;
  * 
  * @author Mike Cafarella
  *************************************************/
-public class FSDirectory implements FSConstants {
+class FSDirectory implements FSConstants {
     static String FS_IMAGE = "fsimage";
     static String NEW_FS_IMAGE = "fsimage.new";
     static String OLD_FS_IMAGE = "fsimage.old";

+ 1 - 1
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -32,7 +32,7 @@ import java.util.logging.*;
  * 4)  machine --> blocklist (inverted #2)
  * 5)  LRU cache of updated-heartbeat machines
  ***************************************************/
-public class FSNamesystem implements FSConstants {
+class FSNamesystem implements FSConstants {
     public static final Logger LOG = LogFormatter.getLogger("org.apache.hadoop.fs.FSNamesystem");
 
    

+ 10 - 1
src/java/org/apache/hadoop/dfs/LocatedBlock.java

@@ -25,7 +25,16 @@ import java.io.*;
  * 
  * @author Michael Cafarella
  ****************************************************/
-public class LocatedBlock implements Writable {
+class LocatedBlock implements Writable {
+
+    static {                                      // register a ctor
+      WritableFactories.setFactory
+        (LocatedBlock.class,
+         new WritableFactory() {
+           public Writable newInstance() { return new LocatedBlock(); }
+         });
+    }
+
     Block b;
     DatanodeInfo locs[];
 

+ 1 - 8
src/java/org/apache/hadoop/io/ArrayWritable.java

@@ -78,14 +78,7 @@ public class ArrayWritable implements Writable {
   public void readFields(DataInput in) throws IOException {
     values = new Writable[in.readInt()];          // construct values
     for (int i = 0; i < values.length; i++) {
-      Writable value;                             // construct value
-      try {
-        value = (Writable)valueClass.newInstance();
-      } catch (InstantiationException e) {
-        throw new RuntimeException(e.toString());
-      } catch (IllegalAccessException e) {
-        throw new RuntimeException(e.toString());
-      }
+      Writable value = WritableFactories.newInstance(valueClass);
       value.readFields(in);                       // read a value
       values[i] = value;                          // store it in values
     }

+ 5 - 11
src/java/org/apache/hadoop/io/ObjectWritable.java

@@ -227,18 +227,12 @@ public class ObjectWritable implements Writable, Configurable {
       instance = UTF8.readString(in);
       
     } else {                                      // Writable
-      try {
-        Writable writable = (Writable)declaredClass.newInstance();
-        if(writable instanceof Configurable) {
-          ((Configurable) writable).setConf(conf);
-        }
-        writable.readFields(in);
-        instance = writable;
-      } catch (InstantiationException e) {
-        throw new RuntimeException(e);
-      } catch (IllegalAccessException e) {
-        throw new RuntimeException(e);
+      Writable writable = WritableFactories.newInstance(declaredClass);
+      if(writable instanceof Configurable) {
+        ((Configurable) writable).setConf(conf);
       }
+      writable.readFields(in);
+      instance = writable;
     }
 
     if (objectWritable != null) {                 // store values

+ 55 - 0
src/java/org/apache/hadoop/io/WritableFactories.java

@@ -0,0 +1,55 @@
+/**
+ * Copyright 2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import java.util.HashMap;
+
+/** Factories for non-public writables.  Defining a factory permits {@link
+ * ObjectWritable} to be able to construct instances of non-public classes. */
+public class WritableFactories {
+  private static final HashMap CLASS_TO_FACTORY = new HashMap();
+
+  private WritableFactories() {}                  // singleton
+
+  /** Define a factory for a class. */
+  public static synchronized void setFactory(Class c, WritableFactory factory) {
+    CLASS_TO_FACTORY.put(c, factory);
+  }
+
+  /** Define a factory for a class. */
+  public static synchronized WritableFactory getFactory(Class c) {
+    return (WritableFactory)CLASS_TO_FACTORY.get(c);
+  }
+
+  /** Create a new instance of a class with a defined factory. */
+  public static Writable newInstance(Class c) {
+    WritableFactory factory = WritableFactories.getFactory(c);
+    if (factory != null) {
+      return factory.newInstance();
+    } else {
+      try {
+        return (Writable)c.newInstance();
+      } catch (InstantiationException e) {
+        throw new RuntimeException(e);
+      } catch (IllegalAccessException e) {
+        throw new RuntimeException(e);
+      }
+    }
+  }
+
+}
+
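
The lookup above is what ObjectWritable relies on at read time. A minimal sketch of that path; the helper class here is hypothetical, and in the real code the class name arrives off the wire:

package org.apache.hadoop.io;

import java.io.*;

class WritableFactoriesDemo {
  static Writable read(String className, DataInput in) throws IOException {
    Class declaredClass;
    try {
      // Loading the class runs its static initializer, which is where
      // non-public classes like Block register their factories.
      declaredClass = Class.forName(className);
    } catch (ClassNotFoundException e) {
      throw new RuntimeException(e.toString());
    }
    // Factory lookup first; public Writables without a registered
    // factory fall back to declaredClass.newInstance() as before.
    Writable writable = WritableFactories.newInstance(declaredClass);
    writable.readFields(in);
    return writable;
  }
}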

+ 26 - 0
src/java/org/apache/hadoop/io/WritableFactory.java

@@ -0,0 +1,26 @@
+/**
+ * Copyright 2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+/** A factory for a class of Writable.
+ * @see WritableFactories
+ */
+public interface WritableFactory {
+  /** Return a new instance. */
+  Writable newInstance();
+}
+
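
A round-trip usage sketch inside the dfs package; the scaffolding class and main method are illustrative assumptions, only Block and the WritableFactories call come from this commit:

package org.apache.hadoop.dfs;

import java.io.*;
import org.apache.hadoop.io.*;

class BlockRoundTrip {
  public static void main(String[] args) throws IOException {
    Block original = new Block();               // loading Block runs the
                                                // static registration block
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    original.write(new DataOutputStream(buf));

    // The registered factory, not reflection, constructs the instance.
    Writable copy = WritableFactories.newInstance(Block.class);
    copy.readFields(new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray())));
  }
}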