
merge -r 441655:443039, from trunk to 0.6 branch, preparing for 0.6.1 release.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/branches/branch-0.6@443040 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting, 18 years ago
parent commit 99ce6b3067

+ 20 - 0
CHANGES.txt

@@ -1,6 +1,26 @@
 Hadoop Change Log
 
 
+Release 0.6.1 - 2006-09-13
+
+ 1. HADOOP-520.  Fix a bug in libhdfs, where write failures were not
+    correctly returning error codes.  (Arun C Murthy via cutting)
+
+ 2. HADOOP-523.  Fix a NullPointerException when TextInputFormat is
+    explicitly specified.  Also add a test case for this.
+    (omalley via cutting)
+
+ 3. HADOOP-521.  Fix another NullPointerException finding the
+    ClassLoader when using libhdfs.  (omalley via cutting)
+
+ 4. HADOOP-526.  Fix a NullPointerException when attempting to start
+    two datanodes in the same directory.  (Milind Bhandarkar via cutting)
+
+ 5. HADOOP-529.  Fix a NullPointerException when opening
+    value-compressed sequence files generated by pre-0.6.0 Hadoop.
+    (omalley via cutting)
+
+
 Release 0.6.0 - 2006-09-08
 
  1. HADOOP-427.  Replace some uses of DatanodeDescriptor in the DFS

+ 1 - 1
build.xml

@@ -9,7 +9,7 @@
  
   <property name="Name" value="Hadoop"/>
   <property name="name" value="hadoop"/>
-  <property name="version" value="0.6.1-dev"/>
+  <property name="version" value="0.6.2-dev"/>
   <property name="final.name" value="${name}-${version}"/>
   <property name="year" value="2006"/>
   <property name="libhdfs.version" value="1"/>

+ 22 - 13
site/index.html

@@ -122,6 +122,9 @@ document.write("<text>Last Published:</text> " + document.lastModified);
 <a href="#News">News</a>
 <ul class="minitoc">
 <li>
+<a href="#13+September%2C+2006%3A+release+0.6.1+available">13 September, 2006: release 0.6.1 available</a>
+</li>
+<li>
 <a href="#8+September%2C+2006%3A+release+0.6.0+available">8 September, 2006: release 0.6.0 available</a>
 </li>
 <li>
@@ -169,55 +172,61 @@ document.write("<text>Last Published:</text> " + document.lastModified);
 <a name="N1000C"></a><a name="News"></a>
 <h2 class="h3">News</h2>
 <div class="section">
-<a name="N10012"></a><a name="8+September%2C+2006%3A+release+0.6.0+available"></a>
+<a name="N10012"></a><a name="13+September%2C+2006%3A+release+0.6.1+available"></a>
+<h3 class="h4">13 September, 2006: release 0.6.1 available</h3>
+<p>For details see the <a href="http://tinyurl.com/lykp4">release notes</a>. The release can
+      be obtained from <a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/"> a
+      nearby mirror</a>.
+      </p>
+<a name="N10024"></a><a name="8+September%2C+2006%3A+release+0.6.0+available"></a>
 <h3 class="h4">8 September, 2006: release 0.6.0 available</h3>
 <p>For details see the <a href="http://tinyurl.com/r3zoj">release notes</a>. The release can
       be obtained from <a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/"> a
       nearby mirror</a>.
       </p>
-<a name="N10024"></a><a name="4+August%2C+2006%3A+release+0.5.0+available"></a>
+<a name="N10036"></a><a name="4+August%2C+2006%3A+release+0.5.0+available"></a>
 <h3 class="h4">4 August, 2006: release 0.5.0 available</h3>
 <p>For details see the <a href="http://tinyurl.com/pnml2">release notes</a>. The release can
       be obtained from <a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/"> a
       nearby mirror</a>.
       </p>
-<a name="N10036"></a><a name="28+June%2C+2006%3A+release+0.4.0+available"></a>
+<a name="N10048"></a><a name="28+June%2C+2006%3A+release+0.4.0+available"></a>
 <h3 class="h4">28 June, 2006: release 0.4.0 available</h3>
 <p>For details see the <a href="http://tinyurl.com/o35b6">change log</a>. The release can
       be obtained from <a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/"> a
       nearby mirror</a>.
       </p>
-<a name="N10048"></a><a name="9+June%2C+2006%3A+release+0.3.2+available"></a>
+<a name="N1005A"></a><a name="9+June%2C+2006%3A+release+0.3.2+available"></a>
 <h3 class="h4">9 June, 2006: release 0.3.2 available</h3>
 <p>This is a bugfix release.  For details see the <a href="http://tinyurl.com/k9g5c">change log</a>. The release can
       be obtained from <a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/"> a
       nearby mirror</a>.
       </p>
-<a name="N1005A"></a><a name="8+June%2C+2006%3A+FAQ+added+to+Wiki"></a>
+<a name="N1006C"></a><a name="8+June%2C+2006%3A+FAQ+added+to+Wiki"></a>
 <h3 class="h4">8 June, 2006: FAQ added to Wiki</h3>
 <p>Hadoop now has a <a href="http://wiki.apache.org/lucene-hadoop/FAQ">FAQ</a>.  Please
       help make this more complete!
       </p>
-<a name="N10068"></a><a name="5+June%2C+2006%3A+release+0.3.1+available"></a>
+<a name="N1007A"></a><a name="5+June%2C+2006%3A+release+0.3.1+available"></a>
 <h3 class="h4">5 June, 2006: release 0.3.1 available</h3>
 <p>This is a bugfix release.  For details see the <a href="http://tinyurl.com/l6on4">change log</a>. The release can
       be obtained from <a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/"> a
       nearby mirror</a>.
       </p>
-<a name="N1007A"></a><a name="2+June%2C+2006%3A+release+0.3.0+available"></a>
+<a name="N1008C"></a><a name="2+June%2C+2006%3A+release+0.3.0+available"></a>
 <h3 class="h4">2 June, 2006: release 0.3.0 available</h3>
 <p>This includes many fixes, improving performance, scalability
       and reliability and adding new features.  For details see the <a href="http://tinyurl.com/rq3f7">change log</a>. The release can
       be obtained from <a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/"> a
       nearby mirror</a>.
       </p>
-<a name="N1008C"></a><a name="12+May%2C+2006%3A+release+0.2.1+available"></a>
+<a name="N1009E"></a><a name="12+May%2C+2006%3A+release+0.2.1+available"></a>
 <h3 class="h4">12 May, 2006: release 0.2.1 available</h3>
 <p>This fixes a few bugs in release 0.2.0, listed in the <a href="http://tinyurl.com/rnnvz">change log</a>. The
       release can be obtained from <a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/"> a
       nearby mirror</a>.
       </p>
-<a name="N1009E"></a><a name="5+May%2C+2006%3A+release+0.2.0+available"></a>
+<a name="N100B0"></a><a name="5+May%2C+2006%3A+release+0.2.0+available"></a>
 <h3 class="h4">5 May, 2006: release 0.2.0 available</h3>
 <p>We are now aiming for monthly releases.  There have been many
       bug fixes and improvements in the past month.  MapReduce and DFS
@@ -226,24 +235,24 @@ document.write("<text>Last Published:</text> " + document.lastModified);
       details. The release can be obtained from <a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/"> a
       nearby mirror</a>.
       </p>
-<a name="N100B0"></a><a name="2+April%2C+2006%3A+release+0.1.0+available"></a>
+<a name="N100C2"></a><a name="2+April%2C+2006%3A+release+0.1.0+available"></a>
 <h3 class="h4">2 April, 2006: release 0.1.0 available</h3>
 <p>This is the first Hadoop release.  The release is available
       <a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/">
       here</a>.</p>
-<a name="N100BE"></a><a name="6+February%2C+2006%3A+nightly+builds"></a>
+<a name="N100D0"></a><a name="6+February%2C+2006%3A+nightly+builds"></a>
 <h3 class="h4">6 February, 2006: nightly builds</h3>
 <p>Hadoop now has nightly builds.  This automatically creates a
       <a href="http://cvs.apache.org/dist/lucene/hadoop/nightly/">downloadable version of Hadoop every
       night</a>.  All unit tests must pass, or a message is sent to
       the developers mailing list and no new version is created.  This
       also updates the <a href="docs/api/">javadoc</a>.</p>
-<a name="N100D0"></a><a name="3+February%2C+2006%3A+Hadoop+code+moved+out+of+Nutch"></a>
+<a name="N100E2"></a><a name="3+February%2C+2006%3A+Hadoop+code+moved+out+of+Nutch"></a>
 <h3 class="h4">3 February, 2006: Hadoop code moved out of Nutch</h3>
 <p>The Hadoop code has now been moved into its own Subversion
       tree, renamed into packages under <span class="codefrag">org.apache.hadoop</span>.
       All unit tests pass, but little else has yet been tested.</p>
-<a name="N100DD"></a><a name="30+March%2C+2006%3A+Hadoop+project+approved"></a>
+<a name="N100EF"></a><a name="30+March%2C+2006%3A+Hadoop+project+approved"></a>
 <h3 class="h4">30 March, 2006: Hadoop project approved</h3>
 <p>The Lucene PMC has elected to split the Nutch MapReduce and
      distributed filesystem code into a new project named Hadoop.</p>

File diff suppressed because it is too large
+ 26 - 15
site/index.pdf


+ 22 - 15
src/c++/libhdfs/hdfs.c

@@ -574,13 +574,17 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
 
     jthrowable jException;
     jbyteArray jbWarray;
-    jint noWrittenBytes = 0;
 
     //Sanity check
     if (!f || f->type == UNINITIALIZED) {
         errno = EBADF;
         return -1;
     }
+    
+    if (length < 0) {
+    	errno = EINVAL;
+    	return -1;
+    }
 
     //Error checking... make sure that this file is 'writable'
     if (f->type != OUTPUT) {
@@ -589,20 +593,23 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
         return -1;
     }
 
-    //Write the requisite bytes into the file
-    jbWarray = (*env)->NewByteArray(env, length);
-    (*env)->SetByteArrayRegion(env, jbWarray, 0, length, buffer);
-    if (invokeMethod(env, NULL, &jException, INSTANCE, jOutputStream,
-                "org/apache/hadoop/fs/FSDataOutputStream", "write", 
-                "([B)V", jbWarray)) {
-        fprintf(stderr, 
-            "Call to org.apache.hadoop.fs.FSDataOutputStream::write failed!\n"
-            );
-        errno = EINTERNAL;
-        noWrittenBytes = -1;
-    } 
-    (*env)->ReleaseByteArrayElements(env, jbWarray, 
-                (*env)->GetByteArrayElements(env, jbWarray, 0), JNI_ABORT);
+	// 'length' equals 'zero' is a valid use-case according to Posix!
+	if (length != 0) {
+	    //Write the requisite bytes into the file
+	    jbWarray = (*env)->NewByteArray(env, length);
+	    (*env)->SetByteArrayRegion(env, jbWarray, 0, length, buffer);
+	    if (invokeMethod(env, NULL, &jException, INSTANCE, jOutputStream,
+	                "org/apache/hadoop/fs/FSDataOutputStream", "write", 
+	                "([B)V", jbWarray)) {
+	        fprintf(stderr, 
+	            "Call to org.apache.hadoop.fs.FSDataOutputStream::write failed!\n"
+	            );
+	        errno = EINTERNAL;
+	        length = -1;
+	    } 
+	    (*env)->ReleaseByteArrayElements(env, jbWarray, 
+	                (*env)->GetByteArrayElements(env, jbWarray, 0), JNI_ABORT);
+	}
 
    //Return no. of bytes successfully written (libc way)
     //i.e. 'length' itself! ;-)
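
The HADOOP-520 change above brings hdfsWrite in line with write(2) semantics: failures return -1 with errno set (EBADF for a bad handle, EINVAL for a negative length, EINTERNAL when the JNI call fails), and a zero-length write is now accepted as a legal no-op. A minimal caller sketch, assuming the standard libhdfs entry points of this era (hdfsConnect, hdfsOpenFile, hdfsCloseFile, hdfsDisconnect) and a hypothetical output path:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include "hdfs.h"

    int main(void) {
        /* Connect to the default filesystem named in the Hadoop config. */
        hdfsFS fs = hdfsConnect("default", 0);
        if (!fs) { fprintf(stderr, "hdfsConnect failed\n"); return 1; }

        /* "/tmp/example.txt" is a hypothetical path for illustration. */
        hdfsFile f = hdfsOpenFile(fs, "/tmp/example.txt", O_WRONLY, 0, 0, 0);
        if (!f) { fprintf(stderr, "hdfsOpenFile failed\n"); return 1; }

        const char *msg = "hello\n";
        tSize n = hdfsWrite(fs, f, msg, (tSize)strlen(msg));
        if (n == -1) {
            /* After HADOOP-520 the error is reported the libc way. */
            fprintf(stderr, "hdfsWrite failed, errno=%d\n", errno);
        }

        hdfsCloseFile(fs, f);
        hdfsDisconnect(fs);
        return 0;
    }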

+ 5 - 1
src/java/org/apache/hadoop/dfs/DataStorage.java

@@ -35,6 +35,9 @@ class DataStorage {
   private ArrayList storageFiles = new ArrayList();
   private ArrayList storageLocks = new ArrayList();
   
+  // cache away the names of all passed in dirs
+  private File[] origDirs = null;
+  
   // cache away the names of locked dirs
   private File[] dirs = null;
   
@@ -65,6 +68,7 @@ class DataStorage {
    */
   public DataStorage( int curVersion, File[] dataDirs ) throws IOException {
     this.version = curVersion;
+    this.origDirs = dataDirs;
     for (int idx = 0; idx < dataDirs.length; idx++) {
       storageFiles.add(idx, new RandomAccessFile( 
                           new File(dataDirs[idx], STORAGE_INFO_FILE_NAME ), 
@@ -129,7 +133,7 @@ class DataStorage {
     FileLock lock = file.getChannel().tryLock();
     if (lock == null) {
       // log a warning
-      LOG.warn("Cannot lock storage file in directory "+dirs[idx].getName());
+      LOG.warn("Cannot lock storage file in directory "+origDirs[idx].getName());
       // remove the file from fileList, and close it
       storageFiles.add(idx, null);
       file.close();
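
Background on the HADOOP-526 fix above: dirs is filled only with directories that lock successfully, so when a second datanode pointed at the same storage directory and tryLock() returned null, dirs[idx] was still null and the warning itself threw the NullPointerException. Caching the constructor's arguments in origDirs gives the log message a name that always exists. A standalone sketch of the locking condition involved (hypothetical demo code, not part of Hadoop; the "storage" filename stands in for STORAGE_INFO_FILE_NAME):

    import java.io.File;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;

    public class LockProbe {
        public static void main(String[] args) throws Exception {
            File dir = new File(args[0]);
            RandomAccessFile file =
                new RandomAccessFile(new File(dir, "storage"), "rws");
            // Across processes, tryLock() returns null when another process
            // already holds the lock: the situation two datanodes sharing
            // one directory run into.  (Within a single JVM it throws
            // OverlappingFileLockException instead.)
            FileLock lock = file.getChannel().tryLock();
            if (lock == null) {
                System.err.println("Cannot lock storage file in directory "
                                   + dir.getName());
            } else {
                System.out.println("Locked storage in " + dir.getName());
                lock.release();
            }
            file.close();
        }
    }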

+ 1 - 3
src/java/org/apache/hadoop/io/ObjectWritable.java

@@ -216,9 +216,7 @@ public class ObjectWritable implements Writable, Configurable {
     } else {                                      // Writable
       Class instanceClass = null;
       try {
-        instanceClass = 
-          Class.forName(UTF8.readString(in), true, 
-                        Thread.currentThread().getContextClassLoader());
+        instanceClass = conf.getClassByName(UTF8.readString(in));
       } catch (ClassNotFoundException e) {
         throw new RuntimeException("readObject can't find class", e);
       }
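
The HADOOP-521 fix above replaces Class.forName with the thread context ClassLoader, which can legitimately be null (threads attached from native code, as with libhdfs, may have none), by conf.getClassByName, which resolves through a loader the Configuration controls. A hypothetical helper illustrating the underlying pattern (not the actual Hadoop source):

    /** Prefer the context ClassLoader, but fall back to one that is
     *  always available when the thread has none, e.g. threads attached
     *  from native code via JNI (the libhdfs case, HADOOP-521). */
    class ClassResolver {
        static Class<?> resolve(String name) throws ClassNotFoundException {
            ClassLoader loader = Thread.currentThread().getContextClassLoader();
            if (loader == null) {
                loader = ClassResolver.class.getClassLoader();
            }
            return Class.forName(name, true, loader);
        }
    }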

+ 13 - 8
src/java/org/apache/hadoop/io/SequenceFile.java

@@ -980,14 +980,19 @@ public class SequenceFile {
       
       // if version >= 5
       // setup the compression codec
-      if (version >= CUSTOM_COMPRESS_VERSION && this.decompress) {    
-        try {
-          this.codec = (CompressionCodec)
-          ReflectionUtils.newInstance(conf.getClassByName(Text.readString(in)),
-              conf);
-        } catch (ClassNotFoundException cnfe) {
-          cnfe.printStackTrace();
-          throw new IllegalArgumentException("Unknown codec: " + cnfe);
+      if (decompress) {
+        if (version >= CUSTOM_COMPRESS_VERSION) {
+          String codecClassname = Text.readString(in);
+          try {
+            Class codecClass = conf.getClassByName(codecClassname);
+            this.codec = (CompressionCodec)
+                 ReflectionUtils.newInstance(codecClass, conf);
+          } catch (ClassNotFoundException cnfe) {
+            throw new IllegalArgumentException("Unknown codec: " + 
+                                               codecClassname, cnfe);
+          }
+        } else {
+          codec = new DefaultCodec();
         }
       }
       

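The HADOOP-529 change above handles value-compressed files written before 0.6.0: their headers predate CUSTOM_COMPRESS_VERSION and carry no codec classname, so codec stayed null and later reads threw a NullPointerException. New-format files still name their codec; everything older now falls back to DefaultCodec, and the ClassNotFoundException is chained as a cause instead of printed. The general versioned-header pattern, as a hypothetical standalone sketch (the constant and the use of readUTF are assumptions for the demo; the real code reads a Text string):

    import java.io.DataInput;
    import java.io.IOException;

    class VersionedHeader {
        // A field introduced in format version N needs an explicit default
        // when reading files written by older versions, or it stays null.
        static final int CODEC_NAME_VERSION = 5;

        static String readCodecName(DataInput in, int version) throws IOException {
            if (version >= CODEC_NAME_VERSION) {
                return in.readUTF();  // newer files record their codec
            }
            return "org.apache.hadoop.io.compress.DefaultCodec";  // older files
        }
    }
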
+ 4 - 13
src/java/org/apache/hadoop/mapred/JobInProgress.java

@@ -109,24 +109,15 @@ class JobInProgress {
         //
         String jobFile = profile.getJobFile();
 
-        JobConf jd = new JobConf(localJobFile);
         FileSystem fs = FileSystem.get(conf);
-        String ifClassName = jd.get("mapred.input.format.class");
-        InputFormat inputFormat;
-        if (ifClassName != null && localJarFile != null) {
-          try {
+        if (localJarFile != null) {
             ClassLoader loader =
               new URLClassLoader(new URL[]{ localFs.pathToFile(localJarFile).toURL() });
-            Class inputFormatClass = Class.forName(ifClassName, true, loader);
-            inputFormat = (InputFormat)inputFormatClass.newInstance();
-          } catch (Exception e) {
-            throw new IOException(e.toString());
-          }
-        } else {
-          inputFormat = jd.getInputFormat();
+            conf.setClassLoader(loader);
         }
+        InputFormat inputFormat = conf.getInputFormat();
 
-        FileSplit[] splits = inputFormat.getSplits(fs, jd, numMapTasks);
+        FileSplit[] splits = inputFormat.getSplits(fs, conf, numMapTasks);
 
         //
         // sort splits by decreasing length, to reduce job's tail
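
With the HADOOP-523 fix above, JobInProgress stops duplicating class-resolution logic: it registers the job's jar on the JobConf's ClassLoader and lets conf.getInputFormat() perform the lookup, removing the fragile explicit-classname branch involved in the NullPointerException seen when TextInputFormat was specified explicitly. From the job author's side the contract is just the pair of calls below, mirroring the TestMiniMRWithDFS change further down (a minimal sketch against the 0.6-era mapred API):

    import org.apache.hadoop.mapred.InputFormat;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.TextInputFormat;

    class InputFormatSetup {
        static InputFormat resolve(JobConf conf) {
            // Declare the format explicitly (the case HADOOP-523 fixes)...
            conf.setInputFormat(TextInputFormat.class);
            // ...and let the conf instantiate it through its ClassLoader.
            return conf.getInputFormat();
        }
    }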

+ 9 - 0
src/site/src/documentation/content/xdocs/index.xml

@@ -14,6 +14,15 @@
     <section>
       <title>News</title>
 
+      <section>
+      <title>13 September, 2006: release 0.6.1 available</title>
+      <p>For details see the <a
+      href="http://tinyurl.com/lykp4">release notes</a>. The release can
+      be obtained from <a
+      href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/"> a
+      nearby mirror</a>.
+      </p> </section>
+
       <section>
       <title>8 September, 2006: release 0.6.0 available</title>
       <p>For details see the <a

+ 1 - 0
src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java

@@ -56,6 +56,7 @@ public class TestMiniMRWithDFS extends TestCase {
     conf.set("fs.default.name", fileSys);
     conf.set("mapred.job.tracker", jobTracker);
     conf.setJobName("wordcount");
+    conf.setInputFormat(TextInputFormat.class);
     
     // the keys are words (strings)
     conf.setOutputKeyClass(Text.class);

Some files were not shown because too many files changed in this diff