
HADOOP-2804. Add support to publish CHANGES.txt as HTML when running the Ant 'docs' target.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@637304 13f79535-47bb-0310-9956-ffa450edef68
Nigel Daley 17 years ago
Parent
Current commit
1e5562161f

+ 3 - 0
CHANGES.txt

@@ -95,6 +95,9 @@ Trunk (unreleased changes)
     HADOOP-2775.  Adds unit test framework for HOD. 
     (Vinod Kumar Vavilapalli via ddas).
 
+    HADOOP-2804.  Add support to publish CHANGES.txt as HTML when running
+    the Ant 'docs' target. (nigel)
+
   OPTIMIZATIONS
 
     HADOOP-2790.  Fixed inefficient method hasSpeculativeTask by removing

+ 12 - 0
build.xml

@@ -40,6 +40,7 @@
   <property name="docs.dir" value="${basedir}/docs"/>
   <property name="docs.dir" value="${basedir}/docs"/>
   <property name="contrib.dir" value="${basedir}/src/contrib"/>
   <property name="contrib.dir" value="${basedir}/src/contrib"/>
   <property name="docs.src" value="${basedir}/src/docs"/>
   <property name="docs.src" value="${basedir}/src/docs"/>
+  <property name="changes.src" value="${docs.src}/changes"/>
   <property name="c++.src" value="${basedir}/src/c++"/>
   <property name="c++.src" value="${basedir}/src/c++"/>
   <property name="c++.utils.src" value="${c++.src}/utils"/>
   <property name="c++.utils.src" value="${c++.src}/utils"/>
   <property name="c++.pipes.src" value="${c++.src}/pipes"/>
   <property name="c++.pipes.src" value="${c++.src}/pipes"/>
@@ -674,6 +675,7 @@
     </copy>
     <style basedir="${conf.dir}" destdir="${docs.dir}"
            includes="hadoop-default.xml" style="conf/configuration.xsl"/>
+    <antcall target="changes-to-html"/>
   </target>
 
   <target name="forrest.check" unless="forrest.home">
@@ -718,6 +720,16 @@
     </javadoc>
   </target>
 
+  <target name="changes-to-html" description="Convert CHANGES.txt into an html file">
+    <mkdir dir="${docs.dir}"/>
+    <exec executable="perl" input="CHANGES.txt" output="${docs.dir}/changes.html" failonerror="true">
+      <arg value="${changes.src}/changes2html.pl"/>
+    </exec>
+    <copy todir="${docs.dir}">
+      <fileset dir="${changes.src}" includes="*.css"/>
+    </copy>
+  </target>
+
   <!-- ================================================================== -->
   <!-- D I S T R I B U T I O N                                            -->
   <!-- ================================================================== -->
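Note on the new target above: changes-to-html feeds CHANGES.txt to src/docs/changes/changes2html.pl on standard input and captures standard output into docs/changes.html (the input and output attributes of Ant's exec task handle the redirection), then copies the accompanying *.css files next to it; wiring it into the existing docs target via antcall means a plain "ant docs" now also regenerates the HTML change log. The Perl script itself is not shown in this diff, so the Python snippet below is only an illustrative sketch of the kind of transformation it performs, inferred from the CHANGES.txt layout and the generated docs/changes.html further down (release headings become h2, section headings become sub-headings, HADOOP-NNNN ids are linked to JIRA). It is not the real changes2html.pl, and the file name sketch.py used for it is hypothetical.

#!/usr/bin/env python3
# Illustrative sketch only -- NOT the changes2html.pl invoked by build.xml.
# Assumes the CHANGES.txt layout visible above: release headings at column 0,
# section headings (NEW FEATURES, BUG FIXES, ...) indented two spaces, and
# individual change entries indented four spaces.
import re
import sys

JIRA = "http://issues.apache.org/jira/browse/"

def convert(lines):
    out = [
        "<html><head><title>Hadoop Change Log</title>",
        '<link rel="stylesheet" href="ChangesFancyStyle.css" title="Fancy">',
        '<link rel="alternate stylesheet" href="ChangesSimpleStyle.css" title="Simple">',
        "</head><body>",
        "<h1>Hadoop Change Log</h1>",
    ]
    for raw in lines:
        line = raw.rstrip()
        if not line:
            continue
        if not line.startswith(" "):
            # Release heading, e.g. "Trunk (unreleased changes)"
            out.append("<h2>%s</h2>" % line)
        elif line.startswith("  ") and not line.startswith("    "):
            # Section heading, e.g. "  OPTIMIZATIONS"
            out.append("<h3>%s</h3>" % line.strip())
        else:
            # Change entry: link HADOOP-NNNN ids to JIRA. One <li> per line;
            # the real script also merges continuation lines and emits the
            # collapsible nested lists seen in docs/changes.html below.
            item = re.sub(r"(HADOOP-\d+)", r'<a href="' + JIRA + r'\1">\1</a>',
                          line.strip())
            out.append("<li>%s</li>" % item)
    out.append("</body></html>")
    return "\n".join(out) + "\n"

if __name__ == "__main__":
    sys.stdout.write(convert(sys.stdin.readlines()))

Invoked as "python3 sketch.py < CHANGES.txt > changes.html", it mirrors the stdin/stdout redirection that the exec element in the target sets up for the Perl script.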

+ 170 - 0
docs/ChangesFancyStyle.css

@@ -0,0 +1,170 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+/**
+ * General
+ */
+
+img { border: 0; }
+
+#content table {
+  border: 0;
+  width: 100%;
+}
+/*Hack to get IE to render the table at 100%*/
+* html #content table { margin-left: -3px; }
+
+#content th,
+#content td {
+  margin: 0;
+  padding: 0;
+  vertical-align: top;
+}
+
+.clearboth {
+  clear: both;
+}
+
+.note, .warning, .fixme {
+  border: solid black 1px;
+  margin: 1em 3em;
+}
+
+.note .label {
+  background: #369;
+  color: white;
+  font-weight: bold;
+  padding: 5px 10px;
+}
+.note .content {
+  background: #F0F0FF;
+  color: black;
+  line-height: 120%;
+  font-size: 90%;
+  padding: 5px 10px;
+}
+.warning .label {
+  background: #C00;
+  color: white;
+  font-weight: bold;
+  padding: 5px 10px;
+}
+.warning .content {
+  background: #FFF0F0;
+  color: black;
+  line-height: 120%;
+  font-size: 90%;
+  padding: 5px 10px;
+}
+.fixme .label {
+  background: #C6C600;
+  color: black;
+  font-weight: bold;
+  padding: 5px 10px;
+}
+.fixme .content {
+  padding: 5px 10px;
+}
+
+/**
+ * Typography
+ */
+
+body {
+  font-family: verdana, "Trebuchet MS", arial, helvetica, sans-serif;
+  font-size: 100%;
+}
+
+#content {
+  font-family: Georgia, Palatino, Times, serif;
+  font-size: 95%;
+}
+#tabs {
+  font-size: 70%;
+}
+#menu {
+  font-size: 80%;
+}
+#footer {
+  font-size: 70%;
+}
+
+h1, h2, h3, h4, h5, h6 {
+  font-family: "Trebuchet MS", verdana, arial, helvetica, sans-serif;
+  font-weight: bold;
+  margin-top: 1em;
+  margin-bottom: .5em;
+}
+
+h1 {
+    margin-top: 0;
+    margin-bottom: 1em;
+  font-size: 1.4em;
+  background-color: #73CAFF;
+}
+#content h1 {
+  font-size: 160%;
+  margin-bottom: .5em;
+}
+#menu h1 {
+  margin: 0;
+  padding: 10px;
+  background: #336699;
+  color: white;
+}
+h2 { 
+  font-size: 120%;
+  background-color: #73CAFF;
+}
+h3 { font-size: 100%; }
+h4 { font-size: 90%; }
+h5 { font-size: 80%; }
+h6 { font-size: 75%; }
+
+p {
+  line-height: 120%;
+  text-align: left;
+  margin-top: .5em;
+  margin-bottom: 1em;
+}
+
+#content li,
+#content th,
+#content td,
+#content li ul,
+#content li ol{
+  margin-top: .5em;
+  margin-bottom: .5em;
+}
+
+
+#content li li,
+#minitoc-area li{
+  margin-top: 0em;
+  margin-bottom: 0em;
+}
+
+#content .attribution {
+  text-align: right;
+  font-style: italic;
+  font-size: 85%;
+  margin-top: 1em;
+}
+
+.codefrag {
+  font-family: "Courier New", Courier, monospace;
+  font-size: 110%;
+}

+ 49 - 0
docs/ChangesSimpleStyle.css

@@ -0,0 +1,49 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+body {
+  font-family: Courier New, monospace;
+  font-size: 10pt;
+}
+
+h1 {
+  font-family: Courier New, monospace;
+  font-size: 10pt;
+}
+
+h2 {
+  font-family: Courier New, monospace;
+  font-size: 10pt; 
+}
+
+h3 {
+  font-family: Courier New, monospace;
+  font-size: 10pt; 
+}
+
+a:link {
+  color: blue;
+}
+
+a:visited {
+  color: purple; 
+}
+
+li {
+  margin-top: 1em;
+  margin-bottom: 1em;
+}

+ 3169 - 0
docs/changes.html

@@ -0,0 +1,3169 @@
+<!--
+**********************************************************
+** WARNING: This file is generated from CHANGES.txt by the 
+**          Perl script 'changes2html.pl'.
+**          Do *not* edit this file!
+**********************************************************
+          
+****************************************************************************
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+****************************************************************************
+-->
+<html>
+<head>
+  <title>Hadoop Change Log</title>
+  <link rel="stylesheet" href="ChangesFancyStyle.css" title="Fancy">
+  <link rel="alternate stylesheet" href="ChangesSimpleStyle.css" title="Simple">
+  <META http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
+  <SCRIPT>
+    function toggleList(e) {
+      element = document.getElementById(e).style;
+      element.display == 'none' ? element.display = 'block' : element.display='none';
+    }
+    function collapse() {
+      for (var i = 0; i < document.getElementsByTagName("ul").length; i++) {
+        var list = document.getElementsByTagName("ul")[i];
+        if (list.id != 'trunk_(unreleased_changes)_' && list.id != 'release_0.16.1_-_2008-03-13_') {
+          list.style.display = "none";
+        }
+      }
+      for (var i = 0; i < document.getElementsByTagName("ol").length; i++) {
+        document.getElementsByTagName("ol")[i].style.display = "none"; 
+      }
+    }
+    window.onload = collapse;
+  </SCRIPT>
+</head>
+<body>
+
+<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Scalable Computing Platform"></a>
+<h1>Hadoop Change Log</h1>
+
+<h2><a href="javascript:toggleList('trunk_(unreleased_changes)_')">Trunk (unreleased changes)
+</a></h2>
+<ul id="trunk_(unreleased_changes)_">
+  <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._incompatible_changes_')">  INCOMPATIBLE CHANGES
+</a>&nbsp;&nbsp;&nbsp;(9)
+    <ol id="trunk_(unreleased_changes)_._incompatible_changes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2786">HADOOP-2786</a>.  Move hbase out of hadoop core
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2345">HADOOP-2345</a>.  New HDFS transactions to support appending
+to files.  Disk layout version changed from -11 to -12.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2192">HADOOP-2192</a>. Error messages from "dfs mv" command improved.<br />(Mahadev Konar via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1902">HADOOP-1902</a>. "dfs du" command without any arguments operates on the
+current working directory.<br />(Mahadev Konar via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2873">HADOOP-2873</a>.  Fixed bad disk format introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-2345">HADOOP-2345</a>.
+Disk layout version changed from -12 to -13. See changelist 630992<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1985">HADOOP-1985</a>.  This addresses rack-awareness for Map tasks and for
+HDFS in a uniform way.<br />(ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1986">HADOOP-1986</a>.  Add support for a general serialization mechanism for
+Map Reduce.<br />(tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-771">HADOOP-771</a>. FileSystem.delete() takes an explicit parameter that
+specifies whether a recursive delete is intended.<br />(Mahadev Konar via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2470">HADOOP-2470</a>. Remove getContentLength(String), open(String, long, long)
+and isDir(String) from ClientProtocol. ClientProtocol version changed
+from 26 to 27. (Tsz Wo (Nicholas), SZE via cdouglas)
+</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._new_features_')">  NEW FEATURES
+</a>&nbsp;&nbsp;&nbsp;(6)
+    <ol id="trunk_(unreleased_changes)_._new_features_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1398">HADOOP-1398</a>.  Add HBase in-memory block cache.<br />(tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2178">HADOOP-2178</a>.  Job History on DFS.<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2063">HADOOP-2063</a>. A new parameter to dfs -get command to fetch a file
+even if it is corrupted.  (Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2219">HADOOP-2219</a>. A new command "df -count" that counts the number of
+files and directories.  (Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2906">HADOOP-2906</a>. Add an OutputFormat capable of using keys, values, and
+config params to map records to different output files.<br />(Runping Qi via cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2346">HADOOP-2346</a>. Utilities to support timeout while writing to sockets.
+DFSClient and DataNode sockets have 10min write timeout.<br />(rangadi)</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._improvements_')">  IMPROVEMENTS
+</a>&nbsp;&nbsp;&nbsp;(13)
+    <ol id="trunk_(unreleased_changes)_._improvements_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2655">HADOOP-2655</a>. Copy on write for data and metadata files in the
+presence of snapshots. Needed for supporting appends to HDFS
+files.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1967">HADOOP-1967</a>.  When a Path specifies the same scheme as the default
+FileSystem but no authority, the default FileSystem's authority is
+used.  Also add warnings for old-format FileSystem names, accessor
+methods for fs.default.name, and check for null authority in HDFS.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2895">HADOOP-2895</a>. Let the profiling string be configurable.<br />(Martin Traverso via cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-910">HADOOP-910</a>. Enables Reduces to do merges for the on-disk map output files
+in parallel with their copying.<br />(Amar Kamat via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2833">HADOOP-2833</a>. Do not use "Dr. Who" as the default user in JobClient.
+A valid user name is required. (Tsz Wo (Nicholas), SZE via rangadi)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-730">HADOOP-730</a>. Use rename rather than copy for local renames.<br />(cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2810">HADOOP-2810</a>. Updated the Hadoop Core logo.<br />(nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2057">HADOOP-2057</a>.  Streaming should optionally treat a non-zero exit status
+of a child process as a failed task.<br />(Rick Cox via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2765">HADOOP-2765</a>. Enables specifying ulimits for streaming/pipes tasks<br />(ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2888">HADOOP-2888</a>. Make gridmix scripts more readily configurable and amenable
+to automated execution.<br />(Mukund Madhugiri via cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2908">HADOOP-2908</a>.  A document that describes the DFS Shell command.<br />(Mahadev Konar via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2981">HADOOP-2981</a>.  Update README.txt to reflect the upcoming use of
+cryptography.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2775">HADOOP-2775</a>.  Adds unit test framework for HOD.
+(Vinod Kumar Vavilapalli via ddas).
+</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._optimizations_')">  OPTIMIZATIONS
+</a>&nbsp;&nbsp;&nbsp;(4)
+    <ol id="trunk_(unreleased_changes)_._optimizations_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2790">HADOOP-2790</a>.  Fixed inefficient method hasSpeculativeTask by removing
+repetitive calls to get the current time and late checking to see if
+we want speculation on at all.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2758">HADOOP-2758</a>. Reduce buffer copies in DataNode when data is read from
+HDFS, without negatively affecting read throughput.<br />(rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2399">HADOOP-2399</a>. Input key and value to combiner and reducer is reused.
+(Owen O'Malley via ddas).
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2423">HADOOP-2423</a>.  Code optimization in FSNamesystem.mkdirs.
+(Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(34)
+    <ol id="trunk_(unreleased_changes)_._bug_fixes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2195">HADOOP-2195</a>. '-mkdir' behaviour is now closer to Linux shell in case of
+errors.<br />(Mahadev Konar via rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2190">HADOOP-2190</a>. bring behaviour '-ls' and '-du' closer to Linux shell
+commands in case of errors.<br />(Mahadev Konar via rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2193">HADOOP-2193</a>. 'fs -rm' and 'fs -rmr' show error message when the target
+file does not exist.<br />(Mahadev Konar via rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2738">HADOOP-2738</a> Text is not subclassable because set(Text) and compareTo(Object)
+access the other instance's private members directly.<br />(jimk)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2779">HADOOP-2779</a>.  Remove the references to HBase in the build.xml.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2194">HADOOP-2194</a>. dfs cat on a non-existent file throws FileNotFoundException.<br />(Mahadev Konar via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2767">HADOOP-2767</a>. Fix for NetworkTopology erroneously skipping the last leaf
+node on a rack.<br />(Hairong Kuang and Mark Butler via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1593">HADOOP-1593</a>. FsShell works with paths in non-default FileSystem.<br />(Mahadev Konar via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2191">HADOOP-2191</a>. du and dus command on non-existent directory gives
+appropriate error message.<br />(Mahadev Konar via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2832">HADOOP-2832</a>. Remove tabs from code of DFSClient for better
+indentation.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2844">HADOOP-2844</a>. distcp closes file handles for sequence files.
+(Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2727">HADOOP-2727</a>. Fix links in Web UI of the hadoop daemons and some docs<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2871">HADOOP-2871</a>. Fixes a problem to do with file: URI in the JobHistory init.<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2800">HADOOP-2800</a>.  Deprecate SetFile.Writer constructor not the whole class.<br />(Johan Oskarsson via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2891">HADOOP-2891</a>.  DFSClient.close() closes all open files.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2845">HADOOP-2845</a>.  Fix dfsadmin disk utilization report on Solaris.<br />(Martin Traverso via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2912">HADOOP-2912</a>. MiniDFSCluster restart should wait for namenode to exit
+safemode. This was causing TestFsck to fail.<br />(Mahadev Konar via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2820">HADOOP-2820</a>. The following classes in streaming are removed :
+StreamLineRecordReader StreamOutputFormat StreamSequenceRecordReader.<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2819">HADOOP-2819</a>. The following methods in JobConf are removed:
+getInputKeyClass() setInputKeyClass getInputValueClass()
+setInputValueClass(Class theClass) setSpeculativeExecution
+getSpeculativeExecution()<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2817">HADOOP-2817</a>. Removes deprecated mapred.tasktracker.tasks.maximum and
+ClusterStatus.getMaxTasks().<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2821">HADOOP-2821</a>. Removes deprecated ShellUtil and ToolBase classes from
+the util package.<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2934">HADOOP-2934</a>. The namenode was encountreing a NPE while loading
+leases from the fsimage. Fixed.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2938">HADOOP-2938</a>. Some fs commands did not glob paths.
+(Tsz Wo (Nicholas), SZE via rangadi)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2943">HADOOP-2943</a>. Compression of intermediate map output causes failures
+in the merge.<br />(cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2870">HADOOP-2870</a>.  DataNode and NameNode closes all connections while
+shutting down.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2973">HADOOP-2973</a>. Fix TestLocalDFS for Windows platform.
+(Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2971">HADOOP-2971</a>. select multiple times if it returns early in
+SocketIOWithTimeout.<br />(rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2955">HADOOP-2955</a>. Fix TestCrcCorruption test failures caused by <a href="http://issues.apache.org/jira/browse/HADOOP-2758">HADOOP-2758</a><br />(rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2657">HADOOP-2657</a>. A flush call on the DFSOutputStream flushes the last
+partial CRC chunk too.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2974">HADOOP-2974</a>. IPC unit tests used "0.0.0.0" to connect to server, which
+is not always supported.<br />(rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2996">HADOOP-2996</a>. Fixes uses of StringBuffer in StreamUtils class.<br />(Dave Brosius via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2995">HADOOP-2995</a>. Fixes StreamBaseRecordReader's getProgress to return a
+floating point number.<br />(Dave Brosius via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2972">HADOOP-2972</a>. Fix for a NPE in FSDataset.invalidate.<br />(Mahadev Konar via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2994">HADOOP-2994</a>. Code cleanup for DFSClient: remove redundant
+conversions from string to string.<br />(Dave Brosius via dhruba)</li>
+    </ol>
+  </li>
+</ul>
+<h2><a href="javascript:toggleList('release_0.16.1_-_2008-03-13_')">Release 0.16.1 - 2008-03-13
+</a></h2>
+<ul id="release_0.16.1_-_2008-03-13_">
+  <li><a href="javascript:toggleList('release_0.16.1_-_2008-03-13_._incompatible_changes_')">  INCOMPATIBLE CHANGES
+</a>&nbsp;&nbsp;&nbsp;(2)
+    <ol id="release_0.16.1_-_2008-03-13_._incompatible_changes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2861">HADOOP-2861</a>. Improve the user interface for the HOD commands.
+Command line structure has changed.<br />(Hemanth Yamijala via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2869">HADOOP-2869</a>. Deprecate SequenceFile.setCompressionType in favor of
+SequenceFile.createWriter, SequenceFileOutputFormat.setCompressionType,
+and JobConf.setMapOutputCompressionType. (Arun C Murthy via cdouglas)
+Configuration changes to hadoop-default.xml:
+  deprecated io.seqfile.compression.type
+</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.16.1_-_2008-03-13_._improvements_')">  IMPROVEMENTS
+</a>&nbsp;&nbsp;&nbsp;(3)
+    <ol id="release_0.16.1_-_2008-03-13_._improvements_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2371">HADOOP-2371</a>. User guide for file permissions in HDFS.<br />(Robert Chansler via rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2730">HADOOP-2730</a>. HOD documentation update.<br />(Vinod Kumar Vavilapalli via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2911">HADOOP-2911</a>. Make the information printed by the HOD allocate and
+info commands less verbose and clearer.<br />(Vinod Kumar via nigel)</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.16.1_-_2008-03-13_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(35)
+    <ol id="release_0.16.1_-_2008-03-13_._bug_fixes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2789">HADOOP-2789</a>. Race condition in IPC Server Responder that could close
+connections early.<br />(Raghu Angadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2785">HADOOP-2785</a>. minor. Fix a typo in Datanode block verification<br />(Raghu Angadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2788">HADOOP-2788</a>. minor. Fix help message for chgrp shell command (Raghu Angadi).
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1188">HADOOP-1188</a>. fstime file is updated when a storage directory containing
+namespace image becomes inaccessible.<br />(shv)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2787">HADOOP-2787</a>. An application can set a configuration variable named
+dfs.umask to set the umask that is used by DFS.
+(Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2780">HADOOP-2780</a>. The default socket buffer size for DataNodes is 128K.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2716">HADOOP-2716</a>. Superuser privileges for the Balancer.
+(Tsz Wo (Nicholas), SZE via shv)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2754">HADOOP-2754</a>. Filter out .crc files from local file system listing.<br />(Hairong Kuang via shv)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2733">HADOOP-2733</a>. Fix compiler warnings in test code.
+(Tsz Wo (Nicholas), SZE via cdouglas)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2725">HADOOP-2725</a>. Modify distcp to avoid leaving partially copied files at
+the destination after encountering an error. (Tsz Wo (Nicholas), SZE
+via cdouglas)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2391">HADOOP-2391</a>. Cleanup job output directory before declaring a job as
+SUCCESSFUL.<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2808">HADOOP-2808</a>. Minor fix to FileUtil::copy to mind the overwrite
+formal.<br />(cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2683">HADOOP-2683</a>. Moving UGI out of the RPC Server.
+(Tsz Wo (Nicholas), SZE via shv)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2814">HADOOP-2814</a>. Fix for NPE in datanode in unit test TestDataTransferProtocol.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2811">HADOOP-2811</a>. Dump of counters in job history does not add comma between
+groups.<br />(runping via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2735">HADOOP-2735</a>. Enables setting TMPDIR for tasks.<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2843">HADOOP-2843</a>. Fix protections on map-side join classes to enable derivation.<br />(cdouglas via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2840">HADOOP-2840</a>. Fix gridmix scripts to correctly invoke the java sort through
+the proper jar.<br />(Mukund Madhugiri via cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2766">HADOOP-2766</a>. Enables setting of HADOOP_OPTS env variable for the hadoop
+daemons through HOD.<br />(Vinod Kumar Vavilapalli via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2769">HADOOP-2769</a>.  TestNNThroughputBnechmark should not use a fixed port for
+the namenode http port.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2852">HADOOP-2852</a>. Update gridmix benchmark to avoid an artifically long tail.<br />(cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2894">HADOOP-2894</a>. Fix a problem to do with tasktrackers failing to connect to
+JobTracker upon reinitialization. (Owen O'Malley via ddas).
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2903">HADOOP-2903</a>.  Fix exception generated by Metrics while using pushMetric().<br />(girish vaitheeswaran via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2904">HADOOP-2904</a>.  Fix to RPC metrics to log the correct host name.<br />(girish vaitheeswaran via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2918">HADOOP-2918</a>.  Improve error logging so that dfs writes failure with
+"No lease on file" can be diagnosed.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2923">HADOOP-2923</a>.  Add SequenceFileAsBinaryInputFormat, which was
+missed in the commit for <a href="http://issues.apache.org/jira/browse/HADOOP-2603">HADOOP-2603</a>.<br />(cdouglas via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2847">HADOOP-2847</a>.  Ensure idle cluster cleanup works even if the JobTracker
+becomes unresponsive to RPC calls.<br />(Hemanth Yamijala via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2809">HADOOP-2809</a>.  Fix HOD syslog config syslog-address so that it works.<br />(Hemanth Yamijala via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2931">HADOOP-2931</a>. IOException thrown by DFSOutputStream had wrong stack
+trace in some cases.<br />(Michael Bieniosek via rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2883">HADOOP-2883</a>. Write failures and data corruptions on HDFS files.
+The write timeout is back to what it was on 0.15 release. Also, the
+datnodes flushes the block file buffered output stream before
+sending a positive ack for the packet back to the client.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2925">HADOOP-2925</a>. Fix HOD to create the mapred system directory using a
+naming convention that will avoid clashes in multi-user shared
+cluster scenario.<br />(Hemanth Yamijala via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2756">HADOOP-2756</a>. NPE in DFSClient while closing DFSOutputStreams
+under load.<br />(rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2958">HADOOP-2958</a>. Fixed FileBench which broke due to <a href="http://issues.apache.org/jira/browse/HADOOP-2391">HADOOP-2391</a> which performs
+a check for existence of the output directory and a trivial bug in
+GenericMRLoadGenerator where min/max word lenghts were identical since
+they were looking at the same config variables<br />(Chris Douglas via
+acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2915">HADOOP-2915</a>. Fixed FileSystem.CACHE so that a username is included
+in the cache key. (Tsz Wo (Nicholas), SZE via nigel)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2813">HADOOP-2813</a>. TestDU unit test uses its own directory to run its
+sequence of tests.<br />(Mahadev Konar via dhruba)</li>
+    </ol>
+  </li>
+</ul>
+<h2><a href="javascript:toggleList('older')">Older Releases</a></h2>
+<ul id="older">
+<h3><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_')">Release 0.16.0 - 2008-02-07
+</a></h3>
+<ul id="release_0.16.0_-_2008-02-07_">
+  <li><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_._incompatible_changes_')">  INCOMPATIBLE CHANGES
+</a>&nbsp;&nbsp;&nbsp;(14)
+    <ol id="release_0.16.0_-_2008-02-07_._incompatible_changes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1245">HADOOP-1245</a>.  Use the mapred.tasktracker.tasks.maximum value
+configured on each tasktracker when allocating tasks, instead of
+the value configured on the jobtracker. InterTrackerProtocol
+version changed from 5 to 6.<br />(Michael Bieniosek via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1843">HADOOP-1843</a>. Removed code from Configuration and JobConf deprecated by
+<a href="http://issues.apache.org/jira/browse/HADOOP-785">HADOOP-785</a> and a minor fix to Configuration.toString. Specifically the
+important change is that mapred-default.xml is no longer supported and
+Configuration no longer supports the notion of default/final resources.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1302">HADOOP-1302</a>.  Remove deprecated abacus code from the contrib directory.
+This also fixes a configuration bug in AggregateWordCount, so that the
+job now works.<br />(enis)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2288">HADOOP-2288</a>.  Enhance FileSystem API to support access control.
+(Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2184">HADOOP-2184</a>.  RPC Support for user permissions and authentication.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2185">HADOOP-2185</a>.  RPC Server uses any available port if the specified
+port is zero. Otherwise it uses the specified port. Also combines
+the configuration attributes for the servers' bind address and
+port from "x.x.x.x" and "y" to "x.x.x.x:y".
+Deprecated configuration variables:
+  dfs.info.bindAddress
+  dfs.info.port
+  dfs.datanode.bindAddress
+  dfs.datanode.port
+  dfs.datanode.info.bindAdress
+  dfs.datanode.info.port
+  dfs.secondary.info.bindAddress
+  dfs.secondary.info.port
+  mapred.job.tracker.info.bindAddress
+  mapred.job.tracker.info.port
+  mapred.task.tracker.report.bindAddress
+  tasktracker.http.bindAddress
+  tasktracker.http.port
+New configuration variables (post <a href="http://issues.apache.org/jira/browse/HADOOP-2404">HADOOP-2404</a>):
+  dfs.secondary.http.address
+  dfs.datanode.address
+  dfs.datanode.http.address
+  dfs.http.address
+  mapred.job.tracker.http.address
+  mapred.task.tracker.report.address
+  mapred.task.tracker.http.address<br />(Konstantin Shvachko via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2401">HADOOP-2401</a>.  Only the current leaseholder can abandon a block for
+a HDFS file.  ClientProtocol version changed from 20 to 21.
+(Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2381">HADOOP-2381</a>.  Support permission information in FileStatus. Client
+Protocol version changed from 21 to 22.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2110">HADOOP-2110</a>. Block report processing creates fewer transient objects.
+Datanode Protocol version changed from 10 to 11.<br />(Sanjay Radia via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2567">HADOOP-2567</a>.  Add FileSystem#getHomeDirectory(), which returns the
+user's home directory in a FileSystem as a fully-qualified path.
+FileSystem#getWorkingDirectory() is also changed to return a
+fully-qualified path, which can break applications that attempt
+to, e.g., pass LocalFileSystem#getWorkingDir().toString() directly
+to java.io methods that accept file names.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2514">HADOOP-2514</a>.  Change trash feature to maintain a per-user trash
+directory, named ".Trash" in the user's home directory.  The
+"fs.trash.root" parameter is no longer used.  Full source paths
+are also no longer reproduced within the trash.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2012">HADOOP-2012</a>. Periodic data verification on Datanodes.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1707">HADOOP-1707</a>. The DFSClient does not use a local disk file to cache
+writes to a HDFS file. Changed Data Transfer Version from 7 to 8.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2652">HADOOP-2652</a>. Fix permission issues for HftpFileSystem. This is an
+incompatible change since distcp may not be able to copy files
+from cluster A (compiled with this patch) to cluster B (compiled
+with previous versions). (Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_._new_features_')">  NEW FEATURES
+</a>&nbsp;&nbsp;&nbsp;(14)
+    <ol id="release_0.16.0_-_2008-02-07_._new_features_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1857">HADOOP-1857</a>.  Ability to run a script when a task fails to capture stack
+traces.<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2299">HADOOP-2299</a>.  Defination of a login interface.  A simple implementation for
+Unix users and groups.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1652">HADOOP-1652</a>.  A utility to balance data among datanodes in a HDFS cluster.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2085">HADOOP-2085</a>.  A library to support map-side joins of consistently
+partitioned and sorted data sets.<br />(Chris Douglas via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1301">HADOOP-1301</a>.  Hadoop-On-Demand (HOD): resource management
+provisioning for Hadoop.<br />(Hemanth Yamijala via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2336">HADOOP-2336</a>. Shell commands to modify file permissions.<br />(rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1298">HADOOP-1298</a>. Implement file permissions for HDFS.
+(Tsz Wo (Nicholas) &amp; taton via cutting)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2447">HADOOP-2447</a>. HDFS can be configured to limit the total number of
+objects (inodes and blocks) in the file system.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2487">HADOOP-2487</a>. Added an option to get statuses for all submitted/run jobs.
+This information can be used to develop tools for analysing jobs.<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1873">HADOOP-1873</a>. Implement user permissions for Map/Reduce framework.<br />(Hairong Kuang via shv)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2532">HADOOP-2532</a>.  Add to MapFile a getClosest method that returns the key
+that comes just before if the key is not present.<br />(stack via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1883">HADOOP-1883</a>. Add versioning to Record I/O.<br />(Vivek Ratan via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2603">HADOOP-2603</a>.  Add SeqeunceFileAsBinaryInputFormat, which reads
+sequence files as BytesWritable/BytesWritable regardless of the
+key and value types used to write the file.<br />(cdouglas via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2367">HADOOP-2367</a>. Add ability to profile a subset of map/reduce tasks and fetch
+the result to the local filesystem of the submitting application. Also
+includes a general IntegerRanges extension to Configuration for setting
+positive, ranged parameters.<br />(Owen O'Malley via cdouglas)</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_._improvements_')">  IMPROVEMENTS
+</a>&nbsp;&nbsp;&nbsp;(54)
+    <ol id="release_0.16.0_-_2008-02-07_._improvements_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2045">HADOOP-2045</a>.  Change committer list on website to a table, so that
+folks can list their organization, timezone, etc.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2058">HADOOP-2058</a>.  Facilitate creating new datanodes dynamically in
+MiniDFSCluster.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1855">HADOOP-1855</a>.  fsck verifies block placement policies and reports
+violations.<br />(Konstantin Shvachko via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1604">HADOOP-1604</a>.  An system administrator can finalize namenode upgrades
+without running the cluster.<br />(Konstantin Shvachko via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1839">HADOOP-1839</a>.  Link-ify the Pending/Running/Complete/Killed grid in
+jobdetails.jsp to help quickly narrow down and see categorized TIPs'
+details via jobtasks.jsp.<br />(Amar Kamat via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1210">HADOOP-1210</a>.  Log counters in job history.<br />(Owen O'Malley via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1912">HADOOP-1912</a>. Datanode has two new commands COPY and REPLACE. These are
+needed for supporting data rebalance.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2086">HADOOP-2086</a>. This patch adds the ability to add dependencies to a job
+(run via JobControl) after construction.<br />(Adrian Woodhead via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1185">HADOOP-1185</a>. Support changing the logging level of a server without
+restarting the server.  (Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2134">HADOOP-2134</a>.  Remove developer-centric requirements from overview.html and
+keep it end-user focussed, specifically sections related to subversion and
+building Hadoop.<br />(Jim Kellerman via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1989">HADOOP-1989</a>. Support simulated DataNodes. This helps creating large virtual
+clusters for testing purposes.<br />(Sanjay Radia via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1274">HADOOP-1274</a>. Support different number of mappers and reducers per
+TaskTracker to  allow administrators to better configure and utilize
+heterogenous clusters.
+Configuration changes to hadoop-default.xml:
+  add mapred.tasktracker.map.tasks.maximum (default value of 2)
+  add mapred.tasktracker.reduce.tasks.maximum (default value of 2)
+  remove mapred.tasktracker.tasks.maximum (deprecated for 0.16.0)<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2104">HADOOP-2104</a>. Adds a description to the ant targets. This makes the
+output of "ant -projecthelp" sensible.<br />(Chris Douglas via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2127">HADOOP-2127</a>. Added a pipes sort example to benchmark trivial pipes
+application versus trivial java application.<br />(omalley via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2113">HADOOP-2113</a>. A new shell command "dfs -text" to view the contents of
+a gziped or SequenceFile.<br />(Chris Douglas via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2207">HADOOP-2207</a>.  Add a "package" target for contrib modules that
+permits each to determine what files are copied into release
+builds.<br />(stack via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1984">HADOOP-1984</a>. Makes the backoff for failed fetches exponential.
+Earlier, it was a random backoff from an interval.<br />(Amar Kamat via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1327">HADOOP-1327</a>.  Include website documentation for streaming.<br />(Rob Weltman
+via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2000">HADOOP-2000</a>.  Rewrite NNBench to measure namenode performance accurately.
+It now uses the map-reduce framework for load generation.<br />(Mukund Madhugiri via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2248">HADOOP-2248</a>. Speeds up the framework w.r.t Counters. Also has API
+updates to the Counters part.<br />(Owen O'Malley via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2326">HADOOP-2326</a>. The initial block report at Datanode startup time has
+a random backoff period.<br />(Sanjay Radia via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2432">HADOOP-2432</a>. HDFS includes the name of the file while throwing
+"File does not exist"  exception.<br />(Jim Kellerman via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2457">HADOOP-2457</a>. Added a 'forrest.home' property to the 'docs' target in
+build.xml.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2149">HADOOP-2149</a>.  A new benchmark for three name-node operation: file create,
+open, and block report, to evaluate the name-node performance
+for optimizations or new features.<br />(Konstantin Shvachko via shv)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2466">HADOOP-2466</a>. Change FileInputFormat.computeSplitSize to a protected
+non-static method to allow sub-classes to provide alternate
+implementations.<br />(Alejandro Abdelnur via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2425">HADOOP-2425</a>. Change TextOutputFormat to handle Text specifically for better
+performance. Make NullWritable implement Comparable. Make TextOutputFormat
+treat NullWritable like null.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1719">HADOOP-1719</a>. Improves the utilization of shuffle copier threads.<br />(Amar Kamat via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2390">HADOOP-2390</a>. Added documentation for user-controls for intermediate
+map-outputs &amp; final job-outputs and native-hadoop libraries.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1660">HADOOP-1660</a>. Add the cwd of the map/reduce task to the java.library.path
+of the child-jvm to support loading of native libraries distributed via
+the DistributedCache.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2285">HADOOP-2285</a>. Speeds up TextInputFormat. Also includes updates to the
+Text API.<br />(Owen O'Malley via cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2233">HADOOP-2233</a>. Adds a generic load generator for modeling MR jobs.<br />(cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2369">HADOOP-2369</a>. Adds a set of scripts for simulating a mix of user map/reduce
+workloads.<br />(Runping Qi via cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2547">HADOOP-2547</a>. Removes use of a 'magic number' in build.xml.<br />(Hrishikesh via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2268">HADOOP-2268</a>. Fix org.apache.hadoop.mapred.jobcontrol classes to use the
+List/Map interfaces rather than concrete ArrayList/HashMap classes
+internally.<br />(Adrian Woodhead via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2406">HADOOP-2406</a>. Add a benchmark for measuring read/write performance through
+the InputFormat interface, particularly with compression.<br />(cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2131">HADOOP-2131</a>. Allow finer-grained control over speculative-execution. Now
+users can set it for maps and reduces independently.
+Configuration changes to hadoop-default.xml:
+  deprecated mapred.speculative.execution
+  add mapred.map.tasks.speculative.execution
+  add mapred.reduce.tasks.speculative.execution<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1965">HADOOP-1965</a>. Interleave sort/spill in teh map-task along with calls to the
+Mapper.map method. This is done by splitting the 'io.sort.mb' buffer into
+two and using one half for collecting map-outputs and the other half for
+sort/spill.<br />(Amar Kamat via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2464">HADOOP-2464</a>. Unit tests for chmod, chown, and chgrp using DFS.<br />(Raghu Angadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1876">HADOOP-1876</a>. Persist statuses of completed jobs in HDFS so that the
+JobClient can query and get information about decommissioned jobs and also
+across JobTracker restarts.
+Configuration changes to hadoop-default.xml:
+  add mapred.job.tracker.persist.jobstatus.active (default value of false)
+  add mapred.job.tracker.persist.jobstatus.hours (default value of 0)
+  add mapred.job.tracker.persist.jobstatus.dir (default value of
+                                                /jobtracker/jobsInfo)<br />(Alejandro Abdelnur via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2077">HADOOP-2077</a>. Added version and build information to STARTUP_MSG for all
+hadoop daemons to aid error-reporting, debugging etc.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2398">HADOOP-2398</a>. Additional instrumentation for NameNode and RPC server.
+Add support for accessing instrumentation statistics via JMX.<br />(Sanjay radia via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2449">HADOOP-2449</a>. A return of the non-MR version of NNBench.<br />(Sanjay Radia via shv)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1989">HADOOP-1989</a>. Remove 'datanodecluster' command from bin/hadoop.<br />(Sanjay Radia via shv)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1742">HADOOP-1742</a>. Improve JavaDoc documentation for ClientProtocol, DFSClient,
+and FSNamesystem.<br />(Konstantin Shvachko)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2298">HADOOP-2298</a>. Add Ant target for a binary-only distribution.<br />(Hrishikesh via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2509">HADOOP-2509</a>. Add Ant target for Rat report (Apache license header
+reports).<br />(Hrishikesh via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2469">HADOOP-2469</a>.  WritableUtils.clone should take a Configuration
+instead of a JobConf.<br />(stack via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2659">HADOOP-2659</a>. Introduce superuser permissions for admin operations.
+(Tsz Wo (Nicholas), SZE via shv)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2596">HADOOP-2596</a>. Added a SequenceFile.createWriter api which allows the user
+to specify the blocksize, replication factor and the buffersize to be
+used for the underlying HDFS file.<br />(Alejandro Abdelnur via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2431">HADOOP-2431</a>. Test HDFS File Permissions.<br />(Hairong Kuang via shv)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2232">HADOOP-2232</a>. Add an option to disable Nagle's algorithm in the IPC stack.<br />(Clint Morgan via cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2342">HADOOP-2342</a>. Created a micro-benchmark for measuring
+local-file versus hdfs reads.<br />(Owen O'Malley via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2529">HADOOP-2529</a>. First version of HDFS User Guide.<br />(Raghu Angadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2690">HADOOP-2690</a>. Add jar-test target to build.xml, separating compilation
+and packaging of the test classes.<br />(Enis Soztutar via cdouglas)</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_._optimizations_')">  OPTIMIZATIONS
+</a>&nbsp;&nbsp;&nbsp;(4)
+    <ol id="release_0.16.0_-_2008-02-07_._optimizations_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1898">HADOOP-1898</a>.  Release the lock protecting the last time of the last stack
+dump while the dump is happening.<br />(Amareshwari Sri Ramadasu via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1900">HADOOP-1900</a>. Makes the heartbeat and task event queries interval
+dependent on the cluster size.<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2208">HADOOP-2208</a>. Counter update frequency (from TaskTracker to JobTracker) is
+capped at 1 minute.<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2284">HADOOP-2284</a>. Reduce the number of progress updates during the sorting in
+the map task.<br />(Amar Kamat via ddas)</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(91)
+    <ol id="release_0.16.0_-_2008-02-07_._bug_fixes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2583">HADOOP-2583</a>.  Fixes a bug in the Eclipse plug-in UI to edit locations.
+Plug-in version is now synchronized with Hadoop version.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2100">HADOOP-2100</a>.  Remove faulty check for existence of $HADOOP_PID_DIR and let
+'mkdir -p' check &amp; create it.<br />(Michael Bieniosek via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1642">HADOOP-1642</a>.  Ensure jobids generated by LocalJobRunner are unique to
+avoid collissions and hence job-failures.<br />(Doug Cutting via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2096">HADOOP-2096</a>.  Close open file-descriptors held by streams while localizing
+job.xml in the JobTracker and while displaying it on the webui in
+jobconf.jsp.<br />(Amar Kamat via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2098">HADOOP-2098</a>.  Log start &amp; completion of empty jobs to JobHistory, which
+also ensures that we close the file-descriptor of the job's history log
+opened during job-submission.<br />(Amar Kamat via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2112">HADOOP-2112</a>.  Adding back changes to build.xml lost while reverting
+<a href="http://issues.apache.org/jira/browse/HADOOP-1622">HADOOP-1622</a> i.e. http://svn.apache.org/viewvc?view=rev&amp;revision=588771.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2089">HADOOP-2089</a>.  Fixes the command line argument handling to handle multiple
+-cacheArchive in Hadoop streaming.<br />(Lohit Vijayarenu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2071">HADOOP-2071</a>.  Fix StreamXmlRecordReader to use a BufferedInputStream
+wrapped over the DFSInputStream since mark/reset aren't supported by
+DFSInputStream anymore.<br />(Lohit Vijayarenu via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1348">HADOOP-1348</a>.  Allow XML comments inside configuration files.<br />(Rajagopal Natarajan and Enis Soztutar via enis)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1952">HADOOP-1952</a>.  Improve handling of invalid, user-specified classes while
+configuring streaming jobs such as combiner, input/output formats etc.
+Now invalid options are caught, logged and jobs are failed early.<br />(Lohit
+Vijayarenu via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2151">HADOOP-2151</a>. FileSystem.globPaths validates the list of Paths that
+it returns.<br />(Lohit Vijayarenu via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2121">HADOOP-2121</a>. Cleanup DFSOutputStream when the stream encountered errors
+when Datanodes became full.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1130">HADOOP-1130</a>. The FileSystem.closeAll() method closes all existing
+DFSClients.<br />(Chris Douglas via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2204">HADOOP-2204</a>. DFSTestUtil.waitReplication was not waiting for all replicas
+to get created, thus causing unit test failure.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2078">HADOOP-2078</a>. An zero size file may have no blocks associated with it.<br />(Konstantin Shvachko via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2212">HADOOP-2212</a>. ChecksumFileSystem.getSumBufferSize might throw
+java.lang.ArithmeticException. The fix is to initialize bytesPerChecksum
+to 0.<br />(Michael Bieniosek via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2216">HADOOP-2216</a>.  Fix jobtasks.jsp to ensure that it first collects the
+taskids which satisfy the filtering criteria and then use that list to
+print out only the required task-reports; previously it was oblivious to
+the filtering and hence used the wrong index into the array of task-reports.<br />(Amar Kamat via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2272">HADOOP-2272</a>.  Fix findbugs target to reflect changes made to the location
+of the streaming jar file by <a href="http://issues.apache.org/jira/browse/HADOOP-2207">HADOOP-2207</a>.<br />(Adrian Woodhead via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2244">HADOOP-2244</a>.  Fixes the MapWritable.readFields to clear the instance
+field variable every time readFields is called. (Michael Stack via ddas).
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2245">HADOOP-2245</a>.  Fixes LocalJobRunner to include a jobId in the mapId. Also,
+adds a testcase for JobControl. (Adrian Woodhead via ddas).
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2275">HADOOP-2275</a>. Fix erroneous detection of corrupted file when namenode
+fails to allocate any datanodes for newly allocated block.<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2256">HADOOP-2256</a>. Fix a buf in the namenode that could cause it to encounter
+an infinite loop while deleting excess replicas that were created by
+block rebalancing.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2209">HADOOP-2209</a>. SecondaryNamenode process exits if it encounters exceptions
+that it cannot handle.<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2314">HADOOP-2314</a>. Prevent TestBlockReplacement from occasionally getting
+into an infinite loop.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2300">HADOOP-2300</a>. This fixes a bug where mapred.tasktracker.tasks.maximum
+would be ignored even if it was set in hadoop-site.xml.<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2349">HADOOP-2349</a>.  Improve code layout in file system transaction logging code.
+(Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2368">HADOOP-2368</a>.  Fix unit tests on Windows.
+(Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2363">HADOOP-2363</a>.  This fix allows running multiple instances of the unit test
+in parallel. The bug was introduced in <a href="http://issues.apache.org/jira/browse/HADOOP-2185">HADOOP-2185</a> that changed
+port-rolling behaviour.<br />(Konstantin Shvachko via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2271">HADOOP-2271</a>.  Fix chmod task to be non-parallel.<br />(Adrian Woodhead via
+omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2313">HADOOP-2313</a>.  Fail the build if building libhdfs fails.<br />(nigel via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2359">HADOOP-2359</a>.  Remove warning for interruptted exception when closing down
+minidfs.<br />(dhruba via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1841">HADOOP-1841</a>. Prevent slow clients from consuming threads in the NameNode.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2323">HADOOP-2323</a>. JobTracker.close() should not print stack traces for
+normal exit.<br />(jimk via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2376">HADOOP-2376</a>. Prevents sort example from overriding the number of maps.<br />(Owen O'Malley via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2434">HADOOP-2434</a>. FSDatasetInterface read interface causes HDFS reads to occur
+in 1 byte chunks, causing performance degradation.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2459">HADOOP-2459</a>. Fix package target so that src/docs/build files are not
+included in the release.<br />(nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2215">HADOOP-2215</a>.  Fix documentation in cluster_setup.html &amp;
+mapred_tutorial.html to reflect that mapred.tasktracker.tasks.maximum has
+been superseded by mapred.tasktracker.{map|reduce}.tasks.maximum.<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2459">HADOOP-2459</a>. Fix package target so that src/docs/build files are not
+included in the release.<br />(nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2352">HADOOP-2352</a>. Remove AC_CHECK_LIB for libz and liblzo to ensure that
+libhadoop.so doesn't have a dependency on them.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2453">HADOOP-2453</a>. Fix the configuration for wordcount-simple example in Hadoop
+Pipes which currently produces an XML parsing error.<br />(Amareshwari Sri
+Ramadasu via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2476">HADOOP-2476</a>. Unit test failure while reading permission bits of local
+file system (on Windows) fixed.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2247">HADOOP-2247</a>.  Fine-tune the strategies for killing mappers and reducers
+due to failures while fetching map-outputs. Now the map-completion times
+and number of currently running reduces are taken into account by the
+JobTracker before killing the mappers, while the progress made by the
+reducer and the number of fetch-failures vis-a-vis total number of
+fetch-attempts are taken into account before the reducer kills itself.<br />(Amar Kamat via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2452">HADOOP-2452</a>. Fix eclipse plug-in build.xml to refers to the right
+location where hadoop-*-core.jar is generated.<br />(taton)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2492">HADOOP-2492</a>. Additional debugging in the rpc server to better
+diagnose ConcurrentModificationException.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2344">HADOOP-2344</a>. Enhance the utility for executing shell commands to read the
+stdout/stderr streams while waiting for the command to finish (to free up
+the buffers). Also, this patch throws away stderr of the DF utility.
+@deprecated
+  org.apache.hadoop.fs.ShellCommand for org.apache.hadoop.util.Shell
+  org.apache.hadoop.util.ShellUtil for
+    org.apache.hadoop.util.Shell.ShellCommandExecutor<br />(Amar Kamat via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2511">HADOOP-2511</a>. Fix a javadoc warning in org.apache.hadoop.util.Shell
+introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-2344">HADOOP-2344</a>.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2442">HADOOP-2442</a>. Fix TestLocalFileSystemPermission.testLocalFSsetOwner
+to work on more platforms.<br />(Raghu Angadi via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2488">HADOOP-2488</a>. Fix a regression in random read performance.<br />(Michael Stack via rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2523">HADOOP-2523</a>. Fix TestDFSShell.testFilePermissions on Windows.<br />(Raghu Angadi via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2535">HADOOP-2535</a>. Removed support for deprecated mapred.child.heap.size and
+fixed some indentation issues in TaskRunner. (acmurthy)
+Configuration changes to hadoop-default.xml:
+  remove mapred.child.heap.size
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2512">HADOOP-2512</a>. Fix error stream handling in Shell. Use exit code to
+detect shell command errors in RawLocalFileSystem.<br />(Raghu Angadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2446">HADOOP-2446</a>. Fixes TestHDFSServerPorts and TestMRServerPorts so they
+do not rely on statically configured ports and cleanup better.<br />(nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2537">HADOOP-2537</a>. Make build process compatible with Ant 1.7.0.<br />(Hrishikesh via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1281">HADOOP-1281</a>. Ensure running tasks of completed map TIPs (e.g. speculative
+tasks) are killed as soon as the TIP completes.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2571">HADOOP-2571</a>. Suppress a suprious warning in test code.<br />(cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2481">HADOOP-2481</a>. NNBench report its progress periodically.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2601">HADOOP-2601</a>. Start name-node on a free port for TestNNThroughputBenchmark.<br />(Konstantin Shvachko)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2494">HADOOP-2494</a>.  Set +x on contrib/*/bin/* in packaged tar bundle.<br />(stack via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2605">HADOOP-2605</a>. Remove bogus leading slash in task-tracker report bindAddress.<br />(Konstantin Shvachko)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2620">HADOOP-2620</a>. Trivial. 'bin/hadoop fs -help' did not list chmod, chown, and
+chgrp.<br />(Raghu Angadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2614">HADOOP-2614</a>. The DFS WebUI accesses are configured to be from the user
+specified by dfs.web.ugi.  (Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2543">HADOOP-2543</a>. Implement a "no-permission-checking" mode for smooth
+upgrade from a pre-0.16 install of HDFS.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-290">HADOOP-290</a>. A DataNode log message now prints the target of a replication
+request correctly.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2538">HADOOP-2538</a>. Redirect to a warning, if plaintext parameter is true but
+the filter parameter is not given in TaskLogServlet.<br />(Michael Bieniosek via enis)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2582">HADOOP-2582</a>. Prevent 'bin/hadoop fs -copyToLocal' from creating
+zero-length files when the src does not exist.<br />(Lohit Vijayarenu via cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2189">HADOOP-2189</a>. Incrementing user counters should count as progress.<br />(ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2649">HADOOP-2649</a>. The NameNode periodically computes replication work for
+the datanodes. The periodicity of this computation is now configurable.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2549">HADOOP-2549</a>. Correct disk size computation so that data-nodes could switch
+to other local drives if current is full.<br />(Hairong Kuang via shv)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2633">HADOOP-2633</a>. Fsck should call name-node methods directly rather than
+through rpc. (Tsz Wo (Nicholas), SZE via shv)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2687">HADOOP-2687</a>. Modify a few log message generated by dfs client to be
+logged only at INFO level.<br />(stack via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2402">HADOOP-2402</a>. Fix BlockCompressorStream to ensure it buffers data before
+sending it down to the compressor so that each write call doesn't
+compress.<br />(Chris Douglas via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2645">HADOOP-2645</a>. The Metrics initialization code does not throw
+exceptions when servers are restarted by MiniDFSCluster.<br />(Sanjay Radia via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2691">HADOOP-2691</a>. Fix a race condition that was causing the DFSClient
+to erroneously remove a good datanode from a pipeline that actually
+had another datanode that was bad.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1195">HADOOP-1195</a>. All code in FSNamesystem checks the return value
+of getDataNode for null before using it.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2640">HADOOP-2640</a>. Fix a bug in MultiFileSplitInputFormat that was always
+returning 1 split in some circumstances.<br />(Enis Soztutar via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2626">HADOOP-2626</a>. Fix paths with special characters to work correctly
+with the local filesystem.<br />(Thomas Friol via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2646">HADOOP-2646</a>. Fix SortValidator to work with fully-qualified
+working directories.<br />(Arun C Murthy via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2092">HADOOP-2092</a>. Added a ping mechanism to the pipes' task to periodically
+check if the parent Java task is running, and exit if the parent isn't
+alive and responding.<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2714">HADOOP-2714</a>. TestDecommission failed on windows because the replication
+request was timing out.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2576">HADOOP-2576</a>. Namenode performance degradation over time triggered by
+large heartbeat interval.<br />(Raghu Angadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2720">HADOOP-2720</a>. Jumbo bug fix patch to HOD.  Final sync of Apache SVN with
+internal Yahoo SVN.<br />(Hemanth Yamijala via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2713">HADOOP-2713</a>. TestDatanodeDeath failed on windows because the replication
+request was timing out.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2639">HADOOP-2639</a>. Fixes a problem to do with incorrect maintenance of values
+for runningMapTasks/runningReduceTasks.<br />(Amar Kamat and Arun Murthy
+via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2723">HADOOP-2723</a>. Fixed the check for checking whether to do user task
+profiling.<br />(Amareshwari Sri Ramadasu via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2734">HADOOP-2734</a>. Link forrest docs to new http://hadoop.apache.org<br />(Doug Cutting via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2641">HADOOP-2641</a>. Added Apache license headers to 95 files.<br />(nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2732">HADOOP-2732</a>. Fix bug in path globbing.<br />(Hairong Kuang via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2404">HADOOP-2404</a>. Fix backwards compatability with hadoop-0.15 configuration
+files that was broken by <a href="http://issues.apache.org/jira/browse/HADOOP-2185">HADOOP-2185</a>.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2740">HADOOP-2740</a>. Fix HOD to work with the configuration variables changed in
+<a href="http://issues.apache.org/jira/browse/HADOOP-2404">HADOOP-2404</a>.<br />(Hemanth Yamijala via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2755">HADOOP-2755</a>. Fix fsck performance degradation because of permissions
+issue.  (Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2768">HADOOP-2768</a>. Fix performance regression caused by <a href="http://issues.apache.org/jira/browse/HADOOP-1707">HADOOP-1707</a>.<br />(dhruba borthakur via nigel)</li>
+    </ol>
+  </li>
+</ul>
+<h3><a href="javascript:toggleList('release_0.15.3_-_2008-01-18_')">Release 0.15.3 - 2008-01-18
+</a></h3>
+<ul id="release_0.15.3_-_2008-01-18_">
+  <li><a href="javascript:toggleList('release_0.15.3_-_2008-01-18_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(4)
+    <ol id="release_0.15.3_-_2008-01-18_._bug_fixes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2562">HADOOP-2562</a>. globPaths supports {ab,cd}.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2540">HADOOP-2540</a>. fsck reports missing blocks incorrectly.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2570">HADOOP-2570</a>. "work" directory created unconditionally, and symlinks
+created from the task cwds.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2574">HADOOP-2574</a>. Fixed mapred_tutorial.xml to correct minor errors with the
+WordCount examples.<br />(acmurthy)</li>
+    </ol>
+  </li>
+</ul>
+<h3><a href="javascript:toggleList('release_0.15.2_-_2008-01-02_')">Release 0.15.2 - 2008-01-02
+</a></h3>
+<ul id="release_0.15.2_-_2008-01-02_">
+  <li><a href="javascript:toggleList('release_0.15.2_-_2008-01-02_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(11)
+    <ol id="release_0.15.2_-_2008-01-02_._bug_fixes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2246">HADOOP-2246</a>.  Moved the changelog for <a href="http://issues.apache.org/jira/browse/HADOOP-1851">HADOOP-1851</a> from the NEW FEATURES
+section to the INCOMPATIBLE CHANGES section.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2238">HADOOP-2238</a>.  Fix TaskGraphServlet so that it sets the content type of
+the response appropriately.<br />(Paul Saab via enis)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2129">HADOOP-2129</a>.  Fix so that distcp works correctly when source is
+HDFS but not the default filesystem.  HDFS paths returned by the
+listStatus() method are now fully-qualified.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2378">HADOOP-2378</a>.  Fixes a problem where the last task completion event would
+get created after the job completes.<br />(Alejandro Abdelnur via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2228">HADOOP-2228</a>.  Checks whether a job with a certain jobId is already running
+and then tries to create the JobInProgress object.<br />(Johan Oskarsson via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2422">HADOOP-2422</a>.  dfs -cat multiple files fail with 'Unable to write to
+output stream'.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2460">HADOOP-2460</a>.  When the namenode encounters ioerrors on writing a
+transaction log, it stops writing new transactions to that one.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2227">HADOOP-2227</a>.  Use the LocalDirAllocator uniformly for handling all of the
+temporary storage required for a given task. It also implies that
+mapred.local.dir.minspacestart is handled by checking if there is enough
+free-space on any one of the available disks.<br />(Amareshwari Sri Ramadasu
+via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2437">HADOOP-2437</a>.  Fix the LocalDirAllocator to choose the seed for the
+round-robin disk selections randomly. This helps in spreading data across
+multiple partitions much better.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2486">HADOOP-2486</a>. When the list of files from the InMemoryFileSystem is obtained
+for merging, this patch will ensure that only those files whose checksums
+have also got created (renamed) are returned.<br />(ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2456">HADOOP-2456</a>. Hardcode English locale to prevent NumberFormatException
+from occurring when starting the NameNode with certain locales.<br />(Matthias Friedrich via nigel)</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.15.2_-_2008-01-02_._improvements_')">  IMPROVEMENTS
+</a>&nbsp;&nbsp;&nbsp;(4)
+    <ol id="release_0.15.2_-_2008-01-02_._improvements_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2160">HADOOP-2160</a>.  Remove project-level, non-user documentation from
+releases, since it's now maintained in a separate tree.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1327">HADOOP-1327</a>.  Add user documentation for streaming.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2382">HADOOP-2382</a>.  Add hadoop-default.html to subversion.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2158">HADOOP-2158</a>. hdfsListDirectory calls FileSystem.listStatus instead
+of FileSystem.listPaths. This reduces the number of RPC calls on the
+namenode, thereby improving scalability.<br />(Christian Kunz via dhruba)</li>
+    </ol>
+  </li>
+</ul>
+<h3><a href="javascript:toggleList('release_0.15.1_-_2007-11-27_')">Release 0.15.1 - 2007-11-27
+</a></h3>
+<ul id="release_0.15.1_-_2007-11-27_">
+  <li><a href="javascript:toggleList('release_0.15.1_-_2007-11-27_._incompatible_changes_')">  INCOMPATIBLE CHANGES
+</a>&nbsp;&nbsp;&nbsp;(1)
+    <ol id="release_0.15.1_-_2007-11-27_._incompatible_changes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-713">HADOOP-713</a>.  Reduce CPU usage on namenode while listing directories.
+FileSystem.listPaths does not return the size of the entire subtree.
+Introduced a new API ClientProtocol.getContentLength that returns the
+size of the subtree.<br />(Dhruba Borthakur via dhruba)</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.15.1_-_2007-11-27_._improvements_')">  IMPROVEMENTS
+</a>&nbsp;&nbsp;&nbsp;(1)
+    <ol id="release_0.15.1_-_2007-11-27_._improvements_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1917">HADOOP-1917</a>.  Addition of guides/tutorial for better overall
+documentation for Hadoop. Specifically:
+* quickstart.html is targeted towards first-time users and helps them
+  setup a single-node cluster and play with Hadoop.
+* cluster_setup.html helps admins to configure and set up non-trivial
+  hadoop clusters.
+* mapred_tutorial.html is a comprehensive Map-Reduce tutorial.<br />(acmurthy)</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.15.1_-_2007-11-27_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(3)
+    <ol id="release_0.15.1_-_2007-11-27_._bug_fixes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2174">HADOOP-2174</a>.  Removed the unnecessary Reporter.setStatus call from
+FSCopyFilesMapper.close, which led to an NPE since the reporter isn't valid
+in the close method.<br />(Chris Douglas via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2172">HADOOP-2172</a>.  Restore performance of random access to local files
+by caching positions of local input streams, avoiding a system
+call.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2205">HADOOP-2205</a>.  Regenerate the Hadoop website since some of the changes made
+by <a href="http://issues.apache.org/jira/browse/HADOOP-1917">HADOOP-1917</a> weren't correctly copied over to the trunk/docs directory.
+Also fixed a couple of minor typos and broken links.<br />(acmurthy)</li>
+    </ol>
+  </li>
+</ul>
+<h3><a href="javascript:toggleList('release_0.15.0_-_2007-11-2_')">Release 0.15.0 - 2007-11-2
+</a></h3>
+<ul id="release_0.15.0_-_2007-11-2_">
+  <li><a href="javascript:toggleList('release_0.15.0_-_2007-11-2_._incompatible_changes_')">  INCOMPATIBLE CHANGES
+</a>&nbsp;&nbsp;&nbsp;(10)
+    <ol id="release_0.15.0_-_2007-11-2_._incompatible_changes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1708">HADOOP-1708</a>.  Make files appear in namespace as soon as they are
+created.<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-999">HADOOP-999</a>.  A HDFS Client immediately informs the NameNode of a new
+file creation.  ClientProtocol version changed from 14 to 15.
+(Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-932">HADOOP-932</a>.  File locking interfaces and implementations (that were
+earlier deprecated) are removed.  Client Protocol version changed
+from 15 to 16.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1621">HADOOP-1621</a>.  FileStatus is now a concrete class and FileSystem.listPaths
+is deprecated and replaced with listStatus.<br />(Chris Douglas via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1656">HADOOP-1656</a>.  The blockSize of a file is stored persistently in the file
+inode.<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1838">HADOOP-1838</a>.  The blocksize of files created with an earlier release is
+set to the default block size.<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-785">HADOOP-785</a>.  Add support for 'final' Configuration parameters,
+removing support for 'mapred-default.xml', and changing
+'hadoop-site.xml' to not override other files.  Now folks should
+generally use 'hadoop-site.xml' for all configurations.  Values
+with a 'final' tag may not be overridden by subsequently loaded
+configuration files, e.g., by jobs.<br />(Arun C. Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1846">HADOOP-1846</a>. DatanodeReport in ClientProtocol can report live
+datanodes, dead datanodes or all datanodes. Client Protocol version
+changed from 17 to 18.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1851">HADOOP-1851</a>.  Permit specification of map output compression type
+and codec, independent of the final output's compression
+parameters.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1819">HADOOP-1819</a>.  Jobtracker cleanups, including binding ports before
+clearing state directories, so that inadvertently starting a
+second jobtracker doesn't trash one that's already running. Removed
+method JobTracker.getTracker() because the static variable, which
+stored the value, caused initialization problems.<br />(omalley via cutting)</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.15.0_-_2007-11-2_._new_features_')">  NEW FEATURES
+</a>&nbsp;&nbsp;&nbsp;(14)
+    <ol id="release_0.15.0_-_2007-11-2_._new_features_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-89">HADOOP-89</a>.  A client can access file data even before the creator
+has closed the file. Introduce a new command "tail" from dfs shell.<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1636">HADOOP-1636</a>.  Allow configuration of the number of jobs kept in
+memory by the JobTracker.<br />(Michael Bieniosek via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1667">HADOOP-1667</a>.  Reorganize CHANGES.txt into sections to make it
+easier to read.  Also remove numbering, to make merging easier.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1610">HADOOP-1610</a>.  Add metrics for failed tasks.<br />(Devaraj Das via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1767">HADOOP-1767</a>.  Add "bin/hadoop job -list" sub-command.<br />(taton via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1351">HADOOP-1351</a>.  Add "bin/hadoop job [-fail-task|-kill-task]" sub-commands
+to terminate a particular task-attempt.<br />(Enis Soztutar via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1880">HADOOP-1880</a>. SleepJob : An example job that sleeps at each map and
+reduce task.<br />(enis)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1809">HADOOP-1809</a>. Add a link in web site to #hadoop IRC channel.<br />(enis)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1894">HADOOP-1894</a>. Add percentage graphs and mapred task completion graphs
+to the Web User Interface. Users not using Firefox may install a plugin in
+their browsers to see SVG graphics.<br />(enis)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1914">HADOOP-1914</a>. Introduce a new NamenodeProtocol to allow secondary
+namenodes and rebalancing processes to communicate with a primary
+namenode.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1963">HADOOP-1963</a>.  Add a FileSystem implementation for the Kosmos
+Filesystem (KFS).<br />(Sriram Rao via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1822">HADOOP-1822</a>.  Allow the specialization and configuration of socket
+factories. Provide a StandardSocketFactory, and a SocksSocketFactory to
+allow the use of SOCKS proxies. (taton).
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1968">HADOOP-1968</a>. FileSystem supports wildcard input syntax "{ }".<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2566">HADOOP-2566</a>. Add globStatus method to the FileSystem interface
+and deprecate globPath and listPath.<br />(Hairong Kuang via hairong)</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.15.0_-_2007-11-2_._optimizations_')">  OPTIMIZATIONS
+</a>&nbsp;&nbsp;&nbsp;(8)
+    <ol id="release_0.15.0_-_2007-11-2_._optimizations_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1910">HADOOP-1910</a>.  Reduce the number of RPCs that DistributedFileSystem.create()
+makes to the namenode.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1565">HADOOP-1565</a>.  Reduce memory usage of NameNode by replacing
+TreeMap in HDFS Namespace with ArrayList.<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1743">HADOOP-1743</a>.  Change DFS INode from a nested class to standalone
+class, with specialized subclasses for directories and files, to
+save memory on the namenode.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1759">HADOOP-1759</a>.  Change file name in INode from String to byte[],
+saving memory on the namenode.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1766">HADOOP-1766</a>.  Save memory in namenode by having BlockInfo extend
+Block, and replace many uses of Block with BlockInfo.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1687">HADOOP-1687</a>.  Save memory in namenode by optimizing BlockMap
+representation.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1774">HADOOP-1774</a>. Remove use of INode.parent in Block CRC upgrade.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1788">HADOOP-1788</a>.  Increase the buffer size on the Pipes command socket.<br />(Amareshwari Sri Ramadasu and Christian Kunz via omalley)</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.15.0_-_2007-11-2_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(64)
+    <ol id="release_0.15.0_-_2007-11-2_._bug_fixes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1946">HADOOP-1946</a>.  The Datanode code does not need to invoke du on
+every heartbeat.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1935">HADOOP-1935</a>. Fix a NullPointerException in internalReleaseCreate.<br />(Dhruba Borthakur)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1933">HADOOP-1933</a>. The nodes listed in include and exclude files
+are always listed in the datanode report.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1953">HADOOP-1953</a>. The job tracker should wait beteween calls to try and delete
+the system directory<br />(Owen O'Malley via devaraj)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1932">HADOOP-1932</a>. TestFileCreation fails with message saying filestatus.dat
+is of incorrect size.<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1573">HADOOP-1573</a>. Support for 0 reducers in PIPES.<br />(Owen O'Malley via devaraj)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1500">HADOOP-1500</a>. Fix typographical errors in the DFS WebUI.<br />(Nigel Daley via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1076">HADOOP-1076</a>. Periodic checkpoint can continue even if an earlier
+checkpoint encountered an error.<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1887">HADOOP-1887</a>. The Namenode encounters an ArrayIndexOutOfBoundsException
+while listing a directory that had a file that was
+being actively written to.<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1904">HADOOP-1904</a>. The Namenode encounters an exception because the
+list of blocks per datanode-descriptor was corrupted.<br />(Konstantin Shvachko via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1762">HADOOP-1762</a>. The Namenode fsimage does not contain a list of
+Datanodes.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1890">HADOOP-1890</a>. Removed debugging prints introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-1774">HADOOP-1774</a>.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1763">HADOOP-1763</a>. Too many lost task trackers on large clusters due to
+insufficient number of RPC handler threads on the JobTracker.<br />(Devaraj Das)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1463">HADOOP-1463</a>.  HDFS report correct usage statistics for disk space
+used by HDFS.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1692">HADOOP-1692</a>.  In DFS ant task, don't cache the Configuration.<br />(Chris Douglas via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1726">HADOOP-1726</a>.  Remove lib/jetty-ext/ant.jar.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1772">HADOOP-1772</a>.  Fix hadoop-daemon.sh script to get correct hostname
+under Cygwin.  (Tsz Wo (Nicholas), SZE via cutting)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1749">HADOOP-1749</a>.  Change TestDFSUpgrade to sort files, fixing sporadic
+test failures.<br />(Enis Soztutar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1748">HADOOP-1748</a>.  Fix tasktracker to be able to launch tasks when log
+directory is relative.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1775">HADOOP-1775</a>.  Fix a NullPointerException and an
+IllegalArgumentException in MapWritable.<br />(Jim Kellerman via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1795">HADOOP-1795</a>.  Fix so that jobs can generate output file names with
+special characters.<br />(Frédéric Bertin via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1810">HADOOP-1810</a>.  Fix incorrect value type in MRBench (SmallJobs)<br />(Devaraj Das via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1806">HADOOP-1806</a>.  Fix ant task to compile again, also fix default
+builds to compile ant tasks.<br />(Chris Douglas via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1758">HADOOP-1758</a>.  Fix escape processing in librecordio to not be
+quadratic.<br />(Vivek Ratan via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1817">HADOOP-1817</a>.  Fix MultiFileSplit to read and write the split
+length, so that it is not always zero in map tasks.<br />(Thomas Friol via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1853">HADOOP-1853</a>.  Fix contrib/streaming to accept multiple -cacheFile
+options.<br />(Prachi Gupta via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1818">HADOOP-1818</a>. Fix MultiFileInputFormat so that it does not return
+empty splits when numPaths &lt; numSplits.<br />(Thomas Friol via enis)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1840">HADOOP-1840</a>. Fix race condition which leads to task's diagnostic
+messages getting lost.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1885">HADOOP-1885</a>. Fix race condition in MiniDFSCluster shutdown.<br />(Chris Douglas via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1889">HADOOP-1889</a>.  Fix path in EC2 scripts for building your own AMI.<br />(tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1892">HADOOP-1892</a>.  Fix a NullPointerException in the JobTracker when
+trying to fetch a task's diagnostic messages from the JobClient.<br />(Amar Kamat via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1897">HADOOP-1897</a>.  Completely remove about.html page from the web site.<br />(enis)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1907">HADOOP-1907</a>.  Fix null pointer exception when getting task diagnostics
+in JobClient.<br />(Christian Kunz via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1882">HADOOP-1882</a>.  Remove spurious asterisks from decimal number displays.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1783">HADOOP-1783</a>.  Make S3 FileSystem return Paths fully-qualified with
+scheme and host.<br />(tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1925">HADOOP-1925</a>.  Make pipes' autoconf script look for libsocket and libnsl, so
+that it can compile under Solaris.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1940">HADOOP-1940</a>.  TestDFSUpgradeFromImage must shut down its MiniDFSCluster.<br />(Chris Douglas via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1930">HADOOP-1930</a>.  Fix the blame for failed fetchs on the right host.<br />(Arun C.
+Murthy via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1934">HADOOP-1934</a>.  Fix the platform name on Mac to use underscores rather than
+spaces.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1959">HADOOP-1959</a>.  Use "/" instead of File.separator in the StatusHttpServer.<br />(jimk via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1626">HADOOP-1626</a>.  Improve dfsadmin help messages.<br />(Lohit Vijayarenu via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1695">HADOOP-1695</a>.  The SecondaryNamenode waits for the Primary NameNode to
+start up.<br />(Dhruba Borthakur)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1983">HADOOP-1983</a>.  Have Pipes flush the command socket when progress is sent
+to prevent timeouts during long computations.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1875">HADOOP-1875</a>.  Non-existant directories or read-only directories are
+filtered from dfs.client.buffer.dir.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1992">HADOOP-1992</a>.  Fix the performance degradation in the sort validator.<br />(acmurthy via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1874">HADOOP-1874</a>.  Move task-outputs' promotion/discard to a separate thread
+distinct from the main heartbeat-processing thread. The main upside being
+that we do not lock-up the JobTracker during HDFS operations, which
+otherwise may lead to lost tasktrackers if the NameNode is unresponsive.<br />(Devaraj Das via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2026">HADOOP-2026</a>. Namenode prints out one log line for "Number of transactions"
+at most once every minute.<br />(Dhruba Borthakur)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2022">HADOOP-2022</a>.  Ensure that status information for successful tasks is correctly
+recorded at the JobTracker, so that, for example, one may view correct
+information via taskdetails.jsp. This bug was introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-1874">HADOOP-1874</a>.<br />(Amar Kamat via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2031">HADOOP-2031</a>.  Correctly maintain the taskid which takes the TIP to
+completion, failing which the case of lost tasktrackers isn't handled
+properly i.e. the map TIP is incorrectly left marked as 'complete' and it
+is never rescheduled elsewhere, leading to hung reduces.<br />(Devaraj Das via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2018">HADOOP-2018</a>. The source datanode of a data transfer waits for
+a response from the target datanode before closing the data stream.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2023">HADOOP-2023</a>. Disable TestLocalDirAllocator on Windows.<br />(Hairong Kuang via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2016">HADOOP-2016</a>.  Ignore status-updates from FAILED/KILLED tasks at the
+TaskTracker. This fixes a race-condition which caused the tasks to wrongly
+remain in the RUNNING state even after being killed by the JobTracker and
+thus handicap the cleanup of the task's output sub-directory.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1771">HADOOP-1771</a>. Fix a NullPointerException in streaming caused by an
+IOException in MROutputThread.<br />(lohit vijayarenu via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2028">HADOOP-2028</a>. Fix distcp so that the log dir does not need to be
+specified and the destination does not need to exist.<br />(Chris Douglas via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2044">HADOOP-2044</a>. The namenode protects all lease manipulations using a
+sortedLease lock.<br />(Dhruba Borthakur)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2051">HADOOP-2051</a>. The TaskCommit thread should not die for exceptions other
+than the InterruptedException. This behavior is there for the other long
+running threads in the JobTracker.<br />(Arun C Murthy via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1973">HADOOP-1973</a>. The FileSystem object would be accessed on the JobTracker
+through an RPC in the InterTrackerProtocol. The check for the object being
+null was missing and hence NPE would be thrown sometimes. This issue fixes
+that problem.<br />(Amareshwari Sri Ramadasu via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2033">HADOOP-2033</a>.  The SequenceFile.Writer.sync method was a no-op, which caused
+very uneven splits for applications like distcp that count on them.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2070">HADOOP-2070</a>.  Added a flush method to pipes' DownwardProtocol and call
+that before waiting for the application to finish to ensure all buffered
+data is flushed.<br />(Owen O'Malley via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2080">HADOOP-2080</a>.  Fixed calculation of the checksum file size when the values
+are large.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2048">HADOOP-2048</a>.  Change error handling in distcp so that each map copies
+as much as possible before reporting the error. Also report progress on
+every copy.<br />(Chris Douglas via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2073">HADOOP-2073</a>.  Change size of VERSION file after writing contents to it.<br />(Konstantin Shvachko via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2102">HADOOP-2102</a>.  Fix the deprecated ToolBase to pass its Configuration object
+to the superseding ToolRunner to ensure it picks up the appropriate
+configuration resources.<br />(Dennis Kubes and Enis Soztutar via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2103">HADOOP-2103</a>.  Fix minor javadoc bugs introduce by <a href="http://issues.apache.org/jira/browse/HADOOP-2046">HADOOP-2046</a>.<br />(Nigel
+Daley via acmurthy)</li>
+    </ol>
+  </li>
+  <li><a href="javascript:toggleList('release_0.15.0_-_2007-11-2_._improvements_')">  IMPROVEMENTS
+</a>&nbsp;&nbsp;&nbsp;(37)
+    <ol id="release_0.15.0_-_2007-11-2_._improvements_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1908">HADOOP-1908</a>. Restructure data node code so that block sending and
+receiving are separated from data transfer header handling.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1921">HADOOP-1921</a>. Save the configuration of completed/failed jobs and make them
+available via the web-ui.<br />(Amar Kamat via devaraj)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1266">HADOOP-1266</a>. Remove dependency of package org.apache.hadoop.net on
+org.apache.hadoop.dfs.<br />(Hairong Kuang via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1779">HADOOP-1779</a>. Replace INodeDirectory.getINode() by a getExistingPathINodes()
+to allow the retrieval of all existing INodes along a given path in a
+single lookup. This facilitates removal of the 'parent' field in the
+inode.<br />(Christophe Taton via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1756">HADOOP-1756</a>. Add toString() to some Writable-s.<br />(ab)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1727">HADOOP-1727</a>.  New classes: MapWritable and SortedMapWritable.<br />(Jim Kellerman via ab)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1651">HADOOP-1651</a>.  Improve progress reporting.<br />(Devaraj Das via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1595">HADOOP-1595</a>.  dfsshell can wait for a file to achieve its intended
+replication target. (Tsz Wo (Nicholas), SZE via dhruba)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1693">HADOOP-1693</a>.  Remove un-needed log fields in DFS replication classes,
+since the log may be accessed statically.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1231">HADOOP-1231</a>.  Add generics to Mapper and Reducer interfaces.<br />(tomwhite via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1436">HADOOP-1436</a>.  Improved command-line APIs, so that all tools need
+not subclass ToolBase, and generic parameter parser is public.<br />(Enis Soztutar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1703">HADOOP-1703</a>.  DFS-internal code cleanups, removing several uses of
+the obsolete UTF8.<br />(Christophe Taton via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1731">HADOOP-1731</a>.  Add Hadoop's version to contrib jar file names.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1689">HADOOP-1689</a>.  Make shell scripts more portable.  All shell scripts
+now explicitly depend on bash, but do not require that bash be
+installed in a particular location, as long as it is on $PATH.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1744">HADOOP-1744</a>.  Remove many uses of the deprecated UTF8 class from
+the HDFS namenode.<br />(Christophe Taton via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1654">HADOOP-1654</a>.  Add IOUtils class, containing generic io-related
+utility methods.<br />(Enis Soztutar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1158">HADOOP-1158</a>.  Change JobTracker to record map-output transmission
+errors and use them to trigger speculative re-execution of tasks.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1601">HADOOP-1601</a>.  Change GenericWritable to use ReflectionUtils for
+instance creation, avoiding classloader issues, and to implement
+Configurable.<br />(Enis Soztutar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1750">HADOOP-1750</a>.  Log standard output and standard error when forking
+task processes.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1803">HADOOP-1803</a>.  Generalize build.xml to make files in all
+src/contrib/*/bin directories executable.<br />(stack via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1739">HADOOP-1739</a>.  Let OS always choose the tasktracker's umbilical
+port.  Also switch default address for umbilical connections to
+loopback.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1812">HADOOP-1812</a>. Let OS choose ports for IPC and RPC unit tests.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1825">HADOOP-1825</a>.  Create $HADOOP_PID_DIR when it does not exist.<br />(Michael Bieniosek via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1425">HADOOP-1425</a>.  Replace uses of ToolBase with the Tool interface.<br />(Enis Soztutar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1569">HADOOP-1569</a>.  Reimplement DistCP to use the standard FileSystem/URI
+code in Hadoop so that you can copy from and to all of the supported file
+systems.<br />(Chris Douglas via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1018">HADOOP-1018</a>.  Improve documentation w.r.t handling of lost hearbeats between
+TaskTrackers and JobTracker.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1718">HADOOP-1718</a>.  Add ant targets for measuring code coverage with clover.<br />(simonwillnauer via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1592">HADOOP-1592</a>.  Log error messages to the client console when tasks
+fail.<br />(Amar Kamat via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1879">HADOOP-1879</a>.  Remove some unneeded casts.<br />(Nilay Vaish via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1878">HADOOP-1878</a>.  Add space between priority links on job details
+page.<br />(Thomas Friol via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-120">HADOOP-120</a>.  In ArrayWritable, prevent creation with null value
+class, and improve documentation.<br />(Cameron Pope via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1926">HADOOP-1926</a>. Add a random text writer example/benchmark so that we can
+benchmark compression codecs on random data.<br />(acmurthy via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1906">HADOOP-1906</a>. Warn the user if they have an obsolete madred-default.xml
+file in their configuration directory.<br />(acmurthy via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1971">HADOOP-1971</a>.  Warn when job does not specify a jar.<br />(enis via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1942">HADOOP-1942</a>. Increase the concurrency of transaction logging to
+edits log. Reduce the number of syncs by double-buffering the changes
+to the transaction log.<br />(Dhruba Borthakur)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2046">HADOOP-2046</a>.  Improve mapred javadoc.<br />(Arun C. Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2105">HADOOP-2105</a>.  Improve overview.html to clarify supported platforms,
+software prerequisites for hadoop, how to install them on various
+platforms, and a better general description of hadoop and its utility.<br />(Jim Kellerman via acmurthy)</li>
+    </ol>
+  </li>
+</ul>
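+<p><em>Editor's note:</em> the HADOOP-1425 entry above moves driver programs from
+ToolBase to the Tool interface. The following is a minimal, hypothetical sketch of
+such a driver, not taken from the changelog; the class name, job setup, and the
+"example.property" key are illustrative only, and it assumes the Tool, ToolRunner,
+and Configured classes in org.apache.hadoop.util and org.apache.hadoop.conf.</p>
+<pre>
+// Hypothetical Tool-based driver: ToolRunner parses generic options
+// (-conf, -D key=value, ...) and hands the resulting Configuration to run().
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+public class ExampleTool extends Configured implements Tool {
+  public int run(String[] args) throws Exception {
+    Configuration conf = getConf();  // already populated with generic options
+    System.out.println("example.property = " + conf.get("example.property"));
+    return 0;                        // exit status handed back to ToolRunner
+  }
+
+  public static void main(String[] args) throws Exception {
+    System.exit(ToolRunner.run(new Configuration(), new ExampleTool(), args));
+  }
+}
+</pre>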
+<h3><a href="javascript:toggleList('release_0.14.4_-_2007-11-26_')">Release 0.14.4 - 2007-11-26
+</a></h3>
+<ul id="release_0.14.4_-_2007-11-26_">
+  <li><a href="javascript:toggleList('release_0.14.4_-_2007-11-26_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(3)
+    <ol id="release_0.14.4_-_2007-11-26_._bug_fixes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2140">HADOOP-2140</a>.  Add missing Apache Licensing text at the front of several
+C and C++ files.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2169">HADOOP-2169</a>.  Fix the DT_SONAME field of libhdfs.so to set it to the
+correct value of 'libhdfs.so'; previously it was set to the absolute path of
+libhdfs.so.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2001">HADOOP-2001</a>.  Make the job priority updates and job kills synchronized on
+the JobTracker. Deadlock was seen in the JobTracker because of the lack of
+this synchronization.<br />(Arun C Murthy via ddas)</li>
+    </ol>
+  </li>
+</ul>
+<h3><a href="javascript:toggleList('release_0.14.3_-_2007-10-19_')">Release 0.14.3 - 2007-10-19
+</a></h3>
+<ul id="release_0.14.3_-_2007-10-19_">
+  <li><a href="javascript:toggleList('release_0.14.3_-_2007-10-19_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(3)
+    <ol id="release_0.14.3_-_2007-10-19_._bug_fixes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2053">HADOOP-2053</a>. Fixed a dangling reference to a memory buffer in the map
+output sorter.<br />(acmurthy via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2036">HADOOP-2036</a>. Fix a NullPointerException in JvmMetrics class.<br />(nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2043">HADOOP-2043</a>. Release 0.14.2 was compiled with Java 1.6 rather than
+Java 1.5.<br />(cutting)</li>
+    </ol>
+  </li>
+</ul>
+<h3><a href="javascript:toggleList('release_0.14.2_-_2007-10-09_')">Release 0.14.2 - 2007-10-09
+</a></h3>
+<ul id="release_0.14.2_-_2007-10-09_">
+  <li><a href="javascript:toggleList('release_0.14.2_-_2007-10-09_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(9)
+    <ol id="release_0.14.2_-_2007-10-09_._bug_fixes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1948">HADOOP-1948</a>. Removed spurious error message during block crc upgrade.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1862">HADOOP-1862</a>.  reduces are getting stuck trying to find map outputs.<br />(Arun C. Murthy via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1977">HADOOP-1977</a>. Fixed handling of ToolBase cli options in JobClient.<br />(enis via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1972">HADOOP-1972</a>.  Fix LzoCompressor to ensure the user has actually asked
+to finish compression.<br />(arun via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1970">HADOOP-1970</a>.  Fix deadlock in progress reporting in the task.<br />(Vivek
+Ratan via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1978">HADOOP-1978</a>.  Name-node removes edits.new after a successful startup.<br />(Konstantin Shvachko via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1955">HADOOP-1955</a>.  The Namenode tries to not pick the same source Datanode for
+a replication request if the earlier replication request for the same
+block and that source Datanode had failed.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1961">HADOOP-1961</a>.  The -get option to dfs-shell works when a single filename
+is specified.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1997">HADOOP-1997</a>.  TestCheckpoint closes the edits file after writing to it,
+otherwise the rename of this file on Windows fails.<br />(Konstantin Shvachko via dhruba)</li>
+    </ol>
+  </li>
+</ul>
+<h3><a href="javascript:toggleList('release_0.14.1_-_2007-09-04_')">Release 0.14.1 - 2007-09-04
+</a></h3>
+<ul id="release_0.14.1_-_2007-09-04_">
+  <li><a href="javascript:toggleList('release_0.14.1_-_2007-09-04_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(3)
+    <ol id="release_0.14.1_-_2007-09-04_._bug_fixes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1740">HADOOP-1740</a>.  Fix null pointer exception in sorting map outputs.<br />(Devaraj
+Das via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1790">HADOOP-1790</a>.  Fix tasktracker to work correctly on multi-homed
+boxes.<br />(Torsten Curdt via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1798">HADOOP-1798</a>.  Fix jobtracker to correctly account for failed
+tasks.<br />(omalley via cutting)</li>
+    </ol>
+  </li>
+</ul>
+<h3><a href="javascript:toggleList('release_0.14.0_-_2007-08-17_')">Release 0.14.0 - 2007-08-17
+</a></h3>
+<ul id="release_0.14.0_-_2007-08-17_">
+  <li><a href="javascript:toggleList('release_0.14.0_-_2007-08-17_._incompatible_changes_')">  INCOMPATIBLE CHANGES
+</a>&nbsp;&nbsp;&nbsp;(160)
+    <ol id="release_0.14.0_-_2007-08-17_._incompatible_changes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1134">HADOOP-1134</a>.
+CONFIG/API - dfs.block.size must now be a multiple of
+  io.bytes.per.checksum, otherwise new files cannot be written.
+LAYOUT - DFS layout version changed from -6 to -7, which will require an
+  upgrade from previous versions.
+PROTOCOL - Datanode RPC protocol version changed from 7 to 8.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1283">HADOOP-1283</a>
+API - deprecated file locking API.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-894">HADOOP-894</a>
+PROTOCOL - changed ClientProtocol to fetch parts of block locations.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1336">HADOOP-1336</a>
+CONFIG - Enable speculative execution by default.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1197">HADOOP-1197</a>
+API - deprecated method for Configuration.getObject, because
+  Configurations should only contain strings.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1343">HADOOP-1343</a>
+API - deprecate Configuration.set(String,Object) so that only strings are
+  put in Configurations.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1207">HADOOP-1207</a>
+CLI - Fix FsShell 'rm' command to continue when a non-existent file is
+  encountered.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1473">HADOOP-1473</a>
+CLI/API - Job, TIP, and Task id formats have changed and are now unique
+  across job tracker restarts.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1400">HADOOP-1400</a>
+API - JobClient constructor now takes a JobConf object instead of a
+  Configuration object.
+<p/>
+  NEW FEATURES and BUG FIXES
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1197">HADOOP-1197</a>.  In Configuration, deprecate getObject() and add
+getRaw(), which skips variable expansion.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1343">HADOOP-1343</a>.  In Configuration, deprecate set(String,Object) and
+implement Iterable.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1344">HADOOP-1344</a>.  Add RunningJob#getJobName().<br />(Michael Bieniosek via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1342">HADOOP-1342</a>.  In aggregators, permit one to limit the number of
+unique values per key.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1340">HADOOP-1340</a>.  Set the replication factor of the MD5 file in the filecache
+to be the same as the replication factor of the original file.<br />(Dhruba Borthakur via tomwhite.)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1355">HADOOP-1355</a>.  Fix null pointer dereference in
+TaskLogAppender.append(LoggingEvent).<br />(Arun C Murthy via tomwhite.)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1357">HADOOP-1357</a>.  Fix CopyFiles to correctly avoid removing "/".<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-234">HADOOP-234</a>.  Add pipes facility, which permits writing MapReduce
+programs in C++.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1359">HADOOP-1359</a>.  Fix a potential NullPointerException in HDFS.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1364">HADOOP-1364</a>.  Fix inconsistent synchronization in SequenceFile.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1379">HADOOP-1379</a>.  Add findbugs target to build.xml.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1364">HADOOP-1364</a>.  Fix various inconsistent synchronization issues.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1393">HADOOP-1393</a>.  Remove a potential unexpected negative number from
+uses of random number generator.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1387">HADOOP-1387</a>.  A number of "performance" code-cleanups suggested
+by findbugs.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1401">HADOOP-1401</a>.  Add contrib/hbase javadoc to tree.<br />(stack via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-894">HADOOP-894</a>.  Change HDFS so that the client only retrieves a limited
+number of block locations per request from the namenode.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1406">HADOOP-1406</a>.  Plug a leak in MapReduce's use of metrics.<br />(David Bowen via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1394">HADOOP-1394</a>.  Implement "performance" code-cleanups in HDFS
+suggested by findbugs.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1413">HADOOP-1413</a>.  Add example program that uses Knuth's dancing links
+algorithm to solve pentomino problems.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1226">HADOOP-1226</a>.  Change HDFS so that paths it returns are always
+fully qualified.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-800">HADOOP-800</a>.  Improvements to HDFS web-based file browser.<br />(Enis Soztutar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1408">HADOOP-1408</a>.  Fix a compiler warning by adding a class to replace
+a generic.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1376">HADOOP-1376</a>.  Modify RandomWriter example so that it can generate
+data for the Terasort benchmark.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1429">HADOOP-1429</a>.  Stop logging exceptions during normal IPC server
+shutdown.<br />(stack via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1461">HADOOP-1461</a>.  Fix the synchronization of the task tracker to
+avoid lockups in job cleanup.<br />(Arun C Murthy via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1446">HADOOP-1446</a>.  Update the TaskTracker metrics while the task is
+running.<br />(Devaraj via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1414">HADOOP-1414</a>.  Fix a number of issues identified by FindBugs as
+"Bad Practice".<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1392">HADOOP-1392</a>.  Fix "correctness" bugs identified by FindBugs in
+fs and dfs packages.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1412">HADOOP-1412</a>.  Fix "dodgy" bugs identified by FindBugs in fs and
+io packages.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1261">HADOOP-1261</a>.  Remove redundant events from HDFS namenode's edit
+log when a datanode restarts.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1336">HADOOP-1336</a>.  Re-enable speculative execution by
+default.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1311">HADOOP-1311</a>.  Fix a bug in BytesWritable#set() where start offset
+was ignored.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1450">HADOOP-1450</a>.  Move checksumming closer to user code, so that
+checksums are created before data is stored in large buffers and
+verified after data is read from large buffers, to better catch
+memory errors.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1447">HADOOP-1447</a>.  Add support in contrib/data_join for text inputs.<br />(Senthil Subramanian via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1456">HADOOP-1456</a>.  Fix TestDecommission assertion failure by setting
+the namenode to ignore the load on datanodes while allocating
+replicas.<br />(Dhruba Borthakur via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1396">HADOOP-1396</a>.  Fix FileNotFoundException on DFS block.<br />(Dhruba Borthakur via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1467">HADOOP-1467</a>.  Remove redundant counters from WordCount example.<br />(Owen O'Malley via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1139">HADOOP-1139</a>.  Log HDFS block transitions at INFO level, to better
+enable diagnosis of problems.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1269">HADOOP-1269</a>.  Finer grained locking in HDFS namenode.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1438">HADOOP-1438</a>.  Improve HDFS documentation, correcting typos and
+making images appear in PDF.  Also update copyright date for all
+docs.<br />(Luke Nezda via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1457">HADOOP-1457</a>.  Add counters for monitoring task assignments.<br />(Arun C Murthy via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1472">HADOOP-1472</a>.  Fix so that timed-out tasks are counted as failures
+rather than as killed.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1234">HADOOP-1234</a>.  Fix a race condition in file cache that caused
+tasktracker to not be able to find cached files.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1482">HADOOP-1482</a>.  Fix secondary namenode to roll info port.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1300">HADOOP-1300</a>.  Improve removal of excess block replicas to be
+rack-aware.  Attempts are now made to keep replicas on more
+racks.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1417">HADOOP-1417</a>.  Disable a few FindBugs checks that generate a lot
+of spurious warnings.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1320">HADOOP-1320</a>.  Rewrite RandomWriter example to bypass reduce.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1449">HADOOP-1449</a>.  Add some examples to contrib/data_join.<br />(Senthil Subramanian via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1459">HADOOP-1459</a>.  Fix so that, in HDFS, getFileCacheHints() returns
+hostnames instead of IP addresses.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1493">HADOOP-1493</a>.  Permit specification of "java.library.path" system
+property in "mapred.child.java.opts" configuration property.<br />(Enis Soztutar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1372">HADOOP-1372</a>.  Use LocalDirAllocator for HDFS temporary block
+files, so that disk space, writability, etc. is considered.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1193">HADOOP-1193</a>.  Pool allocation of compression codecs.  This
+eliminates a memory leak that could cause OutOfMemoryException,
+and also substantially improves performance.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1492">HADOOP-1492</a>.  Fix a NullPointerException handling version
+mismatch during datanode registration.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1442">HADOOP-1442</a>.  Fix handling of zero-length input splits.<br />(Senthil Subramanian via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1444">HADOOP-1444</a>.  Fix HDFS block id generation to check pending
+blocks for duplicates.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1207">HADOOP-1207</a>.  Fix FsShell's 'rm' command to not stop when one of
+the named files does not exist.<br />(Tsz Wo Sze via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1475">HADOOP-1475</a>.  Clear tasktracker's file cache before it
+re-initializes, to avoid confusion.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1505">HADOOP-1505</a>.  Remove spurious stacktrace in ZlibFactory
+introduced in <a href="http://issues.apache.org/jira/browse/HADOOP-1093">HADOOP-1093</a>.<br />(Michael Stack via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1484">HADOOP-1484</a>.  Permit one to kill jobs from the web ui.  Note that
+this is disabled by default.  One must set
+"webinterface.private.actions" to enable this.<br />(Enis Soztutar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1003">HADOOP-1003</a>.  Remove flushing of namenode edit log from primary
+namenode lock, increasing namenode throughput.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1023">HADOOP-1023</a>.  Add links to searchable mail archives.<br />(tomwhite via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1504">HADOOP-1504</a>.  Fix terminate-hadoop-cluster script in contrib/ec2
+to only terminate Hadoop instances, and not other instances
+started by the same user.<br />(tomwhite via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1462">HADOOP-1462</a>.  Improve task progress reporting.  Progress reports
+are no longer blocking since i/o is performed in a separate
+thread.  Reporting during sorting and more is also more
+consistent.<br />(Vivek Ratan via cutting)</li>
+      <li>[ intentionally blank ]
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1453">HADOOP-1453</a>.  Remove some unneeded calls to FileSystem#exists()
+when opening files, reducing the namenode load somewhat.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1489">HADOOP-1489</a>.  Fix text input truncation bug due to mark/reset.
+Add a unittest.<br />(Bwolen Yang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1455">HADOOP-1455</a>.  Permit specification of arbitrary job options on
+pipes command line.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1501">HADOOP-1501</a>.  Better randomize sending of block reports to
+the namenode, to reduce load spikes.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1147">HADOOP-1147</a>.  Remove @author tags from Java source files.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1283">HADOOP-1283</a>.  Convert most uses of UTF8 in the namenode to be
+String.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1511">HADOOP-1511</a>.  Speedup hbase unit tests.<br />(stack via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1517">HADOOP-1517</a>.  Remove some synchronization in namenode to permit
+finer grained locking previously added.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1512">HADOOP-1512</a>.  Fix failing TestTextInputFormat on Windows.<br />(Senthil Subramanian via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1518">HADOOP-1518</a>.  Add a session id to job metrics, for use by HOD.<br />(David Bowen via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1292">HADOOP-1292</a>.  Change 'bin/hadoop fs -get' to first copy files to
+a temporary name, then rename them to their final name, so that
+failures don't leave partial files.<br />(Tsz Wo Sze via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1377">HADOOP-1377</a>.  Add support for modification time to FileSystem and
+implement in HDFS and local implementations.  Also, alter access
+to file properties to be through a new FileStatus interface.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1515">HADOOP-1515</a>.  Add MultiFileInputFormat, which can pack multiple,
+typically small, input files into each split.<br />(Enis Soztutar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1514">HADOOP-1514</a>.  Make reducers report progress while waiting for map
+outputs, so they're not killed.<br />(Vivek Ratan via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1508">HADOOP-1508</a>.  Add an Ant task for FsShell operations.  Also add
+new FsShell commands "touchz", "test" and "stat".<br />(Chris Douglas via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1028">HADOOP-1028</a>.  Add log messages for server startup and shutdown.<br />(Tsz Wo Sze via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1485">HADOOP-1485</a>.  Add metrics for monitoring shuffle.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1536">HADOOP-1536</a>.  Remove file locks from libhdfs tests.<br />(Dhruba Borthakur via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1520">HADOOP-1520</a>.  Add appropriate synchronization to FSEditsLog.<br />(Dhruba Borthakur via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1513">HADOOP-1513</a>.  Fix a race condition in directory creation.<br />(Devaraj via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1546">HADOOP-1546</a>.  Remove spurious column from HDFS web UI.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1556">HADOOP-1556</a>.  Make LocalJobRunner delete working files at end of
+job run.<br />(Devaraj Das via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1571">HADOOP-1571</a>.  Add contrib lib directories to root build.xml
+javadoc classpath.<br />(Michael Stack via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1554">HADOOP-1554</a>.  Log killed tasks to the job history and display them on the
+web/ui.<br />(Devaraj Das via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1533">HADOOP-1533</a>.  Add persistent error logging for distcp. The logs are stored
+in a specified HDFS directory.<br />(Senthil Subramanian via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1286">HADOOP-1286</a>.  Add support to HDFS for distributed upgrades, which
+permits coordinated upgrade of datanode data.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1580">HADOOP-1580</a>.  Improve contrib/streaming so that subprocess exit
+status is displayed for errors.<br />(John Heidemann via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1448">HADOOP-1448</a>.  In HDFS, randomize lists of non-local block
+locations returned to client, so that load is better balanced.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1578">HADOOP-1578</a>.  Fix datanode to send its storage id to namenode
+during registration.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1584">HADOOP-1584</a>.  Fix a bug in GenericWritable which limited it to
+128 types instead of 256.<br />(Espen Amble Kolstad via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1473">HADOOP-1473</a>.  Make job ids unique across jobtracker restarts.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1582">HADOOP-1582</a>.  Fix hdfslib to return 0 instead of -1 at
+end-of-file, per C conventions.<br />(Christian Kunz via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-911">HADOOP-911</a>.  Fix a multithreading bug in libhdfs.<br />(Christian Kunz)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1486">HADOOP-1486</a>.  Fix so that fatal exceptions in namenode cause it
+to exit.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1470">HADOOP-1470</a>.  Factor checksum generation and validation out of
+ChecksumFileSystem so that it can be reused by FileSystem's with
+built-in checksumming.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1590">HADOOP-1590</a>.  Use relative urls in jobtracker jsp pages, so that
+webapp can be used in non-root contexts.<br />(Thomas Friol via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1596">HADOOP-1596</a>.  Fix the parsing of taskids by streaming and improve the
+error reporting.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1535">HADOOP-1535</a>.  Fix the user-controlled grouping to the reduce function.<br />(Vivek Ratan via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1585">HADOOP-1585</a>.  Modify GenericWritable to declare the classes as subtypes
+of Writable.<br />(Espen Amble Kolstad via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1576">HADOOP-1576</a>.  Fix errors in count of completed tasks when
+speculative execution is enabled.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1598">HADOOP-1598</a>.  Fix license headers: adding missing; updating old.<br />(Enis Soztutar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1547">HADOOP-1547</a>.  Provide examples for aggregate library.<br />(Runping Qi via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1570">HADOOP-1570</a>.  Permit jobs to enable and disable the use of
+hadoop's native library.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1433">HADOOP-1433</a>.  Add job priority.<br />(Johan Oskarsson via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1597">HADOOP-1597</a>.  Add status reports and post-upgrade options to HDFS
+distributed upgrade.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1524">HADOOP-1524</a>.  Permit user task logs to appear as they're
+created.<br />(Michael Bieniosek via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1599">HADOOP-1599</a>.  Fix distcp bug on Windows.<br />(Senthil Subramanian via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1562">HADOOP-1562</a>.  Add JVM metrics, including GC and logging stats.<br />(David Bowen via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1613">HADOOP-1613</a>.  Fix "DFS Health" page to display correct time of
+last contact.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1134">HADOOP-1134</a>.  Add optimized checksum support to HDFS.  Checksums
+are now stored with each block, rather than as parallel files.
+This reduces the namenode's memory requirements and increases
+data integrity.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1400">HADOOP-1400</a>.  Make JobClient retry requests, so that clients can
+survive jobtracker problems (see the editor's sketch after this release
+section).<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1564">HADOOP-1564</a>.  Add unit tests for HDFS block-level checksums.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1620">HADOOP-1620</a>.  Reduce the number of abstract FileSystem methods,
+simplifying implementations.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1625">HADOOP-1625</a>.  Fix a "could not move files" exception in datanode.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1624">HADOOP-1624</a>.  Fix an infinite loop in datanode.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1084">HADOOP-1084</a>.  Switch mapred file cache to use file modification
+time instead of checksum to detect file changes, as checksums are
+no longer easily accessed.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1623">HADOOP-1623</a>.  Fix an infinite loop when copying directories.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1603">HADOOP-1603</a>.  Fix a bug in namenode initialization where
+default replication is sometimes reset to one on restart.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1635">HADOOP-1635</a>.  Remove hardcoded keypair name and fix launch-hadoop-cluster
+to support later versions of ec2-api-tools.<br />(Stu Hood via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1638">HADOOP-1638</a>.  Fix contrib EC2 scripts to support NAT addressing.<br />(Stu Hood via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1632">HADOOP-1632</a>.  Fix an IllegalArgumentException in fsck.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1619">HADOOP-1619</a>.  Fix FSInputChecker to not attempt to read past EOF.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1640">HADOOP-1640</a>.  Fix TestDecommission on Windows.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1587">HADOOP-1587</a>.  Fix TestSymLink to get required system properties.<br />(Devaraj Das via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1628">HADOOP-1628</a>.  Add block CRC protocol unit tests.<br />(Raghu Angadi via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1653">HADOOP-1653</a>.  FSDirectory code-cleanups. FSDirectory.INode
+becomes a static class.<br />(Christophe Taton via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1066">HADOOP-1066</a>.  Restructure documentation to make more user
+friendly.<br />(Connie Kleinjans and Jeff Hammerbacher via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1551">HADOOP-1551</a>.  libhdfs supports setting replication factor and
+retrieving modification time of files.<br />(Sameer Paranjpye via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1647">HADOOP-1647</a>.  FileSystem.getFileStatus returns valid values for "/".<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1657">HADOOP-1657</a>.  Fix NNBench to ensure that the block size is a
+multiple of bytes.per.checksum.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1553">HADOOP-1553</a>.  Replace user task output and log capture code to use shell
+redirection instead of copier threads in the TaskTracker. Capping the
+size of the output is now done via tail in memory and thus should not be
+large. The output of the tasklog servlet is not forced into UTF8 and is
+not buffered entirely in memory. (omalley)
+Configuration changes to hadoop-default.xml:
+  remove mapred.userlog.num.splits
+  remove mapred.userlog.purge.splits
+  change default mapred.userlog.limit.kb to 0 (no limit)
+  change default mapred.userlog.retain.hours to 24
+Configuration changes to log4j.properties:
+  remove log4j.appender.TLA.noKeepSplits
+  remove log4j.appender.TLA.purgeLogSplits
+  remove log4j.appender.TLA.logsRetainHours
+URL changes:
+  http://&lt;tasktracker&gt;/tasklog.jsp -&gt; http://&lt;tasktracker&gt;/tasklog with
+    parameters limited to start and end, which may be positive (from
+    start) or negative (from end).
+Environment:
+  require bash (v2 or later) and tail
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1659">HADOOP-1659</a>.  Fix a job id/job name mixup.<br />(Arun C. Murthy via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1665">HADOOP-1665</a>.  With HDFS Trash enabled and the same file was created
+and deleted more than once, the suceeding deletions creates Trash item
+names suffixed with a integer.<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1666">HADOOP-1666</a>.  FsShell object can be used for multiple fs commands.<br />(Dhruba Borthakur via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1654">HADOOP-1654</a>.  Remove performance regression introduced by Block CRC.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1680">HADOOP-1680</a>.  Improvements to Block CRC upgrade messages.<br />(Raghu Angadi via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-71">HADOOP-71</a>.  Allow Text and SequenceFile Map/Reduce inputs from non-default
+filesystems.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1568">HADOOP-1568</a>.  Expose HDFS as xml/http filesystem to provide cross-version
+compatibility.<br />(Chris Douglas via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1668">HADOOP-1668</a>.  Added an INCOMPATIBILITY section to CHANGES.txt.<br />(nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1629">HADOOP-1629</a>.  Added a upgrade test for <a href="http://issues.apache.org/jira/browse/HADOOP-1134">HADOOP-1134</a>.<br />(Raghu Angadi via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1698">HADOOP-1698</a>.  Fix performance problems on map output sorting for jobs
+with large numbers of reduces.<br />(Devaraj Das via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1716">HADOOP-1716</a>.  Fix a Pipes wordcount example to remove the 'file:'
+schema from its output path.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1714">HADOOP-1714</a>.  Fix TestDFSUpgradeFromImage to work on Windows.<br />(Raghu Angadi via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1663">HADOOP-1663</a>.  Return a non-zero exit code if streaming fails.<br />(Lohit Renu
+via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1712">HADOOP-1712</a>.  Fix an unhandled exception on datanode during block
+CRC upgrade.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1717">HADOOP-1717</a>.  Fix TestDFSUpgradeFromImage to work on Solaris.<br />(nigel via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1437">HADOOP-1437</a>.  Add Eclipse plugin in contrib.<br />(Eugene Hung and Christophe Taton via cutting)</li>
+    </ol>
+  </li>
+</ul>
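+<p><em>Editor's note:</em> the HADOOP-1493 entry above allows the "java.library.path"
+system property to be passed to task JVMs through "mapred.child.java.opts". Below is
+a minimal, hypothetical sketch of a job setting that option; the class name, heap
+size, and library directory are illustrative only, not taken from the changelog.</p>
+<pre>
+// Hypothetical job setup: embed java.library.path in the child JVM options.
+import org.apache.hadoop.mapred.JobConf;
+
+public class ChildOptsExample {
+  public static void main(String[] args) {
+    JobConf job = new JobConf();
+    job.set("mapred.child.java.opts",
+            "-Xmx200m -Djava.library.path=/opt/hypothetical/native/lib");
+    // Echo the value that task JVMs would be launched with.
+    System.out.println(job.get("mapred.child.java.opts"));
+  }
+}
+</pre>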
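+<p><em>Editor's note:</em> the HADOOP-1400 entries above change JobClient to be
+constructed from a JobConf and to retry requests when the jobtracker is briefly
+unreachable. A minimal, hypothetical submission sketch follows; the class and job
+names are illustrative, and mapper, reducer, and input/output settings are omitted
+even though a real job must set them before submission.</p>
+<pre>
+// Hypothetical submission through the JobConf-based JobClient constructor.
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RunningJob;
+
+public class SubmitExample {
+  public static void main(String[] args) throws Exception {
+    JobConf job = new JobConf();
+    job.setJobName("hypothetical-example");
+    // Mapper/reducer classes and input/output paths are omitted for brevity;
+    // submitJob would reject a job that does not define them.
+    JobClient client = new JobClient(job);   // constructor now takes a JobConf
+    RunningJob running = client.submitJob(job);
+    System.out.println("submitted " + running.getJobID());
+  }
+}
+</pre>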
+<h3><a href="javascript:toggleList('release_0.13.0_-_2007-06-08_')">Release 0.13.0 - 2007-06-08
+</a></h3>
+    <ol id="release_0.13.0_-_2007-06-08_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1047">HADOOP-1047</a>.  Fix TestReplication to succeed more reliably.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1063">HADOOP-1063</a>.  Fix a race condition in MiniDFSCluster test code.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1101">HADOOP-1101</a>.  In web ui, split shuffle statistics from reduce
+statistics, and add some task averages.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1071">HADOOP-1071</a>.  Improve handling of protocol version mismatch in
+JobTracker.<br />(Tahir Hashmi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1116">HADOOP-1116</a>.  Increase heap size used for contrib unit tests.<br />(Philippe Gassmann via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1120">HADOOP-1120</a>.  Add contrib/data_join, tools to simplify joining
+data from multiple sources using MapReduce.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1064">HADOOP-1064</a>.  Reduce log level of some DFSClient messages.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1137">HADOOP-1137</a>.  Fix StatusHttpServer to work correctly when
+resources are in a jar file.<br />(Benjamin Reed via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1094">HADOOP-1094</a>.  Optimize generated Writable implementations for
+records to not allocate a new BinaryOutputArchive or
+BinaryInputArchive per call.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1068">HADOOP-1068</a>.  Improve error message for clusters with 0 datanodes.<br />(Dhruba Borthakur via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1122">HADOOP-1122</a>.  Fix divide-by-zero exception in FSNamesystem
+chooseTarget method.<br />(Dhruba Borthakur via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1131">HADOOP-1131</a>.  Add a closeAll() static method to FileSystem.<br />(Philippe Gassmann via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1085">HADOOP-1085</a>.  Improve port selection in HDFS and MapReduce test
+code.  Ports are now selected by the OS during testing rather than
+by probing for free ports, improving test reliability.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1153">HADOOP-1153</a>.  Fix HDFS daemons to correctly stop their threads.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1146">HADOOP-1146</a>.  Add a counter for reduce input keys and rename the
+"reduce input records" counter to be "reduce input groups".<br />(David Bowen via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1165">HADOOP-1165</a>.  In records, replace idential generated toString
+methods with a method on the base class.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1164">HADOOP-1164</a>.  Fix TestReplicationPolicy to specify port zero, so
+that a free port is automatically selected.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1166">HADOOP-1166</a>.  Add a NullOutputFormat and use it in the
+RandomWriter example.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1169">HADOOP-1169</a>.  Fix a cut/paste error in CopyFiles utility so that
+S3-based source files are correctly copied.<br />(Michael Stack via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1167">HADOOP-1167</a>.  Remove extra synchronization in InMemoryFileSystem.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1110">HADOOP-1110</a>.  Fix an off-by-one error counting map inputs.<br />(David Bowen via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1178">HADOOP-1178</a>.  Fix a NullPointerException during namenode startup.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1011">HADOOP-1011</a>.  Fix a ConcurrentModificationException when viewing
+job history.<br />(Tahir Hashmi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-672">HADOOP-672</a>.  Improve help for fs shell commands.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1170">HADOOP-1170</a>.  Improve datanode performance by removing device
+checks from common operations.<br />(Igor Bolotin via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1090">HADOOP-1090</a>.  Fix SortValidator's detection of whether the input
+file belongs to the sort-input or sort-output directory.<br />(Arun C Murthy via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1081">HADOOP-1081</a>.  Fix bin/hadoop on Darwin.<br />(Michael Bieniosek via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1045">HADOOP-1045</a>.  Add contrib/hbase, a BigTable-like online database.<br />(Jim Kellerman via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1156">HADOOP-1156</a>.  Fix a NullPointerException in MiniDFSCluster.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-702">HADOOP-702</a>.  Add tools to help automate HDFS upgrades.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1163">HADOOP-1163</a>.  Fix ganglia metrics to aggregate metrics from different
+hosts properly.<br />(Michael Bieniosek via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1194">HADOOP-1194</a>.  Make compression style record level for map output
+compression.<br />(Arun C Murthy via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1187">HADOOP-1187</a>.  Improve DFS Scalability: avoid scanning entire list of
+datanodes in getAdditionalBlocks.<br />(Dhruba Borthakur via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1133">HADOOP-1133</a>.  Add tool to analyze and debug namenode on a production
+cluster.<br />(Dhruba Borthakur via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1151">HADOOP-1151</a>.  Remove spurious printing to stderr in streaming
+PipeMapRed.<br />(Koji Noguchi via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-988">HADOOP-988</a>.  Change namenode to use a single map of blocks to metadata.<br />(Raghu Angadi via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1203">HADOOP-1203</a>.  Change UpgradeUtilities used by DFS tests to use
+MiniDFSCluster to start and stop NameNode/DataNodes.<br />(Nigel Daley via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1217">HADOOP-1217</a>.  Add test.timeout property to build.xml, so that
+long-running unit tests may be automatically terminated.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1149">HADOOP-1149</a>.  Improve DFS Scalability: make
+processOverReplicatedBlock() a no-op if blocks are not
+over-replicated.<br />(Raghu Angadi via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1149">HADOOP-1149</a>.  Improve DFS Scalability: optimize getDistance(),
+contains(), and isOnSameRack() in NetworkTopology.<br />(Hairong Kuang via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1218">HADOOP-1218</a>.  Make synchronization on TaskTracker's RunningJob
+object consistent.<br />(Devaraj Das via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1219">HADOOP-1219</a>.  Ignore progress report once a task has reported as
+'done'.<br />(Devaraj Das via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1114">HADOOP-1114</a>.  Permit user to specify additional CLASSPATH elements
+with a HADOOP_CLASSPATH environment variable.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1198">HADOOP-1198</a>.  Remove ipc.client.timeout parameter override from
+unit test configuration.  Using the default is more robust and
+has almost the same run time.<br />(Arun C Murthy via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1211">HADOOP-1211</a>.  Remove deprecated constructor and unused static
+members in DataNode class.<br />(Konstantin Shvachko via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1136">HADOOP-1136</a>.  Fix ArrayIndexOutOfBoundsException in
+FSNamesystem$UnderReplicatedBlocks add() method.<br />(Hairong Kuang via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-978">HADOOP-978</a>.  Add the client name and the address of the node that
+previously started to create the file to the description of
+AlreadyBeingCreatedException.<br />(Konstantin Shvachko via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1001">HADOOP-1001</a>.  Check the type of keys and values generated by the
+mapper against the types specified in JobConf.<br />(Tahir Hashmi via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-971">HADOOP-971</a>.  Improve DFS Scalability: Improve name node performance
+by adding a hostname-to-datanodes map.<br />(Hairong Kuang via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1189">HADOOP-1189</a>.  Fix 'No space left on device' exceptions on datanodes.<br />(Raghu Angadi via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-819">HADOOP-819</a>.  Change LineRecordWriter to not insert a tab between
+key and value when either is null, and to print nothing when both
+are null.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1204">HADOOP-1204</a>.  Rename InputFormatBase to be FileInputFormat, and
+deprecate InputFormatBase.  Also make LineRecordReader easier to
+extend.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1213">HADOOP-1213</a>.  Improve logging of errors by IPC server, to
+consistently include the service name and the call.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1238">HADOOP-1238</a>.  Fix metrics reporting by TaskTracker to correctly
+track maps_running and reduces_running.<br />(Michael Bieniosek via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1093">HADOOP-1093</a>.  Fix a race condition in HDFS where blocks were
+sometimes erased before they were reported written.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1239">HADOOP-1239</a>.  Add a package name to some testjar test classes.<br />(Jim Kellerman via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1241">HADOOP-1241</a>.  Fix NullPointerException in processReport when
+namenode is restarted.<br />(Dhruba Borthakur via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1244">HADOOP-1244</a>.  Fix stop-dfs.sh to no longer incorrectly specify
+slaves file for stopping datanode.<br />(Michael Bieniosek via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1253">HADOOP-1253</a>.  Fix ConcurrentModificationException and
+NullPointerException in JobControl.<br />(Johan Oskarson via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1256">HADOOP-1256</a>.  Fix NameNode so that multiple DataNodeDescriptors
+can no longer be created on startup.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1214">HADOOP-1214</a>.  Replace streaming classes with new counterparts
+from Hadoop core.<br />(Runping Qi via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1250">HADOOP-1250</a>.  Move a chmod utility from streaming to FileUtil.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1258">HADOOP-1258</a>.  Fix TestCheckpoint test case to wait for
+MiniDFSCluster to be active.<br />(Nigel Daley via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1148">HADOOP-1148</a>.  Re-indent all Java source code to consistently use
+two spaces per indent level.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1251">HADOOP-1251</a>.  Add a method to Reporter to get the map InputSplit.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1224">HADOOP-1224</a>.  Fix "Browse the filesystem" link to no longer point
+to dead datanodes.<br />(Enis Soztutar via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1154">HADOOP-1154</a>.  Fail a streaming task if the threads reading from or
+writing to the streaming process fail.<br />(Koji Noguchi via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-968">HADOOP-968</a>.  Move shuffle and sort to run in reduce's child JVM,
+rather than in TaskTracker.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1111">HADOOP-1111</a>.  Add support for client notification of job
+completion. If the job configuration has a job.end.notification.url
+property, it will make an HTTP GET request to the specified URL.
+The number of retries and the interval between retries is also
+configurable.<br />(Alejandro Abdelnur via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1275">HADOOP-1275</a>.  Fix misspelled job notification property in
+hadoop-default.xml.<br />(Alejandro Abdelnur via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1152">HADOOP-1152</a>.  Fix race condition in MapOutputCopier.copyOutput file
+rename causing possible reduce task hang.<br />(Tahir Hashmi via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1050">HADOOP-1050</a>.  Distinguish between failed and killed tasks so as to
+not count a lost tasktracker against the job.<br />(Arun C Murthy via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1271">HADOOP-1271</a>.  Fix StreamBaseRecordReader to be able to log record
+data that's not UTF-8.<br />(Arun C Murthy via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1190">HADOOP-1190</a>.  Fix unchecked warnings in main Hadoop code.<br />(tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1127">HADOOP-1127</a>.  Fix AlreadyBeingCreatedException in namenode for
+jobs run with speculative execution.<br />(Arun C Murthy via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1282">HADOOP-1282</a>.  Omnibus HBase patch.  Improved tests &amp; configuration.<br />(Jim Kellerman via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1262">HADOOP-1262</a>.  Make dfs client try to read from a different replica
+of the checksum file when a checksum error is detected.<br />(Hairong Kuang via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1279">HADOOP-1279</a>.  Fix JobTracker to maintain list of recently
+completed jobs by order of completion, not submission.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1284">HADOOP-1284</a>.  In contrib/streaming, permit flexible specification
+of field delimiter and fields for partitioning and sorting.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1176">HADOOP-1176</a>.  Fix a bug where reduce would hang when a map had
+more than 2GB of output for it.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1293">HADOOP-1293</a>.  Fix contrib/streaming to print more than the first
+twenty lines of standard error.<br />(Koji Noguchi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1297">HADOOP-1297</a>.  Fix datanode so that requests to remove blocks that
+do not exist no longer causes block reports to be re-sent every
+second.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1216">HADOOP-1216</a>.  Change MapReduce so that, when numReduceTasks is
+zero, map outputs are written directly as final output, skipping
+shuffle, sort and reduce.  Use this to implement reduce=NONE
+option in contrib/streaming.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1294">HADOOP-1294</a>.  Fix unchecked warnings in main Hadoop code under
+Java 6.<br />(tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1299">HADOOP-1299</a>.  Fix so that RPC will restart after RPC.stopClient()
+has been called.<br />(Michael Stack via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1278">HADOOP-1278</a>.  Improve blacklisting of TaskTrackers by JobTracker,
+to reduce false positives.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1290">HADOOP-1290</a>.  Move contrib/abacus into mapred/lib/aggregate.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1272">HADOOP-1272</a>.  Extract inner classes from FSNamesystem into separate
+classes.<br />(Dhruba Borthakur via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1247">HADOOP-1247</a>.  Add support to contrib/streaming for aggregate
+package, formerly called Abacus.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1061">HADOOP-1061</a>.  Fix bug in listing files in the S3 filesystem.
+NOTE: this change is not backwards compatible!  You should use the
+MigrationTool supplied to migrate existing S3 filesystem data to
+the new format.  Please backup your data first before upgrading
+(using 'hadoop distcp' for example).<br />(tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1304">HADOOP-1304</a>.  Make configurable the maximum number of task
+attempts before a job fails.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1308">HADOOP-1308</a>.  Use generics to restrict types when classes are
+passed as parameters to JobConf methods.<br />(Michael Bieniosek via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1312">HADOOP-1312</a>.  Fix a ConcurrentModificationException in NameNode
+that killed the heartbeat monitoring thread.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1315">HADOOP-1315</a>.  Clean up contrib/streaming, switching it to use core
+classes more and removing unused code.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-485">HADOOP-485</a>.  Allow a different comparator for grouping keys in
+calls to reduce.<br />(Tahir Hashmi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1322">HADOOP-1322</a>.  Fix TaskTracker blacklisting to work correctly in
+one- and two-node clusters.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1144">HADOOP-1144</a>.  Permit one to specify a maximum percentage of tasks
+that can fail before a job is aborted.  The default is zero.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1184">HADOOP-1184</a>.  Fix HDFS decomissioning to complete when the only
+copy of a block is on a decommissioned node.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1263">HADOOP-1263</a>.  Change DFSClient to retry certain namenode calls
+with a random, exponentially increasing backoff time, to avoid
+overloading the namenode on, e.g., job start.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1325">HADOOP-1325</a>.  First complete, functioning version of HBase.<br />(Jim Kellerman via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1276">HADOOP-1276</a>.  Make tasktracker expiry interval configurable.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1326">HADOOP-1326</a>.  Change JobClient#RunJob() to return the job.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1270">HADOOP-1270</a>.  Randomize the fetch of map outputs, speeding the
+shuffle.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1200">HADOOP-1200</a>.  Restore disk checking lost in <a href="http://issues.apache.org/jira/browse/HADOOP-1170">HADOOP-1170</a>.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1252">HADOOP-1252</a>.  Changed MapReduce's allocation of local files to
+use round-robin among available devices, rather than a hashcode.
+More care is also taken to not allocate files on full or offline
+drives.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1324">HADOOP-1324</a>.  Change so that an FSError kills only the task that
+generates it rather than the entire task tracker.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1310">HADOOP-1310</a>.  Fix unchecked warnings in aggregate code.<br />(tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1255">HADOOP-1255</a>.  Fix a bug where the namenode falls into an infinite
+loop trying to remove a dead node.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1160">HADOOP-1160</a>.  Fix DistributedFileSystem.close() to close the
+underlying FileSystem, correctly aborting files being written.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1341">HADOOP-1341</a>.  Fix intermittent failures in HBase unit tests
+caused by deadlock.<br />(Jim Kellerman via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1350">HADOOP-1350</a>.  Fix shuffle performance problem caused by forcing
+chunked encoding of map outputs.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1345">HADOOP-1345</a>.  Fix HDFS to correctly retry another replica when a
+checksum error is encountered.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1205">HADOOP-1205</a>.  Improve synchronization around HDFS block map.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1353">HADOOP-1353</a>.  Fix a potential NullPointerException in namenode.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1354">HADOOP-1354</a>.  Fix a potential NullPointerException in FsShell.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1358">HADOOP-1358</a>.  Fix a potential bug when DFSClient calls skipBytes.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1356">HADOOP-1356</a>.  Fix a bug in ValueHistogram.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1363">HADOOP-1363</a>.  Fix locking bug in JobClient#waitForCompletion().<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1368">HADOOP-1368</a>.  Fix inconsistent synchronization in JobInProgress.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1369">HADOOP-1369</a>.  Fix inconsistent synchronization in TaskTracker.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1361">HADOOP-1361</a>.  Fix various calls to skipBytes() to check return
+value.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1388">HADOOP-1388</a>.  Fix a potential NullPointerException in web ui.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1385">HADOOP-1385</a>.  Fix MD5Hash#hashCode() to generally hash to more
+than 256 values.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1386">HADOOP-1386</a>.  Fix Path to not permit the empty string as a
+path, as this has led to accidental file deletion.  Instead
+force applications to use "." to name the default directory.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1407">HADOOP-1407</a>.  Fix integer division bug in JobInProgress which
+meant failed tasks didn't cause the job to fail.<br />(Arun C Murthy via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1427">HADOOP-1427</a>.  Fix a typo that caused GzipCodec to incorrectly use
+a very small input buffer.<br />(Espen Amble Kolstad via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1435">HADOOP-1435</a>.  Fix globbing code to no longer use the empty string
+to indicate the default directory, per <a href="http://issues.apache.org/jira/browse/HADOOP-1386">HADOOP-1386</a>.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1411">HADOOP-1411</a>.  Make task retry framework handle
+AlreadyBeingCreatedException when wrapped as a RemoteException.<br />(Hairong Kuang via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1242">HADOOP-1242</a>.  Improve handling of DFS upgrades.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1332">HADOOP-1332</a>.  Fix so that TaskTracker exits reliably during unit
+tests on Windows.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1431">HADOOP-1431</a>.  Fix so that sort progress reporting during map runs
+only while sorting, so that stuck maps are correctly terminated.<br />(Devaraj Das and Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1452">HADOOP-1452</a>.  Change TaskTracker.MapOutputServlet.doGet.totalRead
+to a long, permitting map outputs to exceed 2^31 bytes.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1443">HADOOP-1443</a>.  Fix a bug opening zero-length files in HDFS.<br />(Konstantin Shvachko via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.12.3_-_2007-04-06_')">Release 0.12.3 - 2007-04-06
+</a></h3>
+    <ol id="release_0.12.3_-_2007-04-06_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1162">HADOOP-1162</a>.  Fix bug in record CSV and XML serialization of
+binary values.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1123">HADOOP-1123</a>.  Fix NullPointerException in LocalFileSystem when
+trying to recover from a checksum error.<br />(Hairong Kuang &amp; Nigel Daley via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1177">HADOOP-1177</a>.  Fix bug where IOException in MapOutputLocation.getFile
+was not being logged.<br />(Devaraj Das via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1175">HADOOP-1175</a>.  Fix bugs in JSP for displaying a task's log messages.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1191">HADOOP-1191</a>.  Fix map tasks to wait until sort progress thread has
+stopped before reporting the task done.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1192">HADOOP-1192</a>.  Fix an integer overflow bug in FSShell's 'dus'
+command and a performance problem in HDFS's implementation of it.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1105">HADOOP-1105</a>. Fix reducers to make "progress" while iterating
+through values.<br />(Devaraj Das &amp; Owen O'Malley via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1179">HADOOP-1179</a>. Make Task Tracker close index file as soon as the read
+is done when serving get-map-output requests.<br />(Devaraj Das via tomwhite)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.12.2_-_2007-23-17_')">Release 0.12.2 - 2007-23-17
+</a></h3>
+    <ol id="release_0.12.2_-_2007-23-17_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1135">HADOOP-1135</a>.  Fix bug in block report processing which may cause
+the namenode to delete blocks.<br />(Dhruba Borthakur via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1145">HADOOP-1145</a>.  Make XML serializer and deserializer classes public
+in record package.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1140">HADOOP-1140</a>.  Fix a deadlock in metrics.<br />(David Bowen via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1150">HADOOP-1150</a>.  Fix streaming -reducer and -mapper to give them
+defaults.<br />(Owen O'Malley via tomwhite)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.12.1_-_2007-03-17_')">Release 0.12.1 - 2007-03-17
+</a></h3>
+    <ol id="release_0.12.1_-_2007-03-17_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1035">HADOOP-1035</a>.  Fix a StackOverflowError in FSDataSet.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1053">HADOOP-1053</a>.  Fix VInt representation of negative values.  Also
+remove references in generated record code to methods outside of
+the record package and improve some record documentation.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1067">HADOOP-1067</a>.  Compile fails if Checkstyle jar is present in lib
+directory. Also remove dependency on a particular Checkstyle
+version number.<br />(tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1060">HADOOP-1060</a>.  Fix an IndexOutOfBoundsException in the JobTracker
+that could cause jobs to hang.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1077">HADOOP-1077</a>.  Fix a race condition fetching map outputs that could
+hang reduces.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1083">HADOOP-1083</a>.  Fix so that when a cluster restarts with a missing
+datanode, its blocks are replicated.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1082">HADOOP-1082</a>.  Fix a NullPointerException in ChecksumFileSystem.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1088">HADOOP-1088</a>.  Fix record serialization of negative values.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1080">HADOOP-1080</a>.  Fix bug in bin/hadoop on Windows when native
+libraries are present.<br />(ab via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1091">HADOOP-1091</a>.  Fix a NullPointerException in MetricsRecord.<br />(David Bowen via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1092">HADOOP-1092</a>.  Fix a NullPointerException in HeartbeatMonitor
+thread.<br />(Hairong Kuang via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1112">HADOOP-1112</a>.  Fix a race condition in Hadoop metrics.<br />(David Bowen via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1108">HADOOP-1108</a>.  Checksummed file system should retry reading if a
+different replica is found when handling ChecksumException.<br />(Hairong Kuang via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1070">HADOOP-1070</a>.  Fix a problem with number of racks and datanodes
+temporarily doubling.<br />(Konstantin Shvachko via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1099">HADOOP-1099</a>.  Fix NullPointerException in JobInProgress.<br />(Gautam Kowshik via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1115">HADOOP-1115</a>.  Fix bug where FsShell copyToLocal doesn't
+copy directories.<br />(Hairong Kuang via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1109">HADOOP-1109</a>.  Fix NullPointerException in StreamInputFormat.<br />(Koji Noguchi via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1117">HADOOP-1117</a>.  Fix DFS scalability: when the namenode is
+restarted it consumes 80% CPU.<br />(Dhruba Borthakur via
+tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1089">HADOOP-1089</a>.  Make the C++ version of write and read v-int
+agree with the Java versions.<br />(Milind Bhandarkar via
+tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1096">HADOOP-1096</a>.  Rename InputArchive and OutputArchive and
+make them public.<br />(Milind Bhandarkar via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1128">HADOOP-1128</a>.  Fix missing progress information in map tasks.<br />(Espen Amble Kolstad, Andrzej Bialecki, and Owen O'Malley
+via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1129">HADOOP-1129</a>.  Fix DFSClient to not hide IOExceptions in
+flush method.<br />(Hairong Kuang via tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1126">HADOOP-1126</a>.  Optimize CPU usage for under replicated blocks
+when cluster restarts.<br />(Hairong Kuang via tomwhite)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.12.0_-_2007-03-02_')">Release 0.12.0 - 2007-03-02
+</a></h3>
+    <ol id="release_0.12.0_-_2007-03-02_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-975">HADOOP-975</a>.  Separate stdout and stderr from tasks.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-982">HADOOP-982</a>.  Add some setters and a toString() method to
+BytesWritable.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-858">HADOOP-858</a>.  Move contrib/smallJobsBenchmark to src/test, removing
+obsolete bits.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-992">HADOOP-992</a>.  Fix MiniMR unit tests to use MiniDFS when specified,
+rather than the local FS.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-954">HADOOP-954</a>.  Change use of metrics to use callback mechanism.
+Also rename utility class Metrics to MetricsUtil.<br />(David Bowen &amp; Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-893">HADOOP-893</a>.  Improve HDFS client's handling of dead datanodes.
+The set is no longer reset with each block, but rather is now
+maintained for the life of an open file.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-882">HADOOP-882</a>.  Upgrade to jets3t version 0.5, used by the S3
+FileSystem.  This version supports retries.<br />(Michael Stack via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-977">HADOOP-977</a>.  Send task's stdout and stderr to JobClient's stdout
+and stderr respectively, with each line tagged by the task's name.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-761">HADOOP-761</a>.  Change unit tests to not use /tmp.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1007">HADOOP-1007</a>. Make names of metrics used in Hadoop unique.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-491">HADOOP-491</a>.  Change mapred.task.timeout to be per-job, and make a
+value of zero mean no timeout.  Also change contrib/streaming to
+disable task timeouts.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1010">HADOOP-1010</a>.  Add Reporter.NULL, a Reporter implementation that
+does nothing.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-923">HADOOP-923</a>.  In HDFS NameNode, move replication computation to a
+separate thread, to improve heartbeat processing time.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-476">HADOOP-476</a>.  Rewrite contrib/streaming command-line processing,
+improving parameter validation.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-973">HADOOP-973</a>.  Improve error messages in Namenode.  This should help
+to track down a problem that was appearing as a
+NullPointerException.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-649">HADOOP-649</a>.  Fix so that jobs with no tasks are not lost.<br />(Thomas Friol via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-803">HADOOP-803</a>.  Reduce memory use by HDFS namenode, phase I.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1021">HADOOP-1021</a>.  Fix MRCaching-based unit tests on Windows.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-889">HADOOP-889</a>.  Remove duplicate code from HDFS unit tests.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-943">HADOOP-943</a>.  Improve HDFS's fsck command to display the filename
+for under-replicated blocks.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-333">HADOOP-333</a>.  Add validator for sort benchmark output.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-947">HADOOP-947</a>.  Improve performance of datanode decomissioning.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-442">HADOOP-442</a>.  Permit one to specify hosts allowed to connect to
+namenode and jobtracker with include and exclude files.<br />(Wendy
+Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1017">HADOOP-1017</a>.  Cache constructors, for improved performance.<br />(Ron Bodkin via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-867">HADOOP-867</a>.  Move split creation out of JobTracker to client.
+Splits are now saved in a separate file, read by task processes
+directly, so that user code is no longer required in the
+JobTracker.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1006">HADOOP-1006</a>.  Remove obsolete '-local' option from test code.<br />(Gautam Kowshik via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-952">HADOOP-952</a>. Create a public (shared) Hadoop EC2 AMI.
+The EC2 scripts now support launch of public AMIs.<br />(tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1025">HADOOP-1025</a>. Remove some obsolete code in ipc.Server.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-997">HADOOP-997</a>. Implement S3 retry mechanism for failed block
+transfers. This includes a generic retry mechanism for use
+elsewhere in Hadoop.<br />(tomwhite)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-990">HADOOP-990</a>.  Improve HDFS support for full datanode volumes.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-564">HADOOP-564</a>.  Replace uses of "dfs://" URIs with the more standard
+"hdfs://".<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1030">HADOOP-1030</a>.  In unit tests, unify setting of ipc.client.timeout.
+Also increase the value used from one to two seconds, in hopes of
+making tests complete more reliably.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-654">HADOOP-654</a>.  Stop assigning tasks to a tasktracker if it has
+failed more than a specified number in the job.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-985">HADOOP-985</a>.  Change HDFS to identify nodes by IP address rather
+than by DNS hostname.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-248">HADOOP-248</a>.  Optimize location of map outputs to not use random
+probes.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1029">HADOOP-1029</a>.  Fix streaming's input format to correctly seek to
+the start of splits.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-492">HADOOP-492</a>.  Add per-job and per-task counters.  These are
+incremented via the Reporter interface and available through the
+web ui and the JobClient API.  The mapreduce framework maintains a
+few basic counters, and applications may add their own.  Counters
+are also passed to the metrics system.<br />(David Bowen via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1034">HADOOP-1034</a>.  Fix datanode to better log exceptions.<br />(Philippe Gassmann via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-878">HADOOP-878</a>.  In contrib/streaming, fix reducer=NONE to work with
+multiple maps.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1039">HADOOP-1039</a>.  In HDFS's TestCheckpoint, avoid restarting
+MiniDFSCluster so often, speeding this test.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1040">HADOOP-1040</a>.  Update RandomWriter example to use counters and
+user-defined input and output formats.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1027">HADOOP-1027</a>.  Fix problems with in-memory merging during shuffle
+and re-enable this optimization.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1036">HADOOP-1036</a>.  Fix exception handling in TaskTracker to keep tasks
+from being lost.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1042">HADOOP-1042</a>.  Improve the handling of failed map output fetches.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-928">HADOOP-928</a>.  Make checksums optional per FileSystem.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1044">HADOOP-1044</a>.  Fix HDFS's TestDecommission to not spuriously fail.<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-972">HADOOP-972</a>.  Optimize HDFS's rack-aware block placement algorithm.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1043">HADOOP-1043</a>.  Optimize shuffle, increasing parallelism.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-940">HADOOP-940</a>.  Improve HDFS's replication scheduling.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1020">HADOOP-1020</a>.  Fix a bug in Path resolution, and a with unit tests
+on Windows.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-941">HADOOP-941</a>.  Enhance record facility.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1000">HADOOP-1000</a>.  Fix so that log messages in task subprocesses are
+not written to a task's standard error.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1037">HADOOP-1037</a>.  Fix bin/slaves.sh, which currently only works with
+/bin/bash, to specify /bin/bash rather than /bin/sh.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1046">HADOOP-1046</a>. Clean up tmp from partially received stale block files.<br />(ab)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1041">HADOOP-1041</a>.  Optimize mapred counter implementation.  Also group
+counters by their declaring Enum.<br />(David Bowen via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1032">HADOOP-1032</a>.  Permit one to specify jars that will be cached
+across multiple jobs.<br />(Gautam Kowshik via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1051">HADOOP-1051</a>.  Add optional checkstyle task to build.xml.  To use
+this developers must download the (LGPL'd) checkstyle jar
+themselves.<br />(tomwhite via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1049">HADOOP-1049</a>.  Fix a race condition in IPC client.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1056">HADOOP-1056</a>.  Check HDFS include/exclude node lists with both IP
+address and hostname.<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-994">HADOOP-994</a>.  In HDFS, limit the number of blocks invalidated at
+once.  Large lists were causing datanodes to time out.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-432">HADOOP-432</a>.  Add a trash feature, disabled by default.  When
+enabled, the FSShell 'rm' command will move things to a trash
+directory in the filesystem.  In HDFS, a thread periodically
+checkpoints the trash and removes old checkpoints.<br />(cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.11.2_-_2007-02-16_')">Release 0.11.2 - 2007-02-16
+</a></h3>
+    <ol id="release_0.11.2_-_2007-02-16_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1009">HADOOP-1009</a>.  Fix an infinite loop in the HDFS namenode.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1014">HADOOP-1014</a>.  Disable in-memory merging during shuffle, as this is
+causing data corruption.<br />(Devaraj Das via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.11.1_-_2007-02-09_')">Release 0.11.1 - 2007-02-09
+</a></h3>
+    <ol id="release_0.11.1_-_2007-02-09_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-976">HADOOP-976</a>.  Make SequenceFile.Metadata public.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-917">HADOOP-917</a>.  Fix a NullPointerException in SequenceFile's merger
+with large map outputs.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-984">HADOOP-984</a>.  Fix a bug in shuffle error handling introduced by
+<a href="http://issues.apache.org/jira/browse/HADOOP-331">HADOOP-331</a>.  If a map output is unavailable, the job tracker is
+once more informed.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-987">HADOOP-987</a>.  Fix a problem in HDFS where blocks were not removed
+from neededReplications after a replication target was selected.<br />(Hairong Kuang via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.11.0_-_2007-02-02_')">Release 0.11.0 - 2007-02-02
+</a></h3>
+    <ol id="release_0.11.0_-_2007-02-02_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-781">HADOOP-781</a>.  Remove methods deprecated in 0.10 that are no longer
+widely used.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-842">HADOOP-842</a>.  Change HDFS protocol so that the open() method is
+passed the client hostname, to permit the namenode to order block
+locations on the basis of network topology.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-852">HADOOP-852</a>.  Add an ant task to compile record definitions, and
+use it to compile record unit tests.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-757">HADOOP-757</a>.  Fix "Bad File Descriptor" exception in HDFS client
+when an output file is closed twice.<br />(Raghu Angadi via cutting)</li>
+      <li>[ intentionally blank ]
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-890">HADOOP-890</a>.  Replace dashes in metric names with underscores,
+for better compatibility with some monitoring systems.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-801">HADOOP-801</a>.  Add to jobtracker a log of task completion events.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-855">HADOOP-855</a>.  In HDFS, try to repair files with checksum errors.
+An exception is still thrown, but corrupt blocks are now removed
+when they have replicas.<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-886">HADOOP-886</a>.  Reduce number of timer threads created by metrics API
+by pooling contexts.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-897">HADOOP-897</a>.  Add a "javac.args" property to build.xml that permits
+one to pass arbitrary options to javac.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-899">HADOOP-899</a>.  Update libhdfs for changes in <a href="http://issues.apache.org/jira/browse/HADOOP-871">HADOOP-871</a>.<br />(Sameer Paranjpye via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-905">HADOOP-905</a>.  Remove some dead code from JobClient.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-902">HADOOP-902</a>.  Fix a NullPointerException in HDFS client when
+closing output streams.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-735">HADOOP-735</a>.  Switch generated record code to use BytesWritable to
+represent fields of type 'buffer'.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-830">HADOOP-830</a>.  Improve mapreduce merge performance by buffering and
+merging multiple map outputs as they arrive at reduce nodes before
+they're written to disk.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-908">HADOOP-908</a>.  Add a new contrib package, Abacus, that simplifies
+counting and aggregation, built on MapReduce.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-901">HADOOP-901</a>.  Add support for recursive renaming to the S3 filesystem.<br />(Tom White via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-912">HADOOP-912</a>.  Fix a bug in TaskTracker.isIdle() that was
+sporadically causing unit test failures.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-909">HADOOP-909</a>.  Fix the 'du' command to correctly compute the size of
+FileSystem directory trees.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-731">HADOOP-731</a>.  When a checksum error is encountered on a file stored
+in HDFS, try another replica of the data, if any.<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-732">HADOOP-732</a>.  Add support to SequenceFile for arbitrary metadata,
+as a set of attribute value pairs.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-929">HADOOP-929</a>.  Fix PhasedFileSystem to pass configuration to
+underlying FileSystem.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-935">HADOOP-935</a>.  Fix contrib/abacus to not delete pre-existing output
+files, but rather to fail in this case.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-936">HADOOP-936</a>.  More metric renamings, as in <a href="http://issues.apache.org/jira/browse/HADOOP-890">HADOOP-890</a>.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-856">HADOOP-856</a>.  Fix HDFS's fsck command to not report that
+non-existent filesystems are healthy.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-602">HADOOP-602</a>.  Remove the dependency on Lucene's PriorityQueue
+utility, by copying it into Hadoop.  This facilitates using Hadoop
+with different versions of Lucene without worrying about CLASSPATH
+order.<br />(Milind Bhandarkar via cutting)</li>
+      <li>[ intentionally blank ]
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-227">HADOOP-227</a>.  Add support for backup namenodes, which periodically
+get snapshots of the namenode state.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-884">HADOOP-884</a>.  Add scripts in contrib/ec2 to facilitate running
+Hadoop on an Amazon EC2 cluster.<br />(Tom White via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-937">HADOOP-937</a>.  Change the namenode to request re-registration of
+datanodes in more circumstances.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-922">HADOOP-922</a>.  Optimize small forward seeks in HDFS.  If data is has
+likely already in flight, skip ahead rather than re-opening the
+block.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-961">HADOOP-961</a>.  Add a 'job -events' sub-command that prints job
+events, including task completions and failures.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-959">HADOOP-959</a>.  Fix namenode snapshot code added in <a href="http://issues.apache.org/jira/browse/HADOOP-227">HADOOP-227</a> to
+work on Windows.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-934">HADOOP-934</a>.  Fix TaskTracker to catch metrics exceptions that were
+causing heartbeats to fail.<br />(Arun Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-881">HADOOP-881</a>.  Fix JobTracker web interface to display the correct
+number of task failures.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-788">HADOOP-788</a>.  Change contrib/streaming to subclass TextInputFormat,
+permitting it to take advantage of native compression facilities.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-962">HADOOP-962</a>.  In contrib/ec2: make scripts executable in tar file;
+add a README; make the environment file use a template.<br />(Tom White via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-549">HADOOP-549</a>.  Fix a NullPointerException in TaskReport's
+serialization.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-963">HADOOP-963</a>.  Fix remote exceptions to have the stack trace of the
+caller thread, not the IPC listener thread.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-967">HADOOP-967</a>.  Change RPC clients to start sending a version header.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-964">HADOOP-964</a>.  Fix a bug introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-830">HADOOP-830</a> where jobs failed
+whose comparators and/or i/o types were in the job's jar.<br />(Dennis Kubes via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-969">HADOOP-969</a>.  Fix a deadlock in JobTracker.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-862">HADOOP-862</a>.  Add support for the S3 FileSystem to the CopyFiles
+tool.<br />(Michael Stack via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-965">HADOOP-965</a>.  Fix IsolationRunner so that job's jar can be found.<br />(Dennis Kubes via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-309">HADOOP-309</a>.  Fix two NullPointerExceptions in StatusHttpServer.<br />(navychen via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-692">HADOOP-692</a>.  Add rack awareness to HDFS's placement of blocks.<br />(Hairong Kuang via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.10.1_-_2007-01-10_')">Release 0.10.1 - 2007-01-10
+</a></h3>
+    <ol id="release_0.10.1_-_2007-01-10_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-857">HADOOP-857</a>.  Fix S3 FileSystem implementation to permit its use
+for MapReduce input and output.<br />(Tom White via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-863">HADOOP-863</a>.  Reduce logging verbosity introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-813">HADOOP-813</a>.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-815">HADOOP-815</a>.  Fix memory leaks in JobTracker.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-600">HADOOP-600</a>.  Fix a race condition in JobTracker.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-864">HADOOP-864</a>.  Fix 'bin/hadoop -jar' to operate correctly when
+hadoop.tmp.dir does not yet exist.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-866">HADOOP-866</a>.  Fix 'dfs -get' command to remove existing crc files,
+if any.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-871">HADOOP-871</a>.  Fix a bug in bin/hadoop setting JAVA_LIBRARY_PATH.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-868">HADOOP-868</a>.  Decrease the number of open files during map,
+respecting io.sort.factor.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-865">HADOOP-865</a>.  Fix S3 FileSystem so that partially created files can
+be deleted.<br />(Tom White via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-873">HADOOP-873</a>.	 Pass java.library.path correctly to child processes.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-851">HADOOP-851</a>.  Add support for the LZO codec.  This is much faster
+than the default, zlib-based compression, but it is only available
+when the native library is built.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-880">HADOOP-880</a>.  Fix S3 FileSystem to remove directories.<br />(Tom White via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-879">HADOOP-879</a>.  Fix InputFormatBase to handle output generated by
+MapFileOutputFormat.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-659">HADOOP-659</a>.  In HDFS, prioritize replication of blocks based on
+current replication level.  Blocks which are severely
+under-replicated should be further replicated before blocks which
+are less under-replicated.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-726">HADOOP-726</a>.  Deprecate FileSystem locking methods.  They are not
+currently usable.  Locking should eventually be provided as an
+independent service.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-758">HADOOP-758</a>.  Fix exception handling during reduce so that root
+exceptions are not masked by exceptions in cleanups.<br />(Raghu Angadi via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.10.0_-_2007-01-05_')">Release 0.10.0 - 2007-01-05
+</a></h3>
+    <ol id="release_0.10.0_-_2007-01-05_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-763">HADOOP-763</a>. Change DFS namenode benchmark to not use MapReduce.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-777">HADOOP-777</a>. Use fully-qualified hostnames for tasktrackers and
+datanodes.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-621">HADOOP-621</a>. Change 'dfs -cat' to exit sooner when output has been
+closed.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-752">HADOOP-752</a>. Rationalize some synchronization in DFS namenode.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-629">HADOOP-629</a>. Fix RPC services to better check the protocol name and
+version.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-774">HADOOP-774</a>. Limit the number of invalid blocks returned with
+heartbeats by the namenode to datanodes.  Transmitting and
+processing very large invalid block lists can tie up both the
+namenode and datanode for too long.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-738">HADOOP-738</a>. Change 'dfs -get' command to not create CRC files by
+default, adding a -crc option to force their creation.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-676">HADOOP-676</a>. Improved exceptions and error messages for common job
+input specification errors.<br />(Sanjay Dahiya via cutting)</li>
+      <li>[Included in 0.9.2 release]
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-756">HADOOP-756</a>. Add new dfsadmin option to wait for filesystem to be
+operational.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-770">HADOOP-770</a>. Fix jobtracker web interface to display, on restart,
+jobs that were running when it was last stopped.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-331">HADOOP-331</a>. Write all map outputs to a single file with an index,
+rather than to a separate file per reduce task.  This should both
+speed the shuffle and make things more scalable.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-818">HADOOP-818</a>. Fix contrib unit tests to not depend on core unit
+tests.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-786">HADOOP-786</a>. Log common exception at debug level.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-796">HADOOP-796</a>. Provide more convenient access to failed task
+information in the web interface.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-764">HADOOP-764</a>. Reduce memory allocations in namenode some.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-802">HADOOP-802</a>. Update description of mapred.speculative.execution to
+mention reduces.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-806">HADOOP-806</a>. Include link to datanodes on front page of namenode
+web interface.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-618">HADOOP-618</a>.  Make JobSubmissionProtocol public.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-782">HADOOP-782</a>.  Fully remove killed tasks.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-792">HADOOP-792</a>.  Fix 'dfs -mv' to return correct status.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-673">HADOOP-673</a>.  Give each task its own working directory again.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-571">HADOOP-571</a>.  Extend the syntax of Path to be a URI; to be
+optionally qualified with a scheme and authority.  The scheme
+determines the FileSystem implementation, while the authority
+determines the FileSystem instance.  New FileSystem
+implementations may be provided by defining an fs.&lt;scheme&gt;.impl
+property, naming the FileSystem implementation class.  This
+permits easy integration of new FileSystem implementations.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-720">HADOOP-720</a>.  Add an HDFS white paper to website.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-794">HADOOP-794</a>.  Fix a divide-by-zero exception when a job specifies
+zero map tasks.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-454">HADOOP-454</a>.  Add a 'dfs -dus' command that provides summary disk
+usage.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-574">HADOOP-574</a>.  Add an Amazon S3 implementation of FileSystem.  To
+use this, one need only specify paths of the form
+s3://id:secret@bucket/.  Alternately, the AWS access key id and
+secret can be specified in your config, with the properties
+fs.s3.awsAccessKeyId and fs.s3.awsSecretAccessKey.<br />(Tom White via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-824">HADOOP-824</a>.  Rename DFSShell to be FsShell, since it applies
+generically to all FileSystem implementations.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-813">HADOOP-813</a>.  Fix map output sorting to report progress, so that
+sorts which take longer than the task timeout do not fail.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-825">HADOOP-825</a>.  Fix HDFS daemons when configured with new URI syntax.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-596">HADOOP-596</a>.  Fix a bug in phase reporting during reduce.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-811">HADOOP-811</a>.  Add a utility, MultithreadedMapRunner.<br />(Alejandro Abdelnur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-829">HADOOP-829</a>.  Within HDFS, clearly separate three different
+representations for datanodes: one for RPCs, one for
+namenode-internal use, and one for namespace persistence.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-823">HADOOP-823</a>.  Fix problem starting datanode when not all configured
+data directories exist.<br />(Bryan Pendleton via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-451">HADOOP-451</a>.  Add a Split interface.  CAUTION: This incompatibly
+changes the InputFormat and RecordReader interfaces.  Not only is
+FileSplit replaced with Split, but a FileSystem parameter is no
+longer passed in several methods, input validation has changed,
+etc.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-814">HADOOP-814</a>.  Optimize locking in namenode.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-738">HADOOP-738</a>.  Change 'fs -put' and 'fs -get' commands to accept
+standard input and output, respectively.  Standard i/o is
+specified by a file named '-'.<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-835">HADOOP-835</a>.  Fix a NullPointerException reading record-compressed
+SequenceFiles.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-836">HADOOP-836</a>.  Fix a MapReduce bug on Windows, where the wrong
+FileSystem was used.  Also add a static FileSystem.getLocal()
+method and better Path checking in HDFS, to help avoid such issues
+in the future.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-837">HADOOP-837</a>.  Improve RunJar utility to unpack jar file
+hadoop.tmp.dir, rather than the system temporary directory.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-841">HADOOP-841</a>.  Fix native library to build 32-bit version even when
+on a 64-bit host, if a 32-bit JVM is used.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-838">HADOOP-838</a>.  Fix tasktracker to pass java.library.path to
+sub-processes, so that libhadoop.a is found.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-844">HADOOP-844</a>.  Send metrics messages on a fixed-delay schedule
+instead of a fixed-rate schedule.<br />(David Bowen via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-849">HADOOP-849</a>.  Fix OutOfMemory exceptions in TaskTracker due to a
+file handle leak in SequenceFile.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-745">HADOOP-745</a>.  Fix a synchronization bug in the HDFS namenode.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-850">HADOOP-850</a>.  Add Writable implementations for variable-length
+integers.<br />(ab via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-525">HADOOP-525</a>.  Add raw comparators to record types.  This greatly
+improves record sort performance.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-628">HADOOP-628</a>.  Fix a problem with 'fs -cat' command, where some
+characters were replaced with question marks.<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-804">HADOOP-804</a>.  Reduce verbosity of MapReduce logging.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-853">HADOOP-853</a>.  Rename 'site' to 'docs', in preparation for inclusion
+in releases.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-371">HADOOP-371</a>.  Include contrib jars and site documentation in
+distributions.  Also add contrib and example documentation to
+distributed javadoc, in separate sections.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-846">HADOOP-846</a>.  Report progress during entire map, as sorting of
+intermediate outputs may happen at any time, potentially causing
+task timeouts.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-840">HADOOP-840</a>.  In task tracker, queue task cleanups and perform them
+in a separate thread.<br />(omalley &amp; Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-681">HADOOP-681</a>.  Add to HDFS the ability to decommission nodes.  This
+causes their blocks to be re-replicated on other nodes, so that
+they may be removed from a cluster.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-470">HADOOP-470</a>.  In HDFS web ui, list the datanodes containing each
+copy of a block.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-700">HADOOP-700</a>.  Change bin/hadoop to only include core jar file on
+classpath, not example, test, etc.  Also rename core jar to
+hadoop-${version}-core.jar so that it can be more easily
+identified.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-619">HADOOP-619</a>.  Extend InputFormatBase to accept individual files and
+glob patterns as MapReduce inputs, not just directories.  Also
+change contrib/streaming to use this.<br />(Sanjay Dahiya via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.9.2_-_2006-12-15_')">Release 0.9.2 - 2006-12-15
+</a></h3>
+    <ol id="release_0.9.2_-_2006-12-15_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-639">HADOOP-639</a>. Restructure InterTrackerProtocol to make task
+accounting more reliable.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-827">HADOOP-827</a>. Turn off speculative execution by default, since it's
+currently broken.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-791">HADOOP-791</a>. Fix a deadlock in the task tracker.<br />(Mahadev Konar via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.9.1_-_2006-12-06_')">Release 0.9.1 - 2006-12-06
+</a></h3>
+    <ol id="release_0.9.1_-_2006-12-06_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-780">HADOOP-780</a>. Use ReflectionUtils to instantiate key and value
+objects.<br />(ab)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-779">HADOOP-779</a>. Fix contrib/streaming to work correctly with gzipped
+input files.<br />(Hairong Kuang via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.9.0_-_2006-12-01_')">Release 0.9.0 - 2006-12-01
+</a></h3>
+    <ol id="release_0.9.0_-_2006-12-01_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-655">HADOOP-655</a>.  Remove most deprecated code.  A few deprecated things
+remain, notably UTF8 and some methods that are still required.
+Also cleaned up constructors for SequenceFile, MapFile, SetFile,
+and ArrayFile a bit.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-565">HADOOP-565</a>.  Upgrade to Jetty version 6.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-682">HADOOP-682</a>.  Fix DFS format command to work correctly when
+configured with a non-existent directory.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-645">HADOOP-645</a>.  Fix a bug in contrib/streaming when -reducer is NONE.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-687">HADOOP-687</a>.  Fix a classpath bug in bin/hadoop that blocked the
+servers from starting.<br />(Sameer Paranjpye via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-683">HADOOP-683</a>.  Remove a script dependency on bash, so it works with
+dash, the new default for /bin/sh on Ubuntu.<br />(James Todd via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-382">HADOOP-382</a>.  Extend unit tests to run multiple datanodes.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-604">HADOOP-604</a>.  Fix some synchronization issues and a
+NullPointerException in DFS datanode.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-459">HADOOP-459</a>.  Fix memory leaks and a host of other issues with
+libhdfs.<br />(Sameer Paranjpye via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-694">HADOOP-694</a>.  Fix a NullPointerException in jobtracker.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-637">HADOOP-637</a>.  Fix a memory leak in the IPC server.  Direct buffers
+are not collected like normal buffers, and provided little
+advantage.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-696">HADOOP-696</a>.  Fix TestTextInputFormat unit test to not rely on the
+order of directory listings.<br />(Sameer Paranjpye via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-611">HADOOP-611</a>.  Add support for iterator-based merging to
+SequenceFile.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-688">HADOOP-688</a>.  Move DFS administrative commands to a separate
+command named 'dfsadmin'.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-708">HADOOP-708</a>.  Fix test-libhdfs to return the correct status, so
+that failures will break the build.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-646">HADOOP-646</a>.  Fix namenode to handle edits files larger than 2GB.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-705">HADOOP-705</a>.  Fix a bug in the JobTracker when failed jobs were
+not completely cleaned up.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-613">HADOOP-613</a>.  Perform final merge while reducing.  This removes one
+sort pass over the data and should consequently significantly
+decrease overall processing time.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-661">HADOOP-661</a>.  Make each job's configuration visible through the web
+ui.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-489">HADOOP-489</a>.  In MapReduce, separate user logs from system logs.
+Each task's log output is now available through the web ui.<br />(Arun
+C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-712">HADOOP-712</a>.  Fix record io's xml serialization to correctly handle
+control-characters.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-668">HADOOP-668</a>.  Improvements to the web-based DFS browser.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-715">HADOOP-715</a>.  Fix build.xml so that test logs are written in build
+directory, rather than in CWD.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-538">HADOOP-538</a>.  Add support for building an optional native library,
+libhadoop.so, that improves the performance of zlib-based
+compression.  To build this, specify -Dcompile.native to Ant.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-610">HADOOP-610</a>.  Fix an problem when the DFS block size is configured
+to be smaller than the buffer size, typically only when debugging.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-695">HADOOP-695</a>.  Fix a NullPointerException in contrib/streaming.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-652">HADOOP-652</a>.  In DFS, when a file is deleted, the block count is
+now decremented.<br />(Vladimir Krokhmalyov via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-725">HADOOP-725</a>.  In DFS, optimize block placement algorithm,
+previously a performance bottleneck.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-723">HADOOP-723</a>.  In MapReduce, fix a race condition during the
+shuffle, which resulted in FileNotFoundExceptions.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-447">HADOOP-447</a>.  In DFS, fix getBlockSize(Path) to work with relative
+paths.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-733">HADOOP-733</a>.  Make exit codes in DFShell consistent and add a unit
+test.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-709">HADOOP-709</a>.  Fix contrib/streaming to work with commands that
+contain control characters.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-677">HADOOP-677</a>.  In IPC, permit a version header to be transmitted
+when connections are established.  This will permit us to change
+the format of IPC requests back-compatibly in subsequent releases.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-699">HADOOP-699</a>.  Fix DFS web interface so that filesystem browsing
+works correctly, using the right port number.  Also add support
+for sorting datanode list by various columns.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-76">HADOOP-76</a>.  Implement speculative reduce.  Now when a job is
+configured for speculative execution, both maps and reduces will
+execute speculatively.  Reduce outputs are written to temporary
+location and moved to the final location when reduce is complete.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-736">HADOOP-736</a>.  Roll back to Jetty 5.1.4, due to performance problems
+with Jetty 6.0.1.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-739">HADOOP-739</a>.  Fix TestIPC to use different port number, making it
+more reliable.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-749">HADOOP-749</a>.  Fix a NullPointerException in jobfailures.jsp.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-747">HADOOP-747</a>.  Fix record serialization to work correctly when
+records are embedded in Maps.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-698">HADOOP-698</a>.  Fix HDFS client not to retry the same datanode on
+read failures.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-689">HADOOP-689</a>. Add GenericWritable, to facilitate polymorphism in
+MapReduce, SequenceFile, etc.<br />(Feng Jiang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-430">HADOOP-430</a>.  Stop datanode's HTTP server when registration with
+namenode fails.<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-750">HADOOP-750</a>.  Fix a potential race condition during mapreduce
+shuffle.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-728">HADOOP-728</a>.  Fix contrib/streaming-related issues, including
+'-reducer NONE'.<br />(Sanjay Dahiya via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.8.0_-_2006-11-03_')">Release 0.8.0 - 2006-11-03
+</a></h3>
+    <ol id="release_0.8.0_-_2006-11-03_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-477">HADOOP-477</a>.  Extend contrib/streaming to scan the PATH environment
+variables when resolving executable program names.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-583">HADOOP-583</a>.  In DFSClient, reduce the log level of re-connect
+attempts from 'info' to 'debug', so they are not normally shown.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-498">HADOOP-498</a>.  Re-implement DFS integrity checker to run server-side,
+for much improved performance.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-586">HADOOP-586</a>.  Use the jar name for otherwise un-named jobs.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-514">HADOOP-514</a>.  Make DFS heartbeat interval configurable.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-588">HADOOP-588</a>.  Fix logging and accounting of failed tasks.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-462">HADOOP-462</a>.  Improve command line parsing in DFSShell, so that
+incorrect numbers of arguments result in informative errors rather
+than ArrayIndexOutOfBoundsException.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-561">HADOOP-561</a>.  Fix DFS so that one replica of each block is written
+locally, if possible.  This was the intent, but there was a bug.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-610">HADOOP-610</a>.  Fix TaskTracker to survive more exceptions, keeping
+tasks from becoming lost.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-625">HADOOP-625</a>.  Add a servlet to all http daemons that displays a
+stack dump, useful for debugging.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-554">HADOOP-554</a>.  Fix DFSShell to return -1 for errors.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-626">HADOOP-626</a>.  Correct the documentation in the NNBench example
+code, and also remove a mistaken call there.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-634">HADOOP-634</a>.  Add missing license to many files.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-627">HADOOP-627</a>.  Fix some synchronization problems in MiniMRCluster
+that sometimes caused unit tests to fail.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-563">HADOOP-563</a>.  Improve the NameNode's lease policy so that leases
+are held for one hour without renewal (instead of one minute).
+However another attempt to create the same file will still succeed
+if the lease has not been renewed within a minute.  This prevents
+communication or scheduling problems from causing a write to fail
+for up to an hour, barring some other process trying to create the
+same file.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-635">HADOOP-635</a>.  In DFSShell, permit specification of multiple files
+as the source for file copy and move commands.<br />(Dhruba Borthakur via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-641">HADOOP-641</a>.  Change NameNode to request a fresh block report from
+a re-discovered DataNode, so that no-longer-needed replications
+are stopped promptly.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-642">HADOOP-642</a>.  Change IPC client to specify an explicit connect
+timeout.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-638">HADOOP-638</a>.  Fix an unsynchronized access to TaskTracker's
+internal state.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-624">HADOOP-624</a>.  Fix servlet path to stop a Jetty warning on startup.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-578">HADOOP-578</a>.  Failed tasks are no longer placed at the end of the
+task queue.  This was originally done to work around other
+problems that have now been fixed.  Re-executing failed tasks
+sooner causes buggy jobs to fail faster.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-658">HADOOP-658</a>.  Update source file headers per Apache policy.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-636">HADOOP-636</a>.  Add MapFile &amp; ArrayFile constructors which accept a
+Progressable, and pass it down to SequenceFile.  This permits
+reduce tasks which use MapFile to still report progress while
+writing blocks to the filesystem.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-576">HADOOP-576</a>.  Enable contrib/streaming to use the file cache.  Also
+extend the cache to permit symbolic links to cached items, rather
+than local file copies.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-482">HADOOP-482</a>.  Fix unit tests to work when a cluster is running on
+the same machine, removing port conflicts.<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-90">HADOOP-90</a>.  Permit dfs.name.dir to list multiple directories,
+where namenode data is to be replicated.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-651">HADOOP-651</a>.  Fix DFSCk to correctly pass parameters to the servlet
+on the namenode.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-553">HADOOP-553</a>.  Change main() routines of DataNode and NameNode to
+log exceptions rather than letting the JVM print them to standard
+error.  Also, change the hadoop-daemon.sh script to rotate
+standard i/o log files.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-399">HADOOP-399</a>.  Fix javadoc warnings.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-599">HADOOP-599</a>.  Fix web ui and command line to correctly report DFS
+filesystem size statistics.  Also improve web layout.<br />(Raghu Angadi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-660">HADOOP-660</a>.  Permit specification of junit test output format.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-663">HADOOP-663</a>.  Fix a few unit test issues.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-664">HADOOP-664</a>.  Cause entire build to fail if libhdfs tests fail.<br />(Nigel Daley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-633">HADOOP-633</a>.  Keep jobtracker from dying when job initialization
+throws exceptions.  Also improve exception handling in a few other
+places and add more informative thread names.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-669">HADOOP-669</a>.  Fix a problem introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-90">HADOOP-90</a> that can cause
+DFS to lose files.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-373">HADOOP-373</a>.  Consistently check the value returned by
+FileSystem.mkdirs().<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-670">HADOOP-670</a>.  Code cleanups in some DFS internals: use generic
+types, replace Vector with ArrayList, etc.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-647">HADOOP-647</a>.  Permit map outputs to use a different compression
+type than the job output.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-671">HADOOP-671</a>.  Fix file cache to check for pre-existence before
+creating.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-665">HADOOP-665</a>.  Extend many DFSShell commands to accept multiple
+arguments.  Now commands like "ls", "rm", etc. will operate on
+multiple files.<br />(Dhruba Borthakur via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.7.2_-_2006-10-18_')">Release 0.7.2 - 2006-10-18
+</a></h3>
+    <ol id="release_0.7.2_-_2006-10-18_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-607">HADOOP-607</a>.  Fix a bug where classes included in job jars were not
+found by tasks.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-609">HADOOP-609</a>.  Add a unit test that checks that classes in job jars
+can be found by tasks.  Also modify unit tests to specify multiple
+local directories.<br />(Mahadev Konar via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.7.1_-_2006-10-11_')">Release 0.7.1 - 2006-10-11
+</a></h3>
+    <ol id="release_0.7.1_-_2006-10-11_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-593">HADOOP-593</a>.  Fix a NullPointerException in the JobTracker.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-592">HADOOP-592</a>.  Fix a NullPointerException in the IPC Server.  Also
+consistently log when stale calls are discarded.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-594">HADOOP-594</a>.  Increase the DFS safe-mode threshold from .95 to
+.999, so that nearly all blocks must be reported before filesystem
+modifications are permitted.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-598">HADOOP-598</a>.  Fix tasks to retry when reporting completion, so that
+a single RPC timeout won't fail a task.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-597">HADOOP-597</a>.  Fix TaskTracker to not discard map outputs for errors
+in transmitting them to reduce nodes.<br />(omalley via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.7.0_-_2006-10-06_')">Release 0.7.0 - 2006-10-06
+</a></h3>
+    <ol id="release_0.7.0_-_2006-10-06_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-243">HADOOP-243</a>.  Fix rounding in the display of task and job progress
+so that things are not shown to be 100% complete until they are in
+fact finished.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-438">HADOOP-438</a>.  Limit the length of absolute paths in DFS, since the
+file format used to store pathnames has some limitations.<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-530">HADOOP-530</a>.  Improve error messages in SequenceFile when keys or
+values are of the wrong type.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-288">HADOOP-288</a>.  Add a file caching system and use it in MapReduce to
+cache job jar files on slave nodes.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-533">HADOOP-533</a>.  Fix unit test to not modify conf directory.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-527">HADOOP-527</a>.  Permit specification of the local address that various
+Hadoop daemons should bind to.<br />(Philippe Gassmann via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-542">HADOOP-542</a>.  Updates to contrib/streaming: reformatted source code,
+on-the-fly merge sort, a fix for <a href="http://issues.apache.org/jira/browse/HADOOP-540">HADOOP-540</a>, etc.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-545">HADOOP-545</a>.  Remove an unused config file parameter.<br />(Philippe Gassmann via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-548">HADOOP-548</a>.  Add an Ant property "test.output" to build.xml that
+causes test output to be logged to the console.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-261">HADOOP-261</a>.  Record an error message when map output is lost.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-293">HADOOP-293</a>.  Report the full list of task error messages in the
+web ui, not just the most recent.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-551">HADOOP-551</a>.  Restore JobClient's console printouts to only include
+a maximum of one update per one percent of progress.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-306">HADOOP-306</a>.  Add a "safe" mode to DFS.  The name node enters this
+when less than a specified percentage of file data is complete.
+Currently safe mode is only used on startup, but eventually it
+will also be entered when datanodes disconnect and file data
+becomes incomplete.  While in safe mode no filesystem
+modifications are permitted and block replication is inhibited.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-431">HADOOP-431</a>.  Change 'dfs -rm' to not operate recursively and add a
+new command, 'dfs -rmr' which operates recursively.<br />(Sameer Paranjpye via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-263">HADOOP-263</a>.  Include timestamps for job transitions.  The web
+interface now displays the start and end times of tasks and the
+start times of sorting and reducing for reduce tasks.  Also,
+extend ObjectWritable to handle enums, so that they can be passed
+as RPC parameters.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-556">HADOOP-556</a>.  Contrib/streaming: send keep-alive reports to task
+tracker every 10 seconds rather than every 100 records, to avoid
+task timeouts.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-547">HADOOP-547</a>.  Fix reduce tasks to ping tasktracker while copying
+data, rather than only between copies, avoiding task timeouts.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-537">HADOOP-537</a>.  Fix src/c++/libhdfs build process to create files in
+build/, no longer modifying the source tree.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-487">HADOOP-487</a>.  Throw a more informative exception for unknown RPC
+hosts.<br />(Sameer Paranjpye via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-559">HADOOP-559</a>.  Add file name globbing (pattern matching) support to
+the FileSystem API, and use it in DFSShell ('bin/hadoop dfs')
+commands.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-508">HADOOP-508</a>.  Fix a bug in FSDataInputStream.  Incorrect data was
+returned after seeking to a random location.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-560">HADOOP-560</a>.  Add a "killed" task state.  This can be used to
+distinguish kills from other failures.  Task state has also been
+converted to use an enum type instead of an int, uncovering a bug
+elsewhere.  The web interface is also updated to display killed
+tasks.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-423">HADOOP-423</a>.  Normalize Paths containing directories named "." and
+"..", using the standard, unix interpretation.  Also add checks in
+DFS, prohibiting the use of "." or ".." as directory or file
+names.<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-513">HADOOP-513</a>.  Replace map output handling with a servlet, rather
+than a JSP page.  This fixes an issue where
+IllegalStateExceptions were logged, sets content-length
+correctly, and better handles some errors.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-552">HADOOP-552</a>.  Improved error checking when copying map output files
+to reduce nodes.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-566">HADOOP-566</a>.  Fix scripts to work correctly when accessed through
+relative symbolic links.<br />(Lee Faris via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-519">HADOOP-519</a>.  Add positioned read methods to FSInputStream.  These
+permit one to read from a stream without moving its position, and
+can hence be performed by multiple threads at once on a single
+stream. Implement an optimized version for DFS and local FS.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-522">HADOOP-522</a>. Permit block compression with MapFile and SetFile.
+Since these formats are always sorted, block compression can
+provide a big advantage.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-567">HADOOP-567</a>. Record version and revision information in builds.  A
+package manifest is added to the generated jar file containing
+version information, and a VersionInfo utility is added that
+includes further information, including the build date and user,
+and the subversion revision and repository.  A 'bin/hadoop
+version' command is added to show this information, and it is also
+added to various web interfaces.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-568">HADOOP-568</a>.  Fix so that errors while initializing tasks on a
+tasktracker correctly report the task as failed to the jobtracker,
+so that it will be rescheduled.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-550">HADOOP-550</a>.  Disable automatic UTF-8 validation in Text.  This
+permits, e.g., TextInputFormat to again operate on non-UTF-8 data.<br />(Hairong and Mahadev via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-343">HADOOP-343</a>.  Fix mapred copying so that a failed tasktracker
+doesn't cause other copies to slow.<br />(Sameer Paranjpye via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-239">HADOOP-239</a>.  Add a persistent job history mechanism, so that basic
+job statistics are not lost after 24 hours and/or when the
+jobtracker is restarted.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-506">HADOOP-506</a>.  Ignore heartbeats from stale task trackers.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-255">HADOOP-255</a>.  Discard stale, queued IPC calls.  Do not process
+calls whose clients will likely time out before they receive a
+response.  When the queue is full, new calls are now received and
+queued, and the oldest calls are discarded, so that, when servers
+get bogged down, they no longer develop a backlog on the socket.
+This should improve some DFS namenode failure modes.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-581">HADOOP-581</a>.  Fix datanode to not reset itself on communications
+errors with the namenode.  If a request to the namenode fails, the
+datanode should retry, not restart.  This reduces the load on the
+namenode, since restarts cause a resend of the block report.<br />(omalley via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.6.2_-_2006-09-18_')">Release 0.6.2 - 2006-09-18
+</a></h3>
+    <ol id="release_0.6.2_-_2006-09-18_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-532">HADOOP-532</a>.  Fix a bug reading value-compressed sequence files,
+where an exception was thrown reporting that the full value had not
+been read.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-534">HADOOP-534</a>.  Change the default value class in JobConf to be Text
+instead of the now-deprecated UTF8.  This fixes the Grep example
+program, which was updated to use Text, but relies on this
+default.<br />(Hairong Kuang via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.6.1_-_2006-09-13_')">Release 0.6.1 - 2006-09-13
+</a></h3>
+    <ol id="release_0.6.1_-_2006-09-13_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-520">HADOOP-520</a>.  Fix a bug in libhdfs, where write failures were not
+correctly returning error codes.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-523">HADOOP-523</a>.  Fix a NullPointerException when TextInputFormat is
+explicitly specified.  Also add a test case for this.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-521">HADOOP-521</a>.  Fix another NullPointerException finding the
+ClassLoader when using libhdfs.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-526">HADOOP-526</a>.  Fix a NullPointerException when attempting to start
+two datanodes in the same directory.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-529">HADOOP-529</a>.  Fix a NullPointerException when opening
+value-compressed sequence files generated by pre-0.6.0 Hadoop.<br />(omalley via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.6.0_-_2006-09-08_')">Release 0.6.0 - 2006-09-08
+</a></h3>
+    <ol id="release_0.6.0_-_2006-09-08_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-427">HADOOP-427</a>.  Replace some uses of DatanodeDescriptor in the DFS
+web UI code with DatanodeInfo, the preferred public class.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-426">HADOOP-426</a>.  Fix streaming contrib module to work correctly on
+Solaris.  This was causing nightly builds to fail.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-400">HADOOP-400</a>.  Improvements to task assignment.  Tasks are no longer
+re-run on nodes where they have failed (unless no other node is
+available).  Also, tasks are better load-balanced among nodes.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-324">HADOOP-324</a>.  Fix datanode to not exit when a disk is full, but
+rather simply to fail writes.<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-434">HADOOP-434</a>.  Change smallJobsBenchmark to use standard Hadoop
+scripts.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-453">HADOOP-453</a>.  Fix a bug in Text.setCapacity().<br />(siren via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-450">HADOOP-450</a>.  Change so that input types are determined by the
+RecordReader rather than specified directly in the JobConf.  This
+facilitates jobs with a variety of input types.
+<p/>
+WARNING: This contains incompatible API changes!  The RecordReader
+interface has two new methods that all user-defined InputFormats
+must now define.  Also, the values returned by TextInputFormat are
+no longer of class UTF8, but now of class Text.
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-436">HADOOP-436</a>.  Fix an error-handling bug in the web ui.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-455">HADOOP-455</a>.  Fix a bug in Text, where DEL was not permitted.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-456">HADOOP-456</a>.  Change the DFS namenode to keep a persistent record
+of the set of known datanodes.  This will be used to implement a
+"safe mode" where filesystem changes are prohibited when a
+critical percentage of the datanodes are unavailable.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-322">HADOOP-322</a>.  Add a job control utility.  This permits one to
+specify job interdependencies.  Each job is submitted only after
+the jobs it depends on have successfully completed.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-176">HADOOP-176</a>.  Fix a bug in IntWritable.Comparator.<br />(Dick King via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-421">HADOOP-421</a>.  Replace uses of String in recordio package with Text
+class, for improved handling of UTF-8 data.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-464">HADOOP-464</a>.  Improved error message when job jar not found.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-469">HADOOP-469</a>.  Fix /bin/bash specifics that have crept into our
+/bin/sh scripts since <a href="http://issues.apache.org/jira/browse/HADOOP-352">HADOOP-352</a>.<br />(Jean-Baptiste Quenot via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-468">HADOOP-468</a>.  Add HADOOP_NICENESS environment variable to set
+scheduling priority for daemons.<br />(Vetle Roeim via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-473">HADOOP-473</a>.  Fix TextInputFormat to correctly handle more EOL
+formats.  Things now work correctly with CR, LF or CRLF.<br />(Dennis Kubes &amp; James White via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-461">HADOOP-461</a>.  Make Java 1.5 an explicit requirement.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-54">HADOOP-54</a>.  Add block compression to SequenceFile.  One may now
+specify that blocks of keys and values are compressed together,
+improving compression for small keys and values.
+SequenceFile.Writer's constructor is now deprecated and replaced
+with a factory method.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-281">HADOOP-281</a>.  Prohibit DFS files that are also directories.<br />(Wendy Chien via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-486">HADOOP-486</a>.  Add the job username to JobStatus instances returned
+by JobClient.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-437">HADOOP-437</a>.  contrib/streaming: Add support for gzipped inputs.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-463">HADOOP-463</a>.  Add variable expansion to config files.
+Configuration property values may now contain variable
+expressions.  A variable is referenced with the syntax
+'${variable}'.  Variable values are found first in the
+configuration, and then in Java system properties.  The default
+configuration is modified so that temporary directories are now
+under ${hadoop.tmp.dir}, which is, by default,
+/tmp/hadoop-${user.name}.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-419">HADOOP-419</a>. Fix a NullPointerException finding the ClassLoader
+when using libhdfs.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-460">HADOOP-460</a>. Fix contrib/smallJobsBenchmark to use Text instead of
+UTF8.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-196">HADOOP-196</a>.  Fix Configuration(Configuration) constructor to work
+correctly.<br />(Sami Siren via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-501">HADOOP-501</a>.  Fix Configuration.toString() to handle URL resources.<br />(Thomas Friol via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-499">HADOOP-499</a>.  Reduce the use of Strings in contrib/streaming,
+replacing them with Text for better performance.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-64">HADOOP-64</a>.  Manage multiple volumes with a single DataNode.
+Previously DataNode would create a separate daemon per configured
+volume, each with its own connection to the NameNode.  Now all
+volumes are handled by a single DataNode daemon, reducing the load
+on the NameNode.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-424">HADOOP-424</a>.  Fix MapReduce so that jobs which generate zero splits
+do not fail.<br />(Frédéric Bertin via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-408">HADOOP-408</a>.  Adjust some timeouts and remove some others so that
+unit tests run faster.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-507">HADOOP-507</a>.  Fix an IllegalAccessException in DFS.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-320">HADOOP-320</a>.  Fix so that checksum files are correctly copied when
+the destination of a file copy is a directory.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-286">HADOOP-286</a>.  In DFSClient, avoid pinging the NameNode with
+renewLease() calls when no files are being written.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-312">HADOOP-312</a>.  Close idle IPC connections.  All IPC connections were
+cached forever.  Now, after a connection has been idle for more
+than a configurable amount of time (one second by default), the
+connection is closed, conserving resources on both client and
+server.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-497">HADOOP-497</a>.  Permit the specification of the network interface and
+nameserver to be used when determining the local hostname
+advertised by datanodes and tasktrackers.<br />(Lorenzo Thione via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-441">HADOOP-441</a>.  Add a compression codec API and extend SequenceFile
+to use it.  This will permit the use of alternate compression
+codecs in SequenceFile.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-483">HADOOP-483</a>. Improvements to libhdfs build and documentation.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-458">HADOOP-458</a>.  Fix a memory corruption bug in libhdfs.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-517">HADOOP-517</a>.  Fix a contrib/streaming bug in end-of-line detection.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-474">HADOOP-474</a>.  Add CompressionCodecFactory, and use it in
+TextInputFormat and TextOutputFormat.  Compressed input files are
+automatically decompressed when they have the correct extension.
+Output files will, when output compression is specified, be
+generated with an appropriate extension.  Also add a gzip codec and
+fix problems with UTF8 text inputs.<br />(omalley via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.5.0_-_2006-08-04_')">Release 0.5.0 - 2006-08-04
+</a></h3>
+    <ol id="release_0.5.0_-_2006-08-04_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-352">HADOOP-352</a>.  Fix shell scripts to use /bin/sh instead of
+/bin/bash, for better portability.<br />(Jean-Baptiste Quenot via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-313">HADOOP-313</a>.  Permit task state to be saved so that single tasks
+may be manually re-executed when debugging.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-339">HADOOP-339</a>.  Add method to JobClient API listing jobs that are
+not yet complete, i.e., that are queued or running.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-355">HADOOP-355</a>.  Updates to the streaming contrib module, including
+API fixes, making reduce optional, and adding an input type for
+StreamSequenceRecordReader.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-358">HADOOP-358</a>.  Fix a NPE bug in Path.equals().<br />(Fr??d??ric Bertin via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-327">HADOOP-327</a>.  Fix ToolBase to not call System.exit() when
+exceptions are thrown.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-359">HADOOP-359</a>.  Permit map output to be compressed.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-341">HADOOP-341</a>.  Permit input URI to CopyFiles to use the HTTP
+protocol.  This lets one, e.g., more easily copy log files into
+DFS.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-361">HADOOP-361</a>.  Remove unix dependencies from streaming contrib
+module tests, making them pure java.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-354">HADOOP-354</a>.  Make public methods to stop DFS daemons.<br />(Barry Kaplan via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-252">HADOOP-252</a>.  Add versioning to RPC protocols.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-356">HADOOP-356</a>.  Add contrib to "compile" and "test" build targets, so
+that this code is better maintained.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-307">HADOOP-307</a>.  Add smallJobsBenchmark contrib module.  This runs
+lots of small jobs, in order to determine per-task overheads.<br />(Sanjay Dahiya via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-342">HADOOP-342</a>.  Add a tool for log analysis: Logalyzer.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-347">HADOOP-347</a>.  Add web-based browsing of DFS content.  The namenode
+redirects browsing requests to datanodes.  Content requests are
+redirected to datanodes where the data is local when possible.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-351">HADOOP-351</a>.  Make Hadoop IPC kernel independent of Jetty.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-237">HADOOP-237</a>.  Add metric reporting to DFS and MapReduce.  With only
+minor configuration changes, one can now monitor many Hadoop
+system statistics using Ganglia or other monitoring systems.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-376">HADOOP-376</a>.  Fix datanode's HTTP server to scan for a free port.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-260">HADOOP-260</a>.  Add --config option to shell scripts, specifying an
+alternate configuration directory.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-381">HADOOP-381</a>.  Permit developers to save the temporary files for
+tasks whose names match a regular expression, to facilitate
+debugging.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-344">HADOOP-344</a>.  Fix some Windows-related problems with DF.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-380">HADOOP-380</a>.  Fix reduce tasks to poll less frequently for map
+outputs.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-321">HADOOP-321</a>.  Refactor DatanodeInfo, in preparation for
+<a href="http://issues.apache.org/jira/browse/HADOOP-306">HADOOP-306</a>.<br />(Konstantin Shvachko &amp; omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-385">HADOOP-385</a>.  Fix some bugs in record io code generation.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-302">HADOOP-302</a>.  Add new Text class to replace UTF8, removing
+limitations of that class.  Also refactor utility methods for
+writing zero-compressed integers (VInts and VLongs).<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-335">HADOOP-335</a>.  Refactor DFS namespace/transaction logging in
+namenode.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-375">HADOOP-375</a>.  Fix handling of the datanode HTTP daemon's port so
+that multiple datanode's can be run on a single host.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-386">HADOOP-386</a>.  When removing excess DFS block replicas, remove those
+on nodes with the least free space first.<br />(Johan Oskarson via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-389">HADOOP-389</a>.  Fix intermittent failures of mapreduce unit tests.
+Also fix some build dependencies.<br />(Mahadev &amp; Konstantin via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-362">HADOOP-362</a>.  Fix a problem where jobs hang when status messages
+are received out-of-order.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-394">HADOOP-394</a>.  Change order of DFS shutdown in unit tests to
+minimize errors logged.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-396">HADOOP-396</a>.  Make DatanodeID implement Writable.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-377">HADOOP-377</a>.  Permit one to add URL resources to a Configuration.<br />(Jean-Baptiste Quenot via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-345">HADOOP-345</a>.  Permit iteration over Configuration key/value pairs.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-409">HADOOP-409</a>.  Streaming contrib module: make configuration
+properties available to commands as environment variables.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-369">HADOOP-369</a>.  Add -getmerge option to dfs command that appends all
+files in a directory into a single local file.<br />(Johan Oskarson via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-410">HADOOP-410</a>.  Replace some TreeMaps with HashMaps in DFS, for
+a 17% performance improvement.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-411">HADOOP-411</a>.  Add unit tests for command line parser.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-412">HADOOP-412</a>.  Add MapReduce input formats that support filtering
+of SequenceFile data, including sampling and regex matching.
+Also, move JobConf.newInstance() to a new utility class.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-226">HADOOP-226</a>.  Fix fsck command to properly consider replication
+counts, now that these can vary per file.<br />(Bryan Pendleton via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-425">HADOOP-425</a>.  Add a Python MapReduce example, using Jython.<br />(omalley via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.4.0_-_2006-06-28_')">Release 0.4.0 - 2006-06-28
+</a></h3>
+    <ol id="release_0.4.0_-_2006-06-28_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-298">HADOOP-298</a>.  Improved progress reports for CopyFiles utility, the
+distributed file copier.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-299">HADOOP-299</a>.  Fix the task tracker, permitting multiple jobs to
+more easily execute at the same time.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-250">HADOOP-250</a>.  Add an HTTP user interface to the namenode, running
+on port 50070.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-123">HADOOP-123</a>.  Add MapReduce unit tests that run a jobtracker and
+tasktracker, greatly increasing code coverage.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-271">HADOOP-271</a>.  Add links from jobtracker's web ui to tasktracker's
+web ui.  Also attempt to log a thread dump of child processes
+before they're killed.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-210">HADOOP-210</a>.  Change RPC server to use a selector instead of a
+thread per connection.  This should make it easier to scale to
+larger clusters.  Note that this incompatibly changes the RPC
+protocol: clients and servers must both be upgraded to the new
+version to ensure correct operation.<br />(Devaraj Das via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-311">HADOOP-311</a>.  Change DFS client to retry failed reads, so that a
+single read failure will not alone cause failure of a task.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-314">HADOOP-314</a>.  Remove the "append" phase when reducing.  Map output
+files are now directly passed to the sorter, without first
+appending them into a single file.  Now, the first third of reduce
+progress is "copy" (transferring map output to reduce nodes), the
+middle third is "sort" (sorting map output) and the last third is
+"reduce" (generating output).  Long-term, the "sort" phase will
+also be removed.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-316">HADOOP-316</a>.  Fix a potential deadlock in the jobtracker.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-319">HADOOP-319</a>.  Fix FileSystem.close() to remove the FileSystem
+instance from the cache.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-135">HADOOP-135</a>.  Fix potential deadlock in JobTracker by acquiring
+locks in a consistent order.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-278">HADOOP-278</a>.  Check for existence of input directories before
+starting MapReduce jobs, making it easier to debug this common
+error.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-304">HADOOP-304</a>.  Improve error message for
+UnregisterdDatanodeException to include expected node name.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-305">HADOOP-305</a>.  Fix TaskTracker to ask for new tasks as soon as a
+task is finished, rather than waiting for the next heartbeat.
+This improves performance when tasks are short.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-59">HADOOP-59</a>.  Add support for generic command line options.  One may
+now specify the filesystem (-fs), the MapReduce jobtracker (-jt),
+a config file (-conf) or any configuration property (-D).  The
+"dfs", "fsck", "job", and "distcp" commands currently support
+this, with more to be added.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-296">HADOOP-296</a>.  Permit specification of the amount of reserved space
+on a DFS datanode.  One may specify both the percentage free and
+the number of bytes.<br />(Johan Oskarson via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-325">HADOOP-325</a>.  Fix a problem initializing RPC parameter classes, and
+remove the workaround used to initialize classes.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-328">HADOOP-328</a>.  Add an option to the "distcp" command to ignore read
+errors while copying.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-27">HADOOP-27</a>.  Don't allocate tasks to trackers whose local free
+space is too low.<br />(Johan Oskarson via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-318">HADOOP-318</a>.  Keep slow DFS output from causing task timeouts.
+This incompatibly changes some public interfaces, adding a
+parameter to OutputFormat.getRecordWriter() and the new method
+Reporter.progress(), but it makes lots of tasks succeed that were
+previously failing.<br />(Milind Bhandarkar via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.3.2_-_2006-06-09_')">Release 0.3.2 - 2006-06-09
+</a></h3>
+    <ol id="release_0.3.2_-_2006-06-09_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-275">HADOOP-275</a>.  Update the streaming contrib module to use log4j for
+its logging.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-279">HADOOP-279</a>.  Provide defaults for log4j logging parameters, so
+that things still work reasonably when Hadoop-specific system
+properties are not provided.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-280">HADOOP-280</a>.  Fix a typo in AllTestDriver which caused the wrong
+test to be run when "DistributedFSCheck" was specified.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-240">HADOOP-240</a>.  DFS's mkdirs() implementation no longer logs a warning
+when the directory already exists.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-285">HADOOP-285</a>.  Fix DFS datanodes to be able to re-join the cluster
+after the connection to the namenode is lost.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-277">HADOOP-277</a>.  Fix a race condition when creating directories.<br />(Sameer Paranjpye via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-289">HADOOP-289</a>.  Improved exception handling in DFS datanode.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-292">HADOOP-292</a>.  Fix client-side logging to go to standard error
+rather than standard output, so that it can be distinguished from
+application output.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-294">HADOOP-294</a>.  Fixed bug where conditions for retrying after errors
+in the DFS client were reversed.<br />(omalley via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.3.1_-_2006-06-05_')">Release 0.3.1 - 2006-06-05
+</a></h3>
+    <ol id="release_0.3.1_-_2006-06-05_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-272">HADOOP-272</a>.  Fix a bug in bin/hadoop setting log
+parameters.<br />(omalley &amp; cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-274">HADOOP-274</a>.  Change applications to log to standard output rather
+than to a rolling log file like daemons.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-262">HADOOP-262</a>.  Fix reduce tasks to report progress while they're
+waiting for map outputs, so that they do not time out.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-245">HADOOP-245</a> and <a href="http://issues.apache.org/jira/browse/HADOOP-246">HADOOP-246</a>.  Improvements to record io package.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-276">HADOOP-276</a>.  Add logging config files to jar file so that they're
+always found.<br />(omalley via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.3.0_-_2006-06-02_')">Release 0.3.0 - 2006-06-02
+</a></h3>
+    <ol id="release_0.3.0_-_2006-06-02_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-208">HADOOP-208</a>.  Enhance MapReduce web interface, adding new pages
+for failed tasks, and tasktrackers.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-204">HADOOP-204</a>.  Tweaks to metrics package.<br />(David Bowen via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-209">HADOOP-209</a>.  Add a MapReduce-based file copier.  This will
+copy files within or between file systems in parallel.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-146">HADOOP-146</a>.  Fix DFS to check when randomly generating a new block
+id that no existing blocks already have that id.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-180">HADOOP-180</a>. Make a daemon thread that does the actual task clean ups, so
+that the main offerService thread in the taskTracker doesn't get stuck
+and miss his heartbeat window. This was killing many task trackers as
+big jobs finished (300+ tasks / node).<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-200">HADOOP-200</a>. Avoid transmitting entire list of map task names to
+reduce tasks.  Instead just transmit the number of map tasks and
+henceforth refer to them by number when collecting map output.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-219">HADOOP-219</a>. Fix a NullPointerException when handling a checksum
+exception under SequenceFile.Sorter.sort().<br />(cutting &amp; stack)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-212">HADOOP-212</a>. Permit alteration of the file block size in DFS.  The
+default block size for new files may now be specified in the
+configuration with the dfs.block.size property.  The block size
+may also be specified when files are opened.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-218">HADOOP-218</a>. Avoid accessing configuration while looping through
+tasks in JobTracker.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-161">HADOOP-161</a>. Add hashCode() method to DFS's Block.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-115">HADOOP-115</a>. Map output types may now be specified.  These are also
+used as reduce input types, thus permitting reduce input types to
+differ from reduce output types.<br />(Runping Qi via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-216">HADOOP-216</a>. Add task progress to task status page.<br />(Bryan Pendelton via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-233">HADOOP-233</a>.  Add web server to task tracker that shows running
+tasks and logs.  Also add log access to job tracker web interface.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-205">HADOOP-205</a>.  Incorporate pending tasks into tasktracker load
+calculations.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-247">HADOOP-247</a>.  Fix sort progress to better handle exceptions.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-195">HADOOP-195</a>.  Improve performance of the transfer of map outputs to
+reduce nodes by performing multiple transfers in parallel, each on
+a separate socket.<br />(Sameer Paranjpye via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-251">HADOOP-251</a>.  Fix task processes to be tolerant of failed progress
+reports to their parent process.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-325">HADOOP-325</a>.  Improve the FileNotFound exceptions thrown by
+LocalFileSystem to include the name of the file.<br />(Benjamin Reed via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-254">HADOOP-254</a>.  Use HTTP to transfer map output data to reduce
+nodes.  This, together with <a href="http://issues.apache.org/jira/browse/HADOOP-195">HADOOP-195</a>, greatly improves the
+performance of these transfers.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-163">HADOOP-163</a>.  Cause datanodes that\ are unable to either read or
+write data to exit, so that the namenode will no longer target
+them for new blocks and will replicate their data on other nodes.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-222">HADOOP-222</a>.  Add a -setrep option to the dfs commands that alters
+file replication levels.<br />(Johan Oskarson via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-75">HADOOP-75</a>.  In DFS, only check for a complete file when the file
+is closed, rather than as each block is written.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-124">HADOOP-124</a>. Change DFS so that datanodes are identified by a
+persistent ID rather than by host and port.  This solves a number
+of filesystem integrity problems, when, e.g., datanodes are
+restarted.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-256">HADOOP-256</a>.  Add a C API for DFS.<br />(Arun C Murthy via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-211">HADOOP-211</a>.  Switch to use the Jakarta Commons logging internally,
+configured to use log4j by default.<br />(Arun C Murthy and cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-265">HADOOP-265</a>.  Tasktracker now fails to start if it does not have a
+writable local directory for temporary files.  In this case, it
+logs a message to the JobTracker and exits.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-270">HADOOP-270</a>.  Fix potential deadlock in datanode shutdown.<br />(Hairong Kuang via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.2.1_-_2006-05-12_')">Release 0.2.1 - 2006-05-12
+</a></h3>
+    <ol id="release_0.2.1_-_2006-05-12_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-199">HADOOP-199</a>.  Fix reduce progress (broken by <a href="http://issues.apache.org/jira/browse/HADOOP-182">HADOOP-182</a>).<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-201">HADOOP-201</a>.  Fix 'bin/hadoop dfs -report'.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-207">HADOOP-207</a>.  Fix JDK 1.4 incompatibility introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-96">HADOOP-96</a>.
+System.getenv() does not work in JDK 1.4.<br />(Hairong Kuang via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.2.0_-_2006-05-05_')">Release 0.2.0 - 2006-05-05
+</a></h3>
+    <ol id="release_0.2.0_-_2006-05-05_">
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-126">HADOOP-126</a>. 'bin/hadoop dfs -cp' now correctly copies .crc
+files.<br />(Konstantin Shvachko via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-51">HADOOP-51</a>. Change DFS to support per-file replication counts.<br />(Konstantin Shvachko via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-131">HADOOP-131</a>.  Add scripts to start/stop dfs and mapred daemons.
+Use these in start/stop-all scripts.<br />(Chris Mattmann via cutting)</li>
+      <li>Stop using ssh options by default that are not yet in widely used
+versions of ssh.  Folks can still enable their use by uncommenting
+a line in conf/hadoop-env.sh.<br />(cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-92">HADOOP-92</a>.  Show information about all attempts to run each
+task in the web ui.<br />(Mahadev Konar via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-128">HADOOP-128</a>.  Improved DFS error handling.<br />(Owen O'Malley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-129">HADOOP-129</a>.  Replace uses of java.io.File with new class named
+Path.  This fixes bugs where java.io.File methods were called
+directly when FileSystem methods were desired, and reduces the
+likelihood of such bugs in the future.  It also makes the handling
+of pathnames more consistent between local and dfs FileSystems and
+between Windows and Unix. java.io.File-based methods are still
+available for back-compatibility, but are deprecated and will be
+removed once 0.2 is released.<br />(cutting)</li>
+      <li>Change dfs.data.dir and mapred.local.dir to be comma-separated
+lists of directories rather than space-separated lists. This fixes
+several bugs on Windows.<br />(cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-144">HADOOP-144</a>.  Use mapred task id for dfs client id, to
+facilitate debugging.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-143">HADOOP-143</a>.  Do not line-wrap stack-traces in web ui.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-118">HADOOP-118</a>.  In DFS, improve clean up of abandoned file
+creations.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-138">HADOOP-138</a>.  Stop multiple tasks in a single heartbeat, rather
+than one per heartbeat.<br />(Stefan via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-139">HADOOP-139</a>.  Remove a potential deadlock in
+LocalFileSystem.lock().<br />(Igor Bolotin via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-134">HADOOP-134</a>.  Don't hang jobs when the tasktracker is
+misconfigured to use an un-writable local directory.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-115">HADOOP-115</a>.  Correct an error message.<br />(Stack via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-133">HADOOP-133</a>.  Retry pings from child to parent, in case of
+(local) communication problems.  Also log exit status, so that one
+can distinguish patricide from other deaths.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-142">HADOOP-142</a>.  Avoid re-running a task on a host where it has
+previously failed.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-148">HADOOP-148</a>.  Maintain a task failure count for each
+tasktracker and display it in the web ui.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-151">HADOOP-151</a>.  Close a potential socket leak, where new IPC
+connection pools were created per configuration instance that RPCs
+use.  Now a global RPC connection pool is used again, as
+originally intended.<br />(cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-69">HADOOP-69</a>.  Don't throw a NullPointerException when getting
+hints for non-existing file split.<br />(Bryan Pendelton via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-157">HADOOP-157</a>.  When a task that writes dfs files (e.g., a reduce
+task) failed and was retried, it would fail again and again,
+eventually failing the job.  The problem was that dfs did not yet
+know that the failed task had abandoned the files, and would not
+yet let another task create files with the same names.  Dfs now
+retries when creating a file long enough for locks on abandoned
+files to expire.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-150">HADOOP-150</a>.  Improved task names that include job
+names.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-162">HADOOP-162</a>.  Fix ConcurrentModificationException when
+releasing file locks.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-132">HADOOP-132</a>.  Initial check-in of new Metrics API, including
+implementations for writing metric data to a file and for sending
+it to Ganglia.<br />(David Bowen via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-160">HADOOP-160</a>.  Remove some unneeded synchronization around
+time-consuming operations in the TaskTracker.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-166">HADOOP-166</a>.  RPCs failed when passed subclasses of a declared
+parameter type.  This is fixed by changing ObjectWritable to store
+both the declared type and the instance type for Writables.  Note
+that this incompatibly changes the format of ObjectWritable and
+will render unreadable any ObjectWritables stored in files.
+Nutch only uses ObjectWritable in intermediate files, so this
+should not be a problem for Nutch.<br />(Stefan &amp; cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-168">HADOOP-168</a>.  MapReduce RPC protocol methods should all declare
+IOException, so that timeouts are handled appropriately.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-169">HADOOP-169</a>.  Don't fail a reduce task if a call to the
+jobtracker to locate map outputs fails.<br />(omalley via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-170">HADOOP-170</a>.  Permit FileSystem clients to examine and modify
+the replication count of individual files.  Also fix a few
+replication-related bugs.<br />(Konstantin Shvachko via cutting)</li>
+      <li>Permit specification of higher replication levels for job
+submission files (job.xml and job.jar).  This helps with large
+clusters, since these files are read by every node.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-173">HADOOP-173</a>.  Optimize allocation of tasks with local data.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-167">HADOOP-167</a>.  Reduce number of Configurations and JobConf's
+created.<br />(omalley via cutting)</li>
+      <li>NUTCH-256.  Change FileSystem#createNewFile() to create a .crc
+file.  The lack of a .crc file was causing warnings.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-174">HADOOP-174</a>.  Change JobClient to not abort job until it has failed
+to contact the job tracker for five attempts, not just one as
+before.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-177">HADOOP-177</a>.  Change MapReduce web interface to page through tasks.
+Previously, when jobs had more than a few thousand tasks they
+could crash web browsers.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-178">HADOOP-178</a>.  In DFS, piggyback blockwork requests from datanodes
+on heartbeat responses from namenode.  This reduces the volume of
+RPC traffic.  Also move startup delay in blockwork from datanode
+to namenode.  This fixes a problem where restarting the namenode
+triggered a lot of unneeded replication.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-183">HADOOP-183</a>.  If the DFS namenode is restarted with different
+minimum and/or maximum replication counts, existing files'
+replication counts are now automatically adjusted to be within the
+newly configured bounds.<br />(Hairong Kuang via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-186">HADOOP-186</a>.  Better error handling in TaskTracker's top-level
+loop.  Also improve calculation of time to send next heartbeat.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-187">HADOOP-187</a>.  Add two MapReduce examples/benchmarks.  One creates
+files containing random data.  The second sorts the output of the
+first.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-185">HADOOP-185</a>.  Fix so that, when a task tracker times out making the
+RPC asking for a new task to run, the job tracker does not think
+that it is actually running the task returned.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-190">HADOOP-190</a>.  If a child process hangs after it has reported
+completion, its output should not be lost.<br />(Stack via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-184">HADOOP-184</a>. Re-structure some test code to better support testing
+on a cluster.<br />(Mahadev Konar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-191">HADOOP-191</a>  Add streaming package, Hadoop's first contrib module.
+This permits folks to easily submit MapReduce jobs whose map and
+reduce functions are implemented by shell commands.  Use
+'bin/hadoop jar build/hadoop-streaming.jar' to get details.<br />(Michel Tourn via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-189">HADOOP-189</a>.  Fix MapReduce in standalone configuration to
+correctly handle job jar files that contain a lib directory with
+nested jar files.<br />(cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-65">HADOOP-65</a>.  Initial version of record I/O framework that enables
+the specification of record types and generates marshalling code
+in both Java and C++.  Generated Java code implements
+WritableComparable, but is not yet otherwise used by
+Hadoop.<br />(Milind Bhandarkar via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-193">HADOOP-193</a>.  Add a MapReduce-based FileSystem benchmark.<br />(Konstantin Shvachko via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-194">HADOOP-194</a>.  Add a MapReduce-based FileSystem checker.  This reads
+every block in every file in the filesystem.<br />(Konstantin Shvachko
+via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-182">HADOOP-182</a>.  Fix so that lost task trackers to not change the
+status of reduce tasks or completed jobs.  Also fixes the progress
+meter so that failed tasks are subtracted.<br />(omalley via cutting)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-96">HADOOP-96</a>.  Logging improvements.  Log files are now separate from
+standard output and standard error files.  Logs are now rolled.
+Logging of all DFS state changes can be enabled, to facilitate
+debugging.<br />(Hairong Kuang via cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.1.1_-_2006-04-08_')">Release 0.1.1 - 2006-04-08
+</a></h3>
+    <ol id="release_0.1.1_-_2006-04-08_">
+      <li>Added CHANGES.txt, logging all significant changes to Hadoop.<br />(cutting)</li>
+      <li>Fix MapReduceBase.close() to throw IOException, as declared in the
+Closeable interface.  This permits subclasses which override this
+method to throw that exception.<br />(cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-117">HADOOP-117</a>.  Pathnames were mistakenly transposed in
+JobConf.getLocalFile() causing many mapred temporary files to not
+be removed.<br />(Raghavendra Prabhu via cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-116">HADOOP-116</a>. Clean up job submission files when jobs complete.<br />(cutting)</li>
+      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-125">HADOOP-125</a>. Fix handling of absolute paths on Windows.<br />(cutting)</li>
+    </ol>
+<h3><a href="javascript:toggleList('release_0.1.0_-_2006-04-01_')">Release 0.1.0 - 2006-04-01
+</a></h3>
+    <ol id="release_0.1.0_-_2006-04-01_">
+      <li>The first release of Hadoop.
+</li>
+    </ol>
+</ul>
+</body>
+</html>

+ 6 - 0
docs/cluster_setup.html

@@ -120,6 +120,9 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
@@ -146,6 +149,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 6 - 0
docs/hdfs_design.html

@@ -122,6 +122,9 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
@@ -148,6 +151,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 6 - 0
docs/hdfs_permissions_guide.html

@@ -121,6 +121,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
 <div class="menupage">
 <div class="menupage">
 <div class="menupagetitle">HDFS Permissions Guide</div>
 <div class="menupagetitle">HDFS Permissions Guide</div>
 </div>
 </div>
@@ -148,6 +151,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 3 - 0
docs/hdfs_shell.html

@@ -149,6 +149,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 6 - 0
docs/hdfs_user_guide.html

@@ -122,6 +122,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menupagetitle">HDFS User Guide</div>
 <div class="menupagetitle">HDFS User Guide</div>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
@@ -148,6 +151,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 6 - 0
docs/hod.html

@@ -122,6 +122,9 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
@@ -148,6 +151,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 6 - 0
docs/hod_admin_guide.html

@@ -122,6 +122,9 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
@@ -148,6 +151,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 6 - 0
docs/hod_config_guide.html

@@ -122,6 +122,9 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
@@ -148,6 +151,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 6 - 0
docs/hod_user_guide.html

@@ -122,6 +122,9 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
@@ -148,6 +151,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 6 - 0
docs/index.html

@@ -120,6 +120,9 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
@@ -146,6 +149,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit">
 <div id="credit">
 <hr>
 <hr>

+ 18 - 0
docs/linkmap.html

@@ -120,6 +120,9 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
@@ -146,6 +149,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">
@@ -213,6 +219,12 @@ document.write("Last Published: " + document.lastModified);
 </li>
 </li>
 </ul>
 </ul>
     
     
+<ul>
+<li>
+<a href="hdfs_shell.html">HDFS Shell Guide</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>hdfs</em>
+</li>
+</ul>
+    
 <ul>
 <ul>
 <li>
 <li>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>hdfs</em>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>hdfs</em>
@@ -272,6 +284,12 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>lists</em>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>lists</em>
 </li>
 </li>
 </ul>
 </ul>
+    
+<ul>
+<li>
+<a href="changes.html">Release Notes</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>changes</em>
+</li>
+</ul>
   
   
 </ul>
 </ul>
 </ul>
 </ul>

+ 12 - 12
docs/linkmap.pdf

@@ -5,10 +5,10 @@
 /Producer (FOP 0.20.5) >>
 /Producer (FOP 0.20.5) >>
 endobj
 endobj
 5 0 obj
 5 0 obj
-<< /Length 942 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 1008 /Filter [ /ASCII85Decode /FlateDecode ]
  >>
  >>
 stream
 stream
-Gatn%;,>q#&BE]*.JsG^Gd_*Da^]mECk;\'VEKlP15SXTd\J-`dD#RL"<&3p;MDMVla)lRs*VA`ml6ba=Eo?mLd8>9U#n#"+`KBX!1>JE@5GqQJfYFfZj&`*Q:f=fE('W>X;mb2r-g4UrBqoK1Ma[+p#D5[f.tt.+nu5801M?19KH`TNqkm"Lj!f9WiF&BCK:r3Z06s="J*q3G%7)chM@*_-Bm/PWK_7V4L(-X,us38NceW"\7<P]ln!funAKcC#^^'f(c>`1*'sG6gG=s+7C*eb>2eAB5<FYuPsaP@(l#tOg$NbrK(&tr@RuT5I=uULq9pOjfB0aQXbqKURRb%$bn`p/poS)B]U<0Gr!oB*DIp@Yno6E"o:UlLX&85RNfbIdK)r:D>/g6$(8[VJ`k$uZ.M%j"&NUhTI20Gg43?b[bHaq5V:>2![.?uF2:6IZX3T`B,c?'f^48=->qC?>Go<M,PM7RW$lXk^e%esXPLXmj;T]40_r^f&fl?siX5k;2'QV3!nbmErU>?:P8sX6m<Mm(&s$TREo+276LAe%_+0;@JYrVY-P<-.tl_rB'WC/Y4'K,,??!VN:PVO+fo`BWI]"XVsEbf[_%eV4H>ht^?<3hr$:)NV^\-&uh3"H3DDQ-8XegeF]c^Em?`"f7tY@DB**3QR`6+kG=nYoc;GPB$40[Q2sl=mLn.,.Gq2t$V/Zc1PQF&PFug2"J9',rf(LXBKerD<6'1/J)4H@KbeNUW#@%p$n`If2GMa8&=iN77Q4YRE.;b.8eL'l9_E^Hg[5cfUI3'XQ(pqp<Asp<`NgD$,WW%t+XqX,J00TTihA]:L$%]fIn6cb4WiI*/Nk*B+E0B-?gho/iA(@e6KsGH(#"n/K5`D\7YMX*7@Ac@*D=Z#).PQ$sFD27"^r4DDDoBK_Ym^!URrQ7^AK\<RBe\,I10]C4\~>
+Gatn&?#SI?'Sc)J.t$#VPQ:u)?`*4rDX)F$m:)V5C6KLbYu;UNg:n^fBah+K7rAthiIH=4#CI"$NYdm41Z-(Q5]<ot^ud;A5]8MG*n4QX784jK&GRgK(2NC5_-AI0kT/V)e0>4]14NEt:"QZ.pR*mF/A(UIOXlQplHoFg/UhAEl60u.*B>#@J3dZ*(9+AR(UP&?YhQg+D@)81,Y_L>U,1e<]ubr5H7EW54ARLSAP#n%XCG7s!pnQ:%m,b9'Q2Um0W+;4(;XTc."d&_bk?uC7FB>5_H-I#gX".gHfGYB#Xf8J?(*eI6X\J1&-O\/Y`B>NP<UYYHEH'D=0fofUA;_dBXU`8`:!j3@SI*HqR6^sl:I49;EU#;U-A&YF#AiGdigK&6.ePJ;p4s0;S5uhfr/%.>NO7Q4$Z;&^WW?QF=13s.8b6h4r:4?*Ho.,]4U^U`QEEFJn1[8e5q`EVmCY=3%a/i7rk)QUHtX\0G:ssBsJ]g6:,@(I^kn4.skR:eM?q#/cgnd=+!6f-E6aVSYm[T5)km0(1:3l5@2EW1Weal(?J_#Q/7@t:SK2MIL(Z\_d:iA)"_Fm"lC<UMPQ97o4:e"U6IbZ9a[BZ-*FC7qFZkYZa-eod4Y+;ra^$:%\^"Q0(2G>8l:QP`t\;Pgk")oS4(&Y2clj6CRM@iZH)f*G"&FVlO[8>3^$pKC([Z?Y'heNVUcS%"r\Cg=$8//=@r^CZYXsdh;W4rjfqI9gLJD=iu:!LLEk<<9u#bA*"CCKPO&1XFZ75Pi80E\9lE4O@ln&ga'fllWV!%'"dbWKJmH5oCA\GJFKr#n)+QD;*etkdd[LcV+qqVpG+$N?NSdV2J@($"UDTmk?\oTCYD^M4H^/=2EWcIU:li!;&7Fdg_HTcQ-HF@G#jZK7]`NmI+Omt'Tr&<0e;J]>"!_ncD#T]?Q)a7iF,)40/.>rp?&;JF68[NffXBt^;EaX"lBrB0T!ZHpYT>TuZnYbOb-H^R3hA_@\uG]7Ul%C[~>
 endstream
 endstream
 endobj
 endobj
 6 0 obj
 6 0 obj
@@ -72,17 +72,17 @@ endobj
 xref
 xref
 0 12
 0 12
 0000000000 65535 f 
 0000000000 65535 f 
-0000001765 00000 n 
-0000001823 00000 n 
-0000001873 00000 n 
+0000001832 00000 n 
+0000001890 00000 n 
+0000001940 00000 n 
 0000000015 00000 n 
 0000000015 00000 n 
 0000000071 00000 n 
 0000000071 00000 n 
-0000001104 00000 n 
-0000001210 00000 n 
-0000001322 00000 n 
-0000001431 00000 n 
-0000001541 00000 n 
-0000001649 00000 n 
+0000001171 00000 n 
+0000001277 00000 n 
+0000001389 00000 n 
+0000001498 00000 n 
+0000001608 00000 n 
+0000001716 00000 n 
 trailer
 trailer
 <<
 <<
 /Size 12
 /Size 12
@@ -90,5 +90,5 @@ trailer
 /Info 4 0 R
 /Info 4 0 R
 >>
 >>
 startxref
 startxref
-1993
+2060
 %%EOF
 %%EOF

+ 6 - 0
docs/mapred_tutorial.html

@@ -120,6 +120,9 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menupage">
 <div class="menupage">
@@ -146,6 +149,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 6 - 0
docs/native_libraries.html

@@ -120,6 +120,9 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
@@ -146,6 +149,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 6 - 0
docs/quickstart.html

@@ -120,6 +120,9 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
@@ -146,6 +149,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 6 - 0
docs/streaming.html

@@ -123,6 +123,9 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
+<a href="hdfs_shell.html">HDFS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 </div>
 <div class="menuitem">
 <div class="menuitem">
@@ -149,6 +152,9 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <div class="menuitem">
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 </div>
+<div class="menuitem">
+<a href="changes.html">Release Notes</a>
+</div>
 </div>
 </div>
 <div id="credit"></div>
 <div id="credit"></div>
 <div id="roundbottom">
 <div id="roundbottom">

+ 170 - 0
src/docs/changes/ChangesFancyStyle.css

@@ -0,0 +1,170 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+/**
+ * General
+ */
+
+img { border: 0; }
+
+#content table {
+  border: 0;
+  width: 100%;
+}
+/*Hack to get IE to render the table at 100%*/
+* html #content table { margin-left: -3px; }
+
+#content th,
+#content td {
+  margin: 0;
+  padding: 0;
+  vertical-align: top;
+}
+
+.clearboth {
+  clear: both;
+}
+
+.note, .warning, .fixme {
+  border: solid black 1px;
+  margin: 1em 3em;
+}
+
+.note .label {
+  background: #369;
+  color: white;
+  font-weight: bold;
+  padding: 5px 10px;
+}
+.note .content {
+  background: #F0F0FF;
+  color: black;
+  line-height: 120%;
+  font-size: 90%;
+  padding: 5px 10px;
+}
+.warning .label {
+  background: #C00;
+  color: white;
+  font-weight: bold;
+  padding: 5px 10px;
+}
+.warning .content {
+  background: #FFF0F0;
+  color: black;
+  line-height: 120%;
+  font-size: 90%;
+  padding: 5px 10px;
+}
+.fixme .label {
+  background: #C6C600;
+  color: black;
+  font-weight: bold;
+  padding: 5px 10px;
+}
+.fixme .content {
+  padding: 5px 10px;
+}
+
+/**
+ * Typography
+ */
+
+body {
+  font-family: verdana, "Trebuchet MS", arial, helvetica, sans-serif;
+  font-size: 100%;
+}
+
+#content {
+  font-family: Georgia, Palatino, Times, serif;
+  font-size: 95%;
+}
+#tabs {
+  font-size: 70%;
+}
+#menu {
+  font-size: 80%;
+}
+#footer {
+  font-size: 70%;
+}
+
+h1, h2, h3, h4, h5, h6 {
+  font-family: "Trebuchet MS", verdana, arial, helvetica, sans-serif;
+  font-weight: bold;
+  margin-top: 1em;
+  margin-bottom: .5em;
+}
+
+h1 {
+    margin-top: 0;
+    margin-bottom: 1em;
+  font-size: 1.4em;
+  background-color: #73CAFF;
+}
+#content h1 {
+  font-size: 160%;
+  margin-bottom: .5em;
+}
+#menu h1 {
+  margin: 0;
+  padding: 10px;
+  background: #336699;
+  color: white;
+}
+h2 { 
+  font-size: 120%;
+  background-color: #73CAFF;
+}
+h3 { font-size: 100%; }
+h4 { font-size: 90%; }
+h5 { font-size: 80%; }
+h6 { font-size: 75%; }
+
+p {
+  line-height: 120%;
+  text-align: left;
+  margin-top: .5em;
+  margin-bottom: 1em;
+}
+
+#content li,
+#content th,
+#content td,
+#content li ul,
+#content li ol{
+  margin-top: .5em;
+  margin-bottom: .5em;
+}
+
+
+#content li li,
+#minitoc-area li{
+  margin-top: 0em;
+  margin-bottom: 0em;
+}
+
+#content .attribution {
+  text-align: right;
+  font-style: italic;
+  font-size: 85%;
+  margin-top: 1em;
+}
+
+.codefrag {
+  font-family: "Courier New", Courier, monospace;
+  font-size: 110%;
+}

+ 49 - 0
src/docs/changes/ChangesSimpleStyle.css

@@ -0,0 +1,49 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+body {
+  font-family: Courier New, monospace;
+  font-size: 10pt;
+}
+
+h1 {
+  font-family: Courier New, monospace;
+  font-size: 10pt;
+}
+
+h2 {
+  font-family: Courier New, monospace;
+  font-size: 10pt; 
+}
+
+h3 {
+  font-family: Courier New, monospace;
+  font-size: 10pt; 
+}
+
+a:link {
+  color: blue;
+}
+
+a:visited {
+  color: purple; 
+}
+
+li {
+  margin-top: 1em;
+  margin-bottom: 1em;
+}

+ 282 - 0
src/docs/changes/changes2html.pl

@@ -0,0 +1,282 @@
+#!/usr/bin/perl
+#
+# Transforms Hadoop's CHANGES.txt into changes.html
+#
+# Input is on STDIN, output is to STDOUT
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use strict;
+use warnings;
+
+my $jira_url_prefix = 'http://issues.apache.org/jira/browse/';
+my $title = undef;
+my $release = undef;
+my $sections = undef;
+my $items = undef;
+my $first_relid = undef;
+my $second_relid = undef;
+my @releases = ();
+
+my @lines = <>;                        # Get all input at once
+
+#
+# Parse input and build hierarchical release structure in @releases
+#
+for (my $line_num = 0 ; $line_num <= $#lines ; ++$line_num) {
+  $_ = $lines[$line_num];
+  next unless (/\S/);                  # Skip blank lines
+
+  unless ($title) {
+    if (/\S/) {
+      s/^\s+//;                        # Trim leading whitespace
+      s/\s+$//;                        # Trim trailing whitespace
+    }
+    $title = $_;
+    next;
+  }
+
+  if (/^(Release)|(Trunk)/) {   # Release headings
+    $release = $_;
+    $sections = [];
+    push @releases, [ $release, $sections ];
+    ($first_relid = lc($release)) =~ s/\s+/_/g   if ($#releases == 0);
+    ($second_relid = lc($release)) =~ s/\s+/_/g  if ($#releases == 1);
+    $items = undef;
+    next;
+  }
+
+  # Section heading: 2 leading spaces, words all capitalized
+  if (/^  ([A-Z]+)\s*/) {
+    my $heading = $_;
+    $items = [];
+    push @$sections, [ $heading, $items ];
+    next;
+  }
+
+  # Handle earlier releases without sections - create a headless section
+  unless ($items) {
+    $items = [];
+    push @$sections, [ undef, $items ];
+  }
+
+  my $type;
+  if (@$items) { # A list item has been encountered in this section before
+    $type = $items->[0];  # 0th position of items array is list type
+  } else {
+    $type = get_list_type($_);
+    push @$items, $type;
+  }
+
+  if ($type eq 'numbered') { # The modern items list style
+    # List item boundary is another numbered item or an unindented line
+    my $line;
+    my $item = $_;
+    $item =~ s/^(\s{0,2}\d+\.\s*)//;       # Trim the leading item number
+    my $leading_ws_width = length($1);
+    $item =~ s/\s+$//;                     # Trim trailing whitespace
+    $item .= "\n";
+
+    while ($line_num < $#lines
+           and ($line = $lines[++$line_num]) !~ /^(?:\s{0,2}\d+\.\s*\S|\S)/) {
+      $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
+      $line =~ s/\s+$//;                   # Trim trailing whitespace
+      $item .= "$line\n";
+    }
+    $item =~ s/\n+\Z/\n/;                  # Trim trailing blank lines
+    push @$items, $item;
+    --$line_num unless ($line_num == $#lines);
+  } elsif ($type eq 'paragraph') {         # List item boundary is a blank line
+    my $line;
+    my $item = $_;
+    $item =~ s/^(\s+)//;
+    my $leading_ws_width = defined($1) ? length($1) : 0;
+    $item =~ s/\s+$//;                     # Trim trailing whitespace
+    $item .= "\n";
+
+    while ($line_num < $#lines and ($line = $lines[++$line_num]) =~ /\S/) {
+      $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
+      $line =~ s/\s+$//;                   # Trim trailing whitespace
+      $item .= "$line\n";
+    }
+    push @$items, $item;
+    --$line_num unless ($line_num == $#lines);
+  } else { # $type is one of the bulleted types
+    # List item boundary is another bullet or a blank line
+    my $line;
+    my $item = $_;
+    $item =~ s/^(\s*$type\s*)//;           # Trim the leading bullet
+    my $leading_ws_width = length($1);
+    $item =~ s/\s+$//;                     # Trim trailing whitespace
+    $item .= "\n";
+
+    while ($line_num < $#lines
+           and ($line = $lines[++$line_num]) !~ /^\s*(?:$type|\Z)/) {
+      $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
+      $line =~ s/\s+$//;                   # Trim trailing whitespace
+      $item .= "$line\n";
+    }
+    push @$items, $item;
+    --$line_num unless ($line_num == $#lines);
+  }
+}
+
+#
+# Print HTML-ified version to STDOUT
+#
+print<<"__HTML_HEADER__";
+<!--
+**********************************************************
+** WARNING: This file is generated from CHANGES.txt by the 
+**          Perl script 'changes2html.pl'.
+**          Do *not* edit this file!
+**********************************************************
+          
+****************************************************************************
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+****************************************************************************
+-->
+<html>
+<head>
+  <title>$title</title>
+  <link rel="stylesheet" href="ChangesFancyStyle.css" title="Fancy">
+  <link rel="alternate stylesheet" href="ChangesSimpleStyle.css" title="Simple">
+  <META http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
+  <SCRIPT>
+    function toggleList(e) {
+      element = document.getElementById(e).style;
+      element.display == 'none' ? element.display = 'block' : element.display='none';
+    }
+    function collapse() {
+      for (var i = 0; i < document.getElementsByTagName("ul").length; i++) {
+        var list = document.getElementsByTagName("ul")[i];
+        if (list.id != '$first_relid' && list.id != '$second_relid') {
+          list.style.display = "none";
+        }
+      }
+      for (var i = 0; i < document.getElementsByTagName("ol").length; i++) {
+        document.getElementsByTagName("ol")[i].style.display = "none"; 
+      }
+    }
+    window.onload = collapse;
+  </SCRIPT>
+</head>
+<body>
+
+<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Scalable Computing Platform"></a>
+<h1>$title</h1>
+
+__HTML_HEADER__
+
+my $heading;
+my $relcnt = 0;
+my $header = 'h2';
+for my $rel (@releases) {
+  if (++$relcnt == 3) {
+    $header = 'h3';
+    print "<h2><a href=\"javascript:toggleList('older')\">";
+    print "Older Releases";
+    print "</a></h2>\n";
+    print "<ul id=\"older\">\n"
+  }
+      
+  ($release, $sections) = @$rel;
+
+  # The first section heading is undefined for the older sectionless releases
+  my $has_release_sections = $sections->[0][0];
+
+  (my $relid = lc($release)) =~ s/\s+/_/g;
+  print "<$header><a href=\"javascript:toggleList('$relid')\">";
+  print "$release";
+  print "</a></$header>\n";
+  print "<ul id=\"$relid\">\n"
+    if ($has_release_sections);
+
+  for my $section (@$sections) {
+    ($heading, $items) = @$section;
+    (my $sectid = lc($heading)) =~ s/\s+/_/g;
+    my $numItemsStr = $#{$items} > 0 ? "($#{$items})" : "(none)";  
+
+    print "  <li><a href=\"javascript:toggleList('$relid.$sectid')\">",
+          ($heading || ''), "</a>&nbsp;&nbsp;&nbsp;$numItemsStr\n"
+      if ($has_release_sections);
+
+    my $list_type = $items->[0] || '';
+    my $list = ($has_release_sections || $list_type eq 'numbered' ? 'ol' : 'ul');
+    my $listid = $sectid ? "$relid.$sectid" : $relid;
+    print "    <$list id=\"$listid\">\n";
+
+    for my $itemnum (1..$#{$items}) {
+      my $item = $items->[$itemnum];
+      $item =~ s:&:&amp;:g;                            # Escape HTML metachars
+      $item =~ s:<:&lt;:g; 
+      $item =~ s:>:&gt;:g;
+
+      $item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:;       # Separate attribution
+      $item =~ s:\n{2,}:\n<p/>\n:g;                    # Keep paragraph breaks
+      $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}  # Link to JIRA
+                {<a href="${jira_url_prefix}$1">$1</a>}g;
+      print "      <li>$item</li>\n";
+    }
+    print "    </$list>\n";
+    print "  </li>\n" if ($has_release_sections);
+  }
+  print "</ul>\n" if ($has_release_sections);
+}
+print "</ul>\n" if ($relcnt > 3);
+print "</body>\n</html>\n";
+
+
+#
+# Subroutine: get_list_type
+#
+# Takes one parameter:
+#
+#    - The first line of a sub-section/point
+#
+# Returns one scalar:
+#
+#    - The list type: 'numbered'; one of the bulleted types '-' or '.';
+#      or 'paragraph'.
+#
+sub get_list_type {
+  my $first_list_item_line = shift;
+  my $type = 'paragraph'; # Default to paragraph type
+
+  if ($first_list_item_line =~ /^\s{0,2}\d+\.\s+\S+/) {
+    $type = 'numbered';
+  } elsif ($first_list_item_line =~ /^\s*([-.])\s+\S+/) {
+    $type = $1;
+  }
+  return $type;
+}
+
+1;

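For reference, the per-item rewriting done in the release loop above (HTML escaping, attribution split, JIRA linking) can be exercised on its own. The sketch below is a minimal standalone version; the sample change-log line and the $jira_url_prefix value are invented for illustration, since the script defines its own prefix elsewhere in code not shown in this hunk.

#!/usr/bin/perl
use strict;
use warnings;

# Assumed prefix; the real script sets its own $jira_url_prefix elsewhere.
my $jira_url_prefix = 'http://issues.apache.org/jira/browse/';

# An invented change-log item, purely for illustration.
my $item = 'HADOOP-1234.  An example change description. (someone)';

$item =~ s:&:&amp;:g;                            # Escape HTML metachars
$item =~ s:<:&lt;:g;
$item =~ s:>:&gt;:g;
$item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:;       # Split off the attribution
$item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}  # Link issue ids to JIRA
          {<a href="${jira_url_prefix}$1">$1</a>}g;

print "      <li>$item</li>\n";
# Prints:
#       <li><a href="http://issues.apache.org/jira/browse/HADOOP-1234">HADOOP-1234</a>.  An example change description.<br />(someone)</li>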
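Similarly, get_list_type can be checked in isolation. This sketch copies the subroutine as-is and feeds it a few invented first lines, one per branch:

#!/usr/bin/perl
use strict;
use warnings;

# Copy of get_list_type from the script above, for a standalone check.
sub get_list_type {
  my $first_list_item_line = shift;
  my $type = 'paragraph';
  if ($first_list_item_line =~ /^\s{0,2}\d+\.\s+\S+/) {
    $type = 'numbered';
  } elsif ($first_list_item_line =~ /^\s*([-.])\s+\S+/) {
    $type = $1;
  }
  return $type;
}

# Invented sample lines, chosen to hit each branch.
print get_list_type(" 1. a numbered entry"),  "\n";   # numbered
print get_list_type("    - a dashed bullet"), "\n";   # -
print get_list_type("    . a dotted bullet"), "\n";   # .
print get_list_type("an ordinary sentence"),  "\n";   # paragraph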
+ 2 - 0
src/docs/forrest.properties

@@ -102,3 +102,5 @@
 #project.issues-rss-url=
 #I18n Property only works for the "forrest run" target.
 #project.i18n=true
+project.configfile=${project.home}/src/documentation/conf/cli.xconf
+

+ 325 - 0
src/docs/src/documentation/conf/cli.xconf

@@ -0,0 +1,325 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!--+
+    |  This is the Apache Cocoon command line configuration file.
+    |  Here you give the command line interface details of where
+    |  to find various aspects of your Cocoon installation.
+    |
+    |  If you wish, you can also use this file to specify the URIs
+    |  that you wish to generate.
+    |
+    |  The current configuration information in this file is for
+    |  building the Cocoon documentation. Therefore, all links here
+    |  are relative to the build context dir, which, in the build.xml
+    |  file, is set to ${build.context}
+    |
+    |  Options:
+    |    verbose:            increase amount of information presented
+    |                        to standard output (default: false)
+    |    follow-links:       whether linked pages should also be
+    |                        generated (default: true)
+    |    precompile-only:    precompile sitemaps and XSP pages, but
+    |                        do not generate any pages (default: false)
+    |    confirm-extensions: check the mime type for the generated page
+    |                        and adjust filename and links extensions
+    |                        to match the mime type
+    |                        (e.g. text/html->.html)
+    |
+    |  Note: Whilst using an xconf file to configure the Cocoon
+    |        Command Line gives access to more features, the use of
+    |        command line parameters is more stable, as there are
+    |        currently plans to improve the xconf format to allow
+    |        greater flexibility. If you require a stable and
+    |        consistent method for accessing the CLI, it is recommended
+    |        that you use the command line parameters to configure
+    |        the CLI. See documentation at:
+    |        http://cocoon.apache.org/2.1/userdocs/offline/
+    |        http://wiki.apache.org/cocoon/CommandLine
+    |
+    +-->
+
+<cocoon verbose="true"
+        follow-links="true"
+        precompile-only="false"
+        confirm-extensions="false">
+
+   <!--+
+       |  The context directory is usually the webapp directory
+       |  containing the sitemap.xmap file.
+       |
+       |  The config file is the cocoon.xconf file.
+       |
+       |  The work directory is used by Cocoon to store temporary
+       |  files and cache files.
+       |
+       |  The destination directory is where generated pages will
+       |  be written (assuming the 'simple' mapper is used, see
+       |  below)
+       +-->
+   <context-dir>.</context-dir>
+   <config-file>WEB-INF/cocoon.xconf</config-file>
+   <work-dir>../tmp/cocoon-work</work-dir>
+   <dest-dir>../site</dest-dir>
+
+   <!--+
+       |  A checksum file can be used to store checksums for pages
+       |  as they are generated. When the site is next generated,
+       |  files will not be written if their checksum has not changed.
+       |  This means that it will be easier to detect which files
+       |  need to be uploaded to a server, using the timestamp.
+       |
+       |  The default path is relative to the core webapp directory.
+       |  An absolute path can be used.
+       +-->
+   <!--   <checksums-uri>build/work/checksums</checksums-uri>-->
+
+   <!--+
+       | Broken link reporting options:
+       |   Report into a text file, one link per line:
+       |     <broken-links type="text" report="filename"/>
+       |   Report into an XML file:
+       |     <broken-links type="xml" report="filename"/>
+       |   Ignore broken links (default):
+       |     <broken-links type="none"/>
+       |
+       |   Two attributes to this node specify whether a page should
+       |   be generated when an error has occurred. 'generate' specifies
+       |   whether a page should be generated (default: true) and
+       |   extension specifies an extension that should be appended
+       |   to the generated page's filename (default: none)
+       |
+       |   Using this, a quick scan through the destination directory
+       |   will show broken links, by their filename extension.
+       +-->
+   <broken-links type="xml"
+                 file="../brokenlinks.xml"
+                 generate="false"
+                 extension=".error"
+                 show-referrers="true"/>
+
+   <!--+
+       |  Load classes at startup. This is necessary for generating
+       |  from sites that use SQL databases and JDBC.
+       |  The <load-class> element can be repeated if multiple classes
+       |  are needed.
+       +-->
+   <!--
+   <load-class>org.firebirdsql.jdbc.Driver</load-class>
+   -->
+
+   <!--+
+       |  Configures logging.
+       |  The 'log-kit' parameter specifies the location of the log kit
+       |  configuration file (usually called logkit.xconf).
+       |
+       |  Logger specifies the logging category (for all logging prior
+       |  to other Cocoon logging categories taking over)
+       |
+       |  Available log levels are:
+       |    DEBUG:        prints all levels of log messages.
+       |    INFO:         prints all levels of log messages except DEBUG
+       |                  ones.
+       |    WARN:         prints all levels of log messages except DEBUG
+       |                  and INFO ones.
+       |    ERROR:        prints all levels of log messages except DEBUG,
+       |                  INFO and WARN ones.
+       |    FATAL_ERROR:  prints only log messages of this level.
+       +-->
+   <!-- <logging log-kit="WEB-INF/logkit.xconf" logger="cli" level="ERROR" /> -->
+
+   <!--+
+       |  Specifies the filename to be appended to URIs that
+       |  refer to a directory (i.e. end with a forward slash).
+       +-->
+   <default-filename>index.html</default-filename>
+
+   <!--+
+       |  Specifies a user agent string to the sitemap when
+       |  generating the site.
+       |
+       |  A generic term for a web browser is "user agent". Any
+       |  user agent, when connecting to a web server, will provide
+       |  a string to identify itself (e.g. as Internet Explorer or
+       |  Mozilla). It is possible to have Cocoon serve different
+       |  content depending upon the user agent string provided by
+       |  the browser. If your site does this, then you may want to
+       |  use this <user-agent> entry to provide a 'fake' user agent
+       |  to Cocoon, so that it generates the correct version of your
+       |  site.
+       |
+       |  For most sites, this can be ignored.
+       +-->
+   <!--
+   <user-agent>Cocoon Command Line Environment 2.1</user-agent>
+   -->
+
+   <!--+
+       |  Specifies an accept string to the sitemap when generating
+       |  the site.
+       |  User agents can specify to an HTTP server what types of content
+       |  (by mime-type) they are able to receive. E.g. a browser may be
+       |  able to handle jpegs, but not pngs. The HTTP accept header
+       |  allows the server to take the browser's capabilities into account,
+       |  and only send back content that it can handle.
+       |
+       |  For most sites, this can be ignored.
+       +-->
+
+   <accept>*/*</accept>
+
+   <!--+
+       | Specifies which URIs should be included or excluded, according
+       | to wildcard patterns.
+       |
+       | These includes/excludes are only relevant when you are following
+       | links. A link URI must match an include pattern (if one is given)
+       | and not match an exclude pattern, if it is to be followed by
+       | Cocoon. It can be useful, for example, where there are links in
+       | your site to pages that are not generated by Cocoon, such as
+       | references to api-documentation.
+       |
+       | By default, all URIs are included. If both include and exclude
+       | patterns are specified, a URI is first checked against the
+       | include patterns, and then against the exclude patterns.
+       |
+       | Multiple patterns can be given, using multiple include or exclude
+       | nodes.
+       |
+       | The order of the elements is not significant, as only the first
+       | successful match of each category is used.
+       |
+       | Currently, only the complete source URI can be matched (including
+       | any URI prefix). Future plans include destination URI matching
+       | and regexp matching. If you have requirements for these, contact
+       | dev@cocoon.apache.org.
+       +-->
+
+   <exclude pattern="**/"/>
+   <exclude pattern="api/**"/>
+   <exclude pattern="changes.html"/>
+
+<!--
+  This is a workaround for FOR-284 "link rewriting broken when
+  linking to xml source views which contain site: links".
+  See the explanation there and in declare-broken-site-links.xsl
+-->
+   <exclude pattern="site:**"/>
+   <exclude pattern="ext:**"/>
+   <exclude pattern="lm:**"/>
+   <exclude pattern="**/site:**"/>
+   <exclude pattern="**/ext:**"/>
+   <exclude pattern="**/lm:**"/>
+
+   <!-- Exclude tokens used in URLs to ASF mirrors (interpreted by a CGI) -->
+   <exclude pattern="[preferred]/**"/>
+   <exclude pattern="[location]"/>
+
+   <!--   <include-links extension=".html"/>-->
+
+   <!--+
+       |  <uri> nodes specify the URIs that should be generated, and
+       |  where required, what should be done with the generated pages.
+       |  They describe the way the URI of the generated file is created
+       |  from the source page's URI. There are three ways that a generated
+       |  file URI can be created: append, replace and insert.
+       |
+       |  The "type" attribute specifies one of (append|replace|insert):
+       |
+       |  append:
+       |  Append the generated page's URI to the end of the source URI:
+       |
+       |   <uri type="append" src-prefix="documents/" src="index.html"
+       |   dest="build/dest/"/>
+       |
+       |  This means that
+       |   (1) the "documents/index.html" page is generated
+       |   (2) the file will be written to "build/dest/documents/index.html"
+       |
+       |  replace:
+       |  Completely ignore the generated page's URI - just
+       |  use the destination URI:
+       |
+       |   <uri type="replace" src-prefix="documents/" src="index.html"
+       |   dest="build/dest/docs.html"/>
+       |
+       |  This means that
+       |   (1) the "documents/index.html" page is generated
+       |   (2) the result is written to "build/dest/docs.html"
+       |   (3) this works only for "single" pages - and not when links
+       |       are followed
+       |
+       |  insert:
+       |  Insert generated page's URI into the destination
+       |  URI at the point marked with a * (example uses fictional
+       |  zip protocol)
+       |
+       |   <uri type="insert" src-prefix="documents/" src="index.html"
+       |   dest="zip://*.zip/page.html"/>
+       |
+       |  This means that
+       |   (1)
+       |
+       |  In any of these scenarios, if the dest attribute is omitted,
+       |  the value provided globally using the <dest-dir> node will
+       |  be used instead.
+       +-->
+   <!--
+   <uri type="replace"
+        src-prefix="samples/"
+        src="hello-world/hello.html"
+        dest="build/dest/hello-world.html"/>
+   -->
+
+   <!--+
+       | <uri> nodes can be grouped together in a <uris> node. This
+       | enables a group of URIs to share properties. The following
+       | properties can be set for a group of URIs:
+       |   * follow-links:       should pages be crawled for links
+       |   * confirm-extensions: should file extensions be checked
+       |                         for the correct mime type
+       |   * src-prefix:         all source URIs should be
+       |                         pre-pended with this prefix before
+       |                         generation. The prefix is not
+       |                         included when calculating the
+       |                         destination URI
+       |   * dest:               the base destination URI to be
+       |                         shared by all pages in this group
+       |   * type:               the method to be used to calculate
+       |                         the destination URI. See above
+       |                         section on <uri> node for details.
+       |
+       | Each <uris> node can have a name attribute. When a name
+       | attribute has been specified, the -n switch on the command
+       | line can be used to tell Cocoon to only process the URIs
+       | within this URI group. When no -n switch is given, all
+       | <uris> nodes are processed. Thus, one xconf file can be
+       | used to manage multiple sites.
+       +-->
+   <!--
+   <uris name="mirrors" follow-links="false">
+     <uri type="append" src="mirrors.html"/>
+   </uris>
+   -->
+
+   <!--+
+       |  File containing URIs (plain text, one per line).
+       +-->
+   <!--
+   <uri-file>uris.txt</uri-file>
+   -->
+</cocoon>

+ 2 - 0
src/docs/src/documentation/content/xdocs/site.xml

@@ -51,6 +51,7 @@ See http://forrest.apache.org/docs/linking.html for more info.
     <wiki      label="Wiki"               href="ext:wiki" />
     <faq       label="FAQ"                href="ext:faq" />
     <lists     label="Mailing Lists"      href="ext:lists" />
+    <changes   label="Release Notes"      href="ext:changes" />
   </docs>
 
   <external-refs>
@@ -80,6 +81,7 @@ See http://forrest.apache.org/docs/linking.html for more info.
       <python href="http://www.python.org" />
       <twisted-python href="http://twistedmatrix.com/trac/" />
     </hod>
+    <changes href="changes.html" />
     <api href="api/">
       <index href="index.html" />
       <org href="org/">