ソースを参照

Preparing for Hadoop 0.17.0 and adding release notes

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/branch-0.17@656523 13f79535-47bb-0310-9956-ffa450edef68
Nigel Daley 17 年 前
コミット
f2c1a6c97c
35 ファイル変更1089 行追加137 行削除
  1. 1 1
      CHANGES.txt
  2. 1 1
      build.xml
  3. 68 69
      docs/changes.html
  4. 5 3
      docs/cluster_setup.html
  5. 3 3
      docs/cluster_setup.pdf
  6. 15 5
      docs/hadoop-default.html
  7. 4 1
      docs/hdfs_design.html
  8. 4 1
      docs/hdfs_permissions_guide.html
  9. 4 1
      docs/hdfs_shell.html
  10. 4 1
      docs/hdfs_user_guide.html
  11. 4 1
      docs/hod.html
  12. 4 1
      docs/hod_admin_guide.html
  13. 4 1
      docs/hod_config_guide.html
  14. 4 1
      docs/hod_user_guide.html
  15. 4 1
      docs/index.html
  16. 11 2
      docs/linkmap.html
  17. 12 12
      docs/linkmap.pdf
  18. 32 27
      docs/mapred_tutorial.html
  19. 1 1
      docs/mapred_tutorial.pdf
  20. 4 1
      docs/native_libraries.html
  21. 4 1
      docs/quickstart.html
  22. 888 0
      docs/releasenotes.html
  23. BIN
      docs/skin/images/rc-b-l-15-1body-2menu-3menu.png
  24. BIN
      docs/skin/images/rc-b-r-15-1body-2menu-3menu.png
  25. BIN
      docs/skin/images/rc-b-r-5-1header-2tab-selected-3tab-selected.png
  26. BIN
      docs/skin/images/rc-t-l-5-1header-2searchbox-3searchbox.png
  27. BIN
      docs/skin/images/rc-t-l-5-1header-2tab-selected-3tab-selected.png
  28. BIN
      docs/skin/images/rc-t-l-5-1header-2tab-unselected-3tab-unselected.png
  29. BIN
      docs/skin/images/rc-t-r-15-1body-2menu-3menu.png
  30. BIN
      docs/skin/images/rc-t-r-5-1header-2searchbox-3searchbox.png
  31. BIN
      docs/skin/images/rc-t-r-5-1header-2tab-selected-3tab-selected.png
  32. BIN
      docs/skin/images/rc-t-r-5-1header-2tab-unselected-3tab-unselected.png
  33. 4 1
      docs/streaming.html
  34. 1 0
      src/docs/src/documentation/conf/cli.xconf
  35. 3 1
      src/docs/src/documentation/content/xdocs/site.xml

+ 1 - 1
CHANGES.txt

@@ -1,7 +1,7 @@
 Hadoop Change Log
 
 
-Release 0.17.0 - Unreleased
+Release 0.17.0 - 2008-05-18
 
   INCOMPATIBLE CHANGES
 

+ 1 - 1
build.xml

@@ -26,7 +26,7 @@
  
   <property name="Name" value="Hadoop"/>
   <property name="name" value="hadoop"/>
-  <property name="version" value="0.17.0"/>
+  <property name="version" value="0.17.1-dev"/>
   <property name="final.name" value="${name}-${version}"/>
   <property name="year" value="2006"/>
   <property name="libhdfs.version" value="1"/>

+ 68 - 69
docs/changes.html

@@ -36,7 +36,7 @@
     function collapse() {
       for (var i = 0; i < document.getElementsByTagName("ul").length; i++) {
         var list = document.getElementsByTagName("ul")[i];
-        if (list.id != 'release_0.17.0_-_unreleased_' && list.id != 'release_0.16.3_-_2008-04-16_') {
+        if (list.id != 'release_0.17.0_-_2008-05-18_' && list.id != 'release_0.16.4_-_2008-05-05_') {
           list.style.display = "none";
         }
       }
@@ -52,12 +52,12 @@
 <a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Scalable Computing Platform"></a>
 <h1>Hadoop Change Log</h1>
 
-<h2><a href="javascript:toggleList('release_0.17.0_-_unreleased_')">Release 0.17.0 - Unreleased
+<h2><a href="javascript:toggleList('release_0.17.0_-_2008-05-18_')">Release 0.17.0 - 2008-05-18
 </a></h2>
-<ul id="release_0.17.0_-_unreleased_">
-  <li><a href="javascript:toggleList('release_0.17.0_-_unreleased_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(24)
-    <ol id="release_0.17.0_-_unreleased_._incompatible_changes_">
+<ul id="release_0.17.0_-_2008-05-18_">
+  <li><a href="javascript:toggleList('release_0.17.0_-_2008-05-18_._incompatible_changes_')">  INCOMPATIBLE CHANGES
+</a>&nbsp;&nbsp;&nbsp;(26)
+    <ol id="release_0.17.0_-_2008-05-18_._incompatible_changes_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2786">HADOOP-2786</a>.  Move hbase out of hadoop core
 </li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2345">HADOOP-2345</a>.  New HDFS transactions to support appending
@@ -105,11 +105,17 @@ instance sizes have also been added.<br />(Chris K Wensel via tomwhite)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2826">HADOOP-2826</a>. Deprecated FileSplit.getFile(), LineRecordReader.readLine().<br />(Amareshwari Sriramadasu via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3239">HADOOP-3239</a>. getFileInfo() returns null for non-existing files instead
 of throwing FileNotFoundException.<br />(Lohit Vijayarenu via shv)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3266">HADOOP-3266</a>. Removed HOD changes from CHANGES.txt, as they are now inside
+src/contrib/hod<br />(Hemanth Yamijala via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3280">HADOOP-3280</a>. Separate the configuration of the virtual memory size
+(mapred.child.ulimit) from the jvm heap size, so that 64 bit
+streaming applications are supported even when running with 32 bit
+jvms.<br />(acmurthy via omalley)</li>
     </ol>
   </li>
-  <li><a href="javascript:toggleList('release_0.17.0_-_unreleased_._new_features_')">  NEW FEATURES
+  <li><a href="javascript:toggleList('release_0.17.0_-_2008-05-18_._new_features_')">  NEW FEATURES
 </a>&nbsp;&nbsp;&nbsp;(12)
-    <ol id="release_0.17.0_-_unreleased_._new_features_">
+    <ol id="release_0.17.0_-_2008-05-18_._new_features_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-1398">HADOOP-1398</a>.  Add HBase in-memory block cache.<br />(tomwhite)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2178">HADOOP-2178</a>.  Job History on DFS.<br />(Amareshwari Sri Ramadasu via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2063">HADOOP-2063</a>. A new parameter to dfs -get command to fetch a file
@@ -134,9 +140,9 @@ read and written to HDFS, S3, KFS, and local file systems.<br />(omalley)</li>
 and restore serializations of objects to/from strings.<br />(enis)</li>
     </ol>
   </li>
-  <li><a href="javascript:toggleList('release_0.17.0_-_unreleased_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(32)
-    <ol id="release_0.17.0_-_unreleased_._improvements_">
+  <li><a href="javascript:toggleList('release_0.17.0_-_2008-05-18_._improvements_')">  IMPROVEMENTS
+</a>&nbsp;&nbsp;&nbsp;(29)
+    <ol id="release_0.17.0_-_2008-05-18_._improvements_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2655">HADOOP-2655</a>. Copy on write for data and metadata files in the
 presence of snapshots. Needed for supporting appends to HDFS
 files.<br />(dhruba)</li>
@@ -157,9 +163,6 @@ to automated execution.<br />(Mukund Madhugiri via cdouglas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2908">HADOOP-2908</a>.  A document that describes the DFS Shell command.<br />(Mahadev Konar via dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2981">HADOOP-2981</a>.  Update README.txt to reflect the upcoming use of
 cryptography.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2775">HADOOP-2775</a>.  Adds unit test framework for HOD.
-(Vinod Kumar Vavilapalli via ddas).
-</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2804">HADOOP-2804</a>.  Add support to publish CHANGES.txt as HTML when running
 the Ant 'docs' target.<br />(nigel)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2559">HADOOP-2559</a>. Change DFS block placement to allocate the first replica
@@ -168,14 +171,8 @@ second.<br />(lohit vijayarenu via cdouglas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2939">HADOOP-2939</a>. Make the automated patch testing process an executable
 Ant target, test-patch.<br />(nigel)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2239">HADOOP-2239</a>. Add HsftpFileSystem to permit transferring files over ssl.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2848">HADOOP-2848</a>. [HOD]hod -o list and deallocate works even after deleting
-the cluster directory.<br />(Hemanth Yamijala via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2899">HADOOP-2899</a>. [HOD] Cleans up hdfs:///mapredsystem directory after
-deallocation.<br />(Hemanth Yamijala via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2886">HADOOP-2886</a>.  Track individual RPC metrics.<br />(girish vaitheeswaran via dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2373">HADOOP-2373</a>. Improvement in safe-mode reporting.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2796">HADOOP-2796</a>. Enables distinguishing exit codes from user code vis-a-vis
-HOD's exit code.<br />(Hemanth Yamijala via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3091">HADOOP-3091</a>. Modify FsShell command -put to accept multiple sources.<br />(Lohit Vijaya Renu via cdouglas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3092">HADOOP-3092</a>. Show counter values from job -status command.<br />(Tom White via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-1228">HADOOP-1228</a>.  Ant task to generate Eclipse project files.<br />(tomwhite)</li>
@@ -194,11 +191,12 @@ as Hudson generates false negatives under the current load.<br />(Nigel Daley vi
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3174">HADOOP-3174</a>. Illustrative example for MultipleFileInputFormat.<br />(Enis
 Soztutar via acmurthy)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2993">HADOOP-2993</a>. Clarify the usage of JAVA_HOME in the Quick Start guide.<br />(acmurthy via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3124">HADOOP-3124</a>. Make DataNode socket write timeout configurable.<br />(rangadi)</li>
     </ol>
   </li>
-  <li><a href="javascript:toggleList('release_0.17.0_-_unreleased_._optimizations_')">  OPTIMIZATIONS
+  <li><a href="javascript:toggleList('release_0.17.0_-_2008-05-18_._optimizations_')">  OPTIMIZATIONS
 </a>&nbsp;&nbsp;&nbsp;(12)
-    <ol id="release_0.17.0_-_unreleased_._optimizations_">
+    <ol id="release_0.17.0_-_2008-05-18_._optimizations_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2790">HADOOP-2790</a>.  Fixed inefficient method hasSpeculativeTask by removing
 repetitive calls to get the current time and late checking to see if
 we want speculation on at all.<br />(omalley)</li>
@@ -233,9 +231,9 @@ exponentially increasing number of records (up to 10,000
 records/log).<br />(Zheng Shao via omalley)</li>
     </ol>
   </li>
-  <li><a href="javascript:toggleList('release_0.17.0_-_unreleased_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(99)
-    <ol id="release_0.17.0_-_unreleased_._bug_fixes_">
+  <li><a href="javascript:toggleList('release_0.17.0_-_2008-05-18_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(102)
+    <ol id="release_0.17.0_-_2008-05-18_._bug_fixes_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2195">HADOOP-2195</a>. '-mkdir' behaviour is now closer to Linux shell in case of
 errors.<br />(Mahadev Konar via rangadi)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2190">HADOOP-2190</a>. bring behaviour '-ls' and '-du' closer to Linux shell
@@ -316,10 +314,6 @@ with different sizes to the namenode, the namenode picks the
 replica(s) with the largest size as the only valid replica(s).<br />(dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2825">HADOOP-2825</a>. Deprecated MapOutputLocation.getFile() is removed.<br />(Amareshwari Sri Ramadasu via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2806">HADOOP-2806</a>. Fixes a streaming document.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2924">HADOOP-2924</a>. Fixes an address problem to do with TaskTracker binding
-to an address.<br />(Vinod Kumar Vavilapalli via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2970">HADOOP-2970</a>. Fixes a problem to do with Wrong class definition for
-hodlib/Hod/hod.py for Python &lt; 2.5.1.<br />(Vinod Kumar Vavilapalli via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3008">HADOOP-3008</a>. SocketIOWithTimeout throws InterruptedIOException if the
 thread is interrupted while it is waiting.<br />(rangadi)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3006">HADOOP-3006</a>. Fix wrong packet size reported by DataNode when a block
@@ -330,10 +324,6 @@ it detects a bad connection to another datanode in the pipeline.<br />(dhruba)</
 checksum reservation fails.<br />(Devaraj Das via cdouglas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3036">HADOOP-3036</a>. Fix findbugs warnings in UpgradeUtilities.<br />(Konstantin
 Shvachko via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2783">HADOOP-2783</a>. Fixes a problem to do with import in
-hod/hodlib/Common/xmlrpc.py.<br />(Vinod Kumar Vavilapalli via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2936">HADOOP-2936</a>. Fixes HOD in a way that it generates hdfs://host:port on the
-client side configs.<br />(Vinod Kumar Vavilapalli via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3025">HADOOP-3025</a>. ChecksumFileSystem supports the delete method with
 the recursive flag.<br />(Mahadev Konar via dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3012">HADOOP-3012</a>. dfs -mv file to user home directory throws exception if
@@ -344,8 +334,6 @@ safe mode<br />(jimk)</li>
 is set as empty.<br />(Amareshwari Sriramadasu via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3080">HADOOP-3080</a>. Removes flush calls from JobHistory.<br />(Amareshwari Sriramadasu via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3086">HADOOP-3086</a>. Adds the testcase missed during commit of hadoop-3040.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2983">HADOOP-2983</a>. [HOD] Fixes the problem - local_fqdn() returns None when
-gethostbyname_ex doesnt return any FQDNs.<br />(Craig Macdonald via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3046">HADOOP-3046</a>. Fix the raw comparators for Text and BytesWritables
 to use the provided length rather than recompute it.<br />(omalley)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3094">HADOOP-3094</a>. Fix BytesWritable.toString to avoid extending the sign bit<br />(Owen O'Malley via cdouglas)</li>
@@ -353,7 +341,6 @@ to use the provided length rather than recompute it.<br />(omalley)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3073">HADOOP-3073</a>. close() on SocketInputStream or SocketOutputStream should
 close the underlying channel.<br />(rangadi)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3087">HADOOP-3087</a>. Fixes a problem to do with refreshing of loadHistory.jsp.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2982">HADOOP-2982</a>. Fixes a problem in the way HOD looks for free nodes.<br />(Hemanth Yamijala via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3065">HADOOP-3065</a>. Better logging message if the rack location of a datanode
 cannot be determined.<br />(Devaraj Das via dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3064">HADOOP-3064</a>. Commas in a file path should not be treated as delimiters.<br />(Hairong Kuang via shv)</li>
@@ -371,8 +358,6 @@ first key arrives.<br />(Rick Cox via tomwhite)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3146">HADOOP-3146</a>. A DFSOutputStream.flush method is renamed as
 DFSOutputStream.fsync.<br />(dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3165">HADOOP-3165</a>. -put/-copyFromLocal did not treat input file "-" as stdin.<br />(Lohit Vijayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3138">HADOOP-3138</a>. DFS mkdirs() should not throw an exception if the directory
-already exists.<br />(rangadi)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3041">HADOOP-3041</a>. Deprecate JobConf.setOutputPath and JobConf.getOutputPath.
 Deprecate OutputFormatBase. Add FileOutputFormat. Existing output formats
 extending OutputFormatBase, now extend FileOutputFormat. Add the following
@@ -414,11 +399,49 @@ file.<br />(cdouglas via acmurthy)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3256">HADOOP-3256</a>. Encodes the job name used in the filename for history files.<br />(Arun Murthy via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3162">HADOOP-3162</a>. Ensure that comma-separated input paths are treated correctly
 as multiple input paths.<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3263">HADOOP-3263</a>. Ensure that the job-history log file always follows the
+pattern of hostname_timestamp_jobid_username_jobname even if username
+and/or jobname are not specified. This helps to avoid wrong assumptions
+made about the job-history log filename in jobhistory.jsp.<br />(acmurthy)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3251">HADOOP-3251</a>. Fixes getFilesystemName in JobTracker and LocalJobRunner to
+use FileSystem.getUri instead of FileSystem.getName.<br />(Arun Murthy via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3237">HADOOP-3237</a>. Fixes TestDFSShell.testErrOutPut on Windows platform.<br />(Mahadev Konar via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3279">HADOOP-3279</a>. TaskTracker checks for SUCCEEDED task status in addition to
+COMMIT_PENDING status when it fails maps due to lost map.<br />(Devaraj Das)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3286">HADOOP-3286</a>. Prevent collisions in gridmix output dirs by increasing the
+granularity of the timestamp.<br />(Runping Qi via cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3285">HADOOP-3285</a>. Fix input split locality when the splits align to
+fs blocks.<br />(omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3372">HADOOP-3372</a>. Fix heap management in streaming tests.<br />(Arun Murthy via
+cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3031">HADOOP-3031</a>. Fix javac warnings in test classes.<br />(cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3382">HADOOP-3382</a>. Fix memory leak when files are not cleanly closed<br />(rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3322">HADOOP-3322</a>. Fix to push MetricsRecord for rpc metrics.<br />(Eric Yang via
+mukund)</li>
     </ol>
   </li>
 </ul>
-<h2><a href="javascript:toggleList('release_0.16.3_-_2008-04-16_')">Release 0.16.3 - 2008-04-16
+<h2><a href="javascript:toggleList('release_0.16.4_-_2008-05-05_')">Release 0.16.4 - 2008-05-05
 </a></h2>
+<ul id="release_0.16.4_-_2008-05-05_">
+  <li><a href="javascript:toggleList('release_0.16.4_-_2008-05-05_._bug_fixes_')">  BUG FIXES
+</a>&nbsp;&nbsp;&nbsp;(3)
+    <ol id="release_0.16.4_-_2008-05-05_._bug_fixes_">
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3138">HADOOP-3138</a>. DFS mkdirs() should not throw an exception if the directory
+already exists.<br />(rangadi via mukund)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3294">HADOOP-3294</a>. Fix distcp to check the destination length and retry the copy
+if it doesn't match the src length. (Tsz Wo (Nicholas), SZE via mukund)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3186">HADOOP-3186</a>. Fix incorrect permission checking for mv and renameTo
+in HDFS. (Tsz Wo (Nicholas), SZE via mukund)
+</li>
+    </ol>
+  </li>
+</ul>
+<h2><a href="javascript:toggleList('older')">Older Releases</a></h2>
+<ul id="older">
+<h3><a href="javascript:toggleList('release_0.16.3_-_2008-04-16_')">Release 0.16.3 - 2008-04-16
+</a></h3>
 <ul id="release_0.16.3_-_2008-04-16_">
   <li><a href="javascript:toggleList('release_0.16.3_-_2008-04-16_._bug_fixes_')">  BUG FIXES
 </a>&nbsp;&nbsp;&nbsp;(7)
@@ -445,13 +468,11 @@ from 733 to ensure sharing of HOD clusters works correctly. (Tsz Wo
     </ol>
   </li>
 </ul>
-<h2><a href="javascript:toggleList('older')">Older Releases</a></h2>
-<ul id="older">
 <h3><a href="javascript:toggleList('release_0.16.2_-_2008-04-02_')">Release 0.16.2 - 2008-04-02
 </a></h3>
 <ul id="release_0.16.2_-_2008-04-02_">
   <li><a href="javascript:toggleList('release_0.16.2_-_2008-04-02_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(19)
+</a>&nbsp;&nbsp;&nbsp;(18)
     <ol id="release_0.16.2_-_2008-04-02_._bug_fixes_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3011">HADOOP-3011</a>. Prohibit distcp from overwriting directories on the
 destination filesystem with files.<br />(cdouglas)</li>
@@ -483,9 +504,6 @@ via omalley)</li>
 exceptions.<br />(Koji Noguchi via omalley)</li>
      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3084">HADOOP-3084</a>. Fix HftpFileSystem to work for zero-length files.<br />(cdouglas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3107">HADOOP-3107</a>. Fix NPE when fsck invokes getListings.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3103">HADOOP-3103</a>. [HOD] Hadoop.tmp.dir should not be set to cluster
-directory. (Vinod Kumar Vavilapalli via ddas).
-</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3104">HADOOP-3104</a>. Limit MultithreadedMapRunner to have a fixed length queue
 between the RecordReader and the map threads.<br />(Alejandro Abdelnur via
 omalley)</li>
@@ -501,10 +519,8 @@ DistributedFileSystem.<br />(shv via nigel)</li>
 </a></h3>
 <ul id="release_0.16.1_-_2008-03-13_">
   <li><a href="javascript:toggleList('release_0.16.1_-_2008-03-13_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(2)
+</a>&nbsp;&nbsp;&nbsp;(1)
     <ol id="release_0.16.1_-_2008-03-13_._incompatible_changes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2861">HADOOP-2861</a>. Improve the user interface for the HOD commands.
-Command line structure has changed.<br />(Hemanth Yamijala via nigel)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2869">HADOOP-2869</a>. Deprecate SequenceFile.setCompressionType in favor of
 SequenceFile.createWriter, SequenceFileOutputFormat.setCompressionType,
 and JobConf.setMapOutputCompressionType. (Arun C Murthy via cdouglas)
@@ -514,18 +530,15 @@ Configuration changes to hadoop-default.xml:
     </ol>
   </li>
   <li><a href="javascript:toggleList('release_0.16.1_-_2008-03-13_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(4)
+</a>&nbsp;&nbsp;&nbsp;(2)
     <ol id="release_0.16.1_-_2008-03-13_._improvements_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2371">HADOOP-2371</a>. User guide for file permissions in HDFS.<br />(Robert Chansler via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2730">HADOOP-2730</a>. HOD documentation update.<br />(Vinod Kumar Vavilapalli via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2911">HADOOP-2911</a>. Make the information printed by the HOD allocate and
-info commands less verbose and clearer.<br />(Vinod Kumar via nigel)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3098">HADOOP-3098</a>. Allow more characters in user and group names while
 using -chown and -chgrp commands.<br />(rangadi)</li>
     </ol>
   </li>
   <li><a href="javascript:toggleList('release_0.16.1_-_2008-03-13_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(35)
+</a>&nbsp;&nbsp;&nbsp;(31)
     <ol id="release_0.16.1_-_2008-03-13_._bug_fixes_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2789">HADOOP-2789</a>. Race condition in IPC Server Responder that could close
 connections early.<br />(Raghu Angadi)</li>
@@ -564,8 +577,6 @@ groups.<br />(runping via omalley)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2843">HADOOP-2843</a>. Fix protections on map-side join classes to enable derivation.<br />(cdouglas via omalley)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2840">HADOOP-2840</a>. Fix gridmix scripts to correctly invoke the java sort through
 the proper jar.<br />(Mukund Madhugiri via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2766">HADOOP-2766</a>. Enables setting of HADOOP_OPTS env variable for the hadoop
-daemons through HOD.<br />(Vinod Kumar Vavilapalli via ddas)</li>
      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2769">HADOOP-2769</a>.  TestNNThroughputBenchmark should not use a fixed port for
 the namenode http port.<br />(omalley)</li>
      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2852">HADOOP-2852</a>. Update gridmix benchmark to avoid an artificially long tail.<br />(cdouglas)</li>
@@ -578,18 +589,12 @@ JobTracker upon reinitialization. (Owen O'Malley via ddas).
 "No lease on file" can be diagnosed.<br />(dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2923">HADOOP-2923</a>.  Add SequenceFileAsBinaryInputFormat, which was
 missed in the commit for <a href="http://issues.apache.org/jira/browse/HADOOP-2603">HADOOP-2603</a>.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2847">HADOOP-2847</a>.  Ensure idle cluster cleanup works even if the JobTracker
-becomes unresponsive to RPC calls.<br />(Hemanth Yamijala via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2809">HADOOP-2809</a>.  Fix HOD syslog config syslog-address so that it works.<br />(Hemanth Yamijala via nigel)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2931">HADOOP-2931</a>. IOException thrown by DFSOutputStream had wrong stack
 trace in some cases.<br />(Michael Bieniosek via rangadi)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2883">HADOOP-2883</a>. Write failures and data corruptions on HDFS files.
 The write timeout is back to what it was on 0.15 release. Also, the
datanodes flushes the block file buffered output stream before
 sending a positive ack for the packet back to the client.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2925">HADOOP-2925</a>. Fix HOD to create the mapred system directory using a
-naming convention that will avoid clashes in multi-user shared
-cluster scenario.<br />(Hemanth Yamijala via nigel)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2756">HADOOP-2756</a>. NPE in DFSClient while closing DFSOutputStreams
 under load.<br />(rangadi)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2958">HADOOP-2958</a>. Fixed FileBench which broke due to <a href="http://issues.apache.org/jira/browse/HADOOP-2391">HADOOP-2391</a> which performs
@@ -682,7 +687,7 @@ with previous versions). (Tsz Wo (Nicholas), SZE via dhruba)
     </ol>
   </li>
   <li><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_._new_features_')">  NEW FEATURES
-</a>&nbsp;&nbsp;&nbsp;(14)
+</a>&nbsp;&nbsp;&nbsp;(13)
     <ol id="release_0.16.0_-_2008-02-07_._new_features_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-1857">HADOOP-1857</a>.  Ability to run a script when a task fails to capture stack
 traces.<br />(Amareshwari Sri Ramadasu via ddas)</li>
@@ -691,8 +696,6 @@ Unix users and groups.<br />(Hairong Kuang via dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-1652">HADOOP-1652</a>.  A utility to balance data among datanodes in a HDFS cluster.<br />(Hairong Kuang via dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2085">HADOOP-2085</a>.  A library to support map-side joins of consistently
 partitioned and sorted data sets.<br />(Chris Douglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1301">HADOOP-1301</a>.  Hadoop-On-Demand (HOD): resource management
-provisioning for Hadoop.<br />(Hemanth Yamijala via nigel)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2336">HADOOP-2336</a>. Shell commands to modify file permissions.<br />(rangadi)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-1298">HADOOP-1298</a>. Implement file permissions for HDFS.
 (Tsz Wo (Nicholas) &amp; taton via cutting)
@@ -858,7 +861,7 @@ the map task.<br />(Amar Kamat via ddas)</li>
     </ol>
   </li>
   <li><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(92)
+</a>&nbsp;&nbsp;&nbsp;(90)
     <ol id="release_0.16.0_-_2008-02-07_._bug_fixes_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2583">HADOOP-2583</a>.  Fixes a bug in the Eclipse plug-in UI to edit locations.
 Plug-in version is now synchronized with Hadoop version.
@@ -1040,8 +1043,6 @@ alive and responding.<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
 request was timing out.<br />(dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2576">HADOOP-2576</a>. Namenode performance degradation over time triggered by
 large heartbeat interval.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2720">HADOOP-2720</a>. Jumbo bug fix patch to HOD.  Final sync of Apache SVN with
-internal Yahoo SVN.<br />(Hemanth Yamijala via nigel)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2713">HADOOP-2713</a>. TestDatanodeDeath failed on windows because the replication
 request was timing out.<br />(dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2639">HADOOP-2639</a>. Fixes a problem to do with incorrect maintenance of values
@@ -1054,8 +1055,6 @@ profiling.<br />(Amareshwari Sri Ramadasu via omalley)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2732">HADOOP-2732</a>. Fix bug in path globbing.<br />(Hairong Kuang via nigel)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2404">HADOOP-2404</a>. Fix backwards compatability with hadoop-0.15 configuration
 files that was broken by <a href="http://issues.apache.org/jira/browse/HADOOP-2185">HADOOP-2185</a>.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2740">HADOOP-2740</a>. Fix HOD to work with the configuration variables changed in
-<a href="http://issues.apache.org/jira/browse/HADOOP-2404">HADOOP-2404</a>.<br />(Hemanth Yamijala via omalley)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2755">HADOOP-2755</a>. Fix fsck performance degradation because of permissions
 issue.  (Tsz Wo (Nicholas), SZE via dhruba)
 </li>

+ 5 - 3
docs/cluster_setup.html

@@ -150,7 +150,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>
@@ -511,8 +514,7 @@ document.write("Last Published: " + document.lastModified);
 <td colspan="1" rowspan="1">mapred.child.java.opts</td>
                     <td colspan="1" rowspan="1">-Xmx512M</td>
                     <td colspan="1" rowspan="1">
-                      Larger heap-size for child jvms of maps/reduces. Also controls the amount 
-                      of virtual memory that a streaming/pipes task gets.
+                      Larger heap-size for child jvms of maps/reduces. 
                     </td>
                   
 </tr>

ファイルの差分が大きいため隠しています
+ 3 - 3
docs/cluster_setup.pdf


+ 15 - 5
docs/hadoop-default.html

@@ -121,9 +121,7 @@ creations/deletions), or "all".</td>
 </tr>
 <tr>
 <td><a name="fs.checkpoint.dir">fs.checkpoint.dir</a></td><td>${hadoop.tmp.dir}/dfs/namesecondary</td><td>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images and edits to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
+      name node should store the temporary images and edits to merge.  
   </td>
 </tr>
 <tr>
@@ -455,8 +453,20 @@ creations/deletions), or "all".</td>
   For example, to enable verbose gc logging to a file named for the taskid in
   /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
         -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-  The value of -Xmx will also directly influence the amount of virtual memory
-  that a streaming/pipes task gets during execution.
+  
+  The configuration variable mapred.child.ulimit can be used to control the
+  maximum virtual memory of the child processes. 
+  </td>
+</tr>
+<tr>
+<td><a name="mapred.child.ulimit">mapred.child.ulimit</a></td><td></td><td>The maximum virtual memory, in KB, of a process launched by the 
+  Map-Reduce framework. This can be used to control both the Mapper/Reducer 
+  tasks and applications using Hadoop Pipes, Hadoop Streaming etc. 
+  By default it is left unspecified to let cluster admins control it via 
+  limits.conf and other such relevant mechanisms.
+  
+  Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to
+  JavaVM, else the VM might not start. 
   </td>
 </tr>
 <tr>

+ 4 - 1
docs/hdfs_design.html

@@ -152,7 +152,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>

+ 4 - 1
docs/hdfs_permissions_guide.html

@@ -152,7 +152,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>

+ 4 - 1
docs/hdfs_shell.html

@@ -150,7 +150,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>

+ 4 - 1
docs/hdfs_user_guide.html

@@ -152,7 +152,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>

+ 4 - 1
docs/hod.html

@@ -152,7 +152,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>

+ 4 - 1
docs/hod_admin_guide.html

@@ -152,7 +152,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>

+ 4 - 1
docs/hod_config_guide.html

@@ -152,7 +152,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>

+ 4 - 1
docs/hod_user_guide.html

@@ -152,7 +152,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>

+ 4 - 1
docs/index.html

@@ -150,7 +150,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit">

+ 11 - 2
docs/linkmap.html

@@ -150,7 +150,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>
@@ -287,7 +290,13 @@ document.write("Last Published: " + document.lastModified);
     
 <ul>
 <li>
-<a href="changes.html">Release Notes</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>changes</em>
+<a href="releasenotes.html">Release Notes</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>relnotes</em>
+</li>
+</ul>
+    
+<ul>
+<li>
+<a href="changes.html">All Changes</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>changes</em>
 </li>
 </ul>
   

+ 12 - 12
docs/linkmap.pdf

@@ -5,10 +5,10 @@
 /Producer (FOP 0.20.5) >>
 endobj
 5 0 obj
-<< /Length 1008 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 1036 /Filter [ /ASCII85Decode /FlateDecode ]
  >>
 stream
-Gatn&?#SI?'Sc)J.t$#VPQ:u)?`*4rDX)F$m:)V5C6KLbYu;UNg:n^fBah+K7rAthiIH=4#CI"$NYdm41Z-(Q5]<ot^ud;A5]8MG*n4QX784jK&GRgK(2NC5_-AI0kT/V)e0>4]14NEt:"QZ.pR*mF/A(UIOXlQplHoFg/UhAEl60u.*B>#@J3dZ*(9+AR(UP&?YhQg+D@)81,Y_L>U,1e<]ubr5H7EW54ARLSAP#n%XCG7s!pnQ:%m,b9'Q2Um0W+;4(;XTc."d&_bk?uC7FB>5_H-I#gX".gHfGYB#Xf8J?(*eI6X\J1&-O\/Y`B>NP<UYYHEH'D=0fofUA;_dBXU`8`:!j3@SI*HqR6^sl:I49;EU#;U-A&YF#AiGdigK&6.ePJ;p4s0;S5uhfr/%.>NO7Q4$Z;&^WW?QF=13s.8b6h4r:4?*Ho.,]4U^U`QEEFJn1[8e5q`EVmCY=3%a/i7rk)QUHtX\0G:ssBsJ]g6:,@(I^kn4.skR:eM?q#/cgnd=+!6f-E6aVSYm[T5)km0(1:3l5@2EW1Weal(?J_#Q/7@t:SK2MIL(Z\_d:iA)"_Fm"lC<UMPQ97o4:e"U6IbZ9a[BZ-*FC7qFZkYZa-eod4Y+;ra^$:%\^"Q0(2G>8l:QP`t\;Pgk")oS4(&Y2clj6CRM@iZH)f*G"&FVlO[8>3^$pKC([Z?Y'heNVUcS%"r\Cg=$8//=@r^CZYXsdh;W4rjfqI9gLJD=iu:!LLEk<<9u#bA*"CCKPO&1XFZ75Pi80E\9lE4O@ln&ga'fllWV!%'"dbWKJmH5oCA\GJFKr#n)+QD;*etkdd[LcV+qqVpG+$N?NSdV2J@($"UDTmk?\oTCYD^M4H^/=2EWcIU:li!;&7Fdg_HTcQ-HF@G#jZK7]`NmI+Omt'Tr&<0e;J]>"!_ncD#T]?Q)a7iF,)40/.>rp?&;JF68[NffXBt^;EaX"lBrB0T!ZHpYT>TuZnYbOb-H^R3hA_@\uG]7Ul%C[~>
+Gatn&?#Q2d'Sc)J/%BCKlIj08E1+0#\uFRqg;Q*I/i^g"JJ:eYrVIJ&#.kTrU.qH%+6'qo&*MS/L[j`SM=4jSTGK7oYl.4,d(>nn&*bh=U3;J-#^dD6n]LaB[ua5KHNuM9ouu9\/07i>h%.I%0"9udA`#[$$RH-[msBPspkMnI*l:D"OeY2B(aMR'c=Gb"T^Bsph<'-,6XVrEd^3"$qG'qkf""W.81j3M02bG7=]2Z-RRB#u!*aV'^eQ*cKcB_iN9NE^7BgnBAIsrieXl?=/DIR/fEqS+qq2luD[bV*JLJC<8[HEF#g?HXJcLN8p&RKln],4hXRpl"8s[Fq1nXOIbn`ppjLnqdX<!s9rsk]-5)JPrMW`)\e-A0HY"#rZ3^s<.#RAGR)jrh<MH=OR)`8\C<?<`u,BP4'q^d%^GEb%o&lg%3$_a$2@HZs\CVAmZf#H\d,U7`5^4JI15$@N2o.%k(,^3BN))R3N,GD0,V9ABO,_QZiLWfM"[f-')Om**i$"0".ik+0X8\Gf*+qo8$BtOkY!5%O.nNo.$8M9IsW)g4,ohJJ9&iK4P6SNU"G,"]J?nfLDq>0#=UeP<h06*c09(I?b.;Zng\@VP#X$f"ob/1t_]?iY:PJt'qKJrcbGusN.ML%$;VSLgC%`GO.,T=n>KrP8_al<:EH`Z[G=/>8dNj3`#1D%!uS9S3/m:3CVGE%GfTV2s@e['9$=!BVkAb2tk(128<:N\,SN\*b[YN7/QNmM/lrK7\31/R9`fD9Q$DN#sYK,J_c^]#@9D1'GIa(b0?ALJ#5\]db\C_O6XdpHLU0Dg'^E>3J(E=C8CKDBI2EkuO,QanNpoGtm]89]&p'J#:^`on6e,/]7F+,<?(0CD%iaXCPU\9c8Tc0hj?na*M2\@Ur+_#OVf,FpjuR!P%eQF#o1@9][=4V'H)>)38IXZ?p![m`d"a9OOU6:^/6?R>.seQ6.fR%h<M+UkHh/[NU.q3i[SIo.iK1U!mBPHlXcXs="uA&`ekdqRE7?g+Q<P=S!So`(p7B)V~>
 endstream
 endobj
 6 0 obj
@@ -72,17 +72,17 @@ endobj
 xref
 0 12
 0000000000 65535 f 
-0000001832 00000 n 
-0000001890 00000 n 
-0000001940 00000 n 
+0000001860 00000 n 
+0000001918 00000 n 
+0000001968 00000 n 
 0000000015 00000 n 
 0000000071 00000 n 
-0000001171 00000 n 
-0000001277 00000 n 
-0000001389 00000 n 
-0000001498 00000 n 
-0000001608 00000 n 
-0000001716 00000 n 
+0000001199 00000 n 
+0000001305 00000 n 
+0000001417 00000 n 
+0000001526 00000 n 
+0000001636 00000 n 
+0000001744 00000 n 
 trailer
 <<
 /Size 12
@@ -90,5 +90,5 @@ trailer
 /Info 4 0 R
 >>
 startxref
-2060
+2088
 %%EOF

+ 32 - 27
docs/mapred_tutorial.html

@@ -150,7 +150,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>
@@ -292,7 +295,7 @@ document.write("Last Published: " + document.lastModified);
 <a href="#Example%3A+WordCount+v2.0">Example: WordCount v2.0</a>
 <ul class="minitoc">
 <li>
-<a href="#Source+Code-N10C7E">Source Code</a>
+<a href="#Source+Code-N10C84">Source Code</a>
 </li>
 <li>
 <a href="#Sample+Runs">Sample Runs</a>
@@ -1531,6 +1534,8 @@ document.write("Last Published: " + document.lastModified);
 <span class="codefrag">&lt;/property&gt;</span>
         
 </p>
+<p>Users/admins can also specify the maximum virtual memory 
+        of the launched child-task using <span class="codefrag">mapred.child.ulimit</span>.</p>
 <p>When the job starts, the localized job directory
         <span class="codefrag"> ${mapred.local.dir}/taskTracker/jobcache/$jobid/</span>
         has the following directories: </p>
@@ -1585,7 +1590,7 @@ document.write("Last Published: " + document.lastModified);
         loaded via <a href="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/System.html#loadLibrary(java.lang.String)">
         System.loadLibrary</a> or <a href="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/System.html#load(java.lang.String)">
         System.load</a>.</p>
-<a name="N108F2"></a><a name="Job+Submission+and+Monitoring"></a>
+<a name="N108F8"></a><a name="Job+Submission+and+Monitoring"></a>
 <h3 class="h4">Job Submission and Monitoring</h3>
 <p>
 <a href="api/org/apache/hadoop/mapred/JobClient.html">
@@ -1646,7 +1651,7 @@ document.write("Last Published: " + document.lastModified);
 <p>Normally the user creates the application, describes various facets 
         of the job via <span class="codefrag">JobConf</span>, and then uses the 
         <span class="codefrag">JobClient</span> to submit the job and monitor its progress.</p>
-<a name="N10952"></a><a name="Job+Control"></a>
+<a name="N10958"></a><a name="Job+Control"></a>
 <h4>Job Control</h4>
 <p>Users may need to chain map-reduce jobs to accomplish complex
           tasks which cannot be done via a single map-reduce job. This is fairly
@@ -1682,7 +1687,7 @@ document.write("Last Published: " + document.lastModified);
             </li>
           
 </ul>
-<a name="N1097C"></a><a name="Job+Input"></a>
+<a name="N10982"></a><a name="Job+Input"></a>
 <h3 class="h4">Job Input</h3>
 <p>
 <a href="api/org/apache/hadoop/mapred/InputFormat.html">
@@ -1730,7 +1735,7 @@ document.write("Last Published: " + document.lastModified);
         appropriate <span class="codefrag">CompressionCodec</span>. However, it must be noted that
         compressed files with the above extensions cannot be <em>split</em> and 
         each compressed file is processed in its entirety by a single mapper.</p>
-<a name="N109E6"></a><a name="InputSplit"></a>
+<a name="N109EC"></a><a name="InputSplit"></a>
 <h4>InputSplit</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/InputSplit.html">
@@ -1744,7 +1749,7 @@ document.write("Last Published: " + document.lastModified);
           FileSplit</a> is the default <span class="codefrag">InputSplit</span>. It sets 
           <span class="codefrag">map.input.file</span> to the path of the input file for the
           logical split.</p>
-<a name="N10A0B"></a><a name="RecordReader"></a>
+<a name="N10A11"></a><a name="RecordReader"></a>
 <h4>RecordReader</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/RecordReader.html">
@@ -1756,7 +1761,7 @@ document.write("Last Published: " + document.lastModified);
           for processing. <span class="codefrag">RecordReader</span> thus assumes the 
           responsibility of processing record boundaries and presents the tasks 
           with keys and values.</p>
-<a name="N10A2E"></a><a name="Job+Output"></a>
+<a name="N10A34"></a><a name="Job+Output"></a>
 <h3 class="h4">Job Output</h3>
 <p>
 <a href="api/org/apache/hadoop/mapred/OutputFormat.html">
@@ -1781,7 +1786,7 @@ document.write("Last Published: " + document.lastModified);
 <p>
 <span class="codefrag">TextOutputFormat</span> is the default 
         <span class="codefrag">OutputFormat</span>.</p>
-<a name="N10A57"></a><a name="Task+Side-Effect+Files"></a>
+<a name="N10A5D"></a><a name="Task+Side-Effect+Files"></a>
 <h4>Task Side-Effect Files</h4>
 <p>In some applications, component tasks need to create and/or write to
           side-files, which differ from the actual job-output files.</p>
@@ -1820,7 +1825,7 @@ document.write("Last Published: " + document.lastModified);
 <p>The entire discussion holds true for maps of jobs with 
            reducer=NONE (i.e. 0 reduces) since output of the map, in that case, 
            goes directly to HDFS.</p>
-<a name="N10A9F"></a><a name="RecordWriter"></a>
+<a name="N10AA5"></a><a name="RecordWriter"></a>
 <h4>RecordWriter</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/RecordWriter.html">
@@ -1828,9 +1833,9 @@ document.write("Last Published: " + document.lastModified);
           pairs to an output file.</p>
 <p>RecordWriter implementations write the job outputs to the 
           <span class="codefrag">FileSystem</span>.</p>
-<a name="N10AB6"></a><a name="Other+Useful+Features"></a>
+<a name="N10ABC"></a><a name="Other+Useful+Features"></a>
 <h3 class="h4">Other Useful Features</h3>
-<a name="N10ABC"></a><a name="Counters"></a>
+<a name="N10AC2"></a><a name="Counters"></a>
 <h4>Counters</h4>
 <p>
 <span class="codefrag">Counters</span> represent global counters, defined either by 
@@ -1844,7 +1849,7 @@ document.write("Last Published: " + document.lastModified);
           Reporter.incrCounter(Enum, long)</a> in the <span class="codefrag">map</span> and/or 
           <span class="codefrag">reduce</span> methods. These counters are then globally 
           aggregated by the framework.</p>
-<a name="N10AE7"></a><a name="DistributedCache"></a>
+<a name="N10AED"></a><a name="DistributedCache"></a>
 <h4>DistributedCache</h4>
 <p>
 <a href="api/org/apache/hadoop/filecache/DistributedCache.html">
@@ -1877,7 +1882,7 @@ document.write("Last Published: " + document.lastModified);
           <a href="api/org/apache/hadoop/filecache/DistributedCache.html#createSymlink(org.apache.hadoop.conf.Configuration)">
           DistributedCache.createSymlink(Configuration)</a> api. Files 
           have <em>execution permissions</em> set.</p>
-<a name="N10B25"></a><a name="Tool"></a>
+<a name="N10B2B"></a><a name="Tool"></a>
 <h4>Tool</h4>
 <p>The <a href="api/org/apache/hadoop/util/Tool.html">Tool</a> 
           interface supports the handling of generic Hadoop command-line options.
@@ -1917,7 +1922,7 @@ document.write("Last Published: " + document.lastModified);
             </span>
           
 </p>
-<a name="N10B57"></a><a name="IsolationRunner"></a>
+<a name="N10B5D"></a><a name="IsolationRunner"></a>
 <h4>IsolationRunner</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/IsolationRunner.html">
@@ -1941,7 +1946,7 @@ document.write("Last Published: " + document.lastModified);
 <p>
 <span class="codefrag">IsolationRunner</span> will run the failed task in a single 
           jvm, which can be in the debugger, over precisely the same input.</p>
-<a name="N10B8A"></a><a name="Debugging"></a>
+<a name="N10B90"></a><a name="Debugging"></a>
 <h4>Debugging</h4>
 <p>Map/Reduce framework provides a facility to run user-provided 
           scripts for debugging. When map/reduce task fails, user can run 
@@ -1952,7 +1957,7 @@ document.write("Last Published: " + document.lastModified);
 <p> In the following sections we discuss how to submit debug script
           along with the job. For submitting debug script, first it has to
           distributed. Then the script has to supplied in Configuration. </p>
-<a name="N10B96"></a><a name="How+to+distribute+script+file%3A"></a>
+<a name="N10B9C"></a><a name="How+to+distribute+script+file%3A"></a>
 <h5> How to distribute script file: </h5>
 <p>
           To distribute  the debug script file, first copy the file to the dfs.
@@ -1975,7 +1980,7 @@ document.write("Last Published: " + document.lastModified);
           <a href="api/org/apache/hadoop/filecache/DistributedCache.html#createSymlink(org.apache.hadoop.conf.Configuration)">
           DistributedCache.createSymLink(Configuration) </a> api.
           </p>
-<a name="N10BAF"></a><a name="How+to+submit+script%3A"></a>
+<a name="N10BB5"></a><a name="How+to+submit+script%3A"></a>
 <h5> How to submit script: </h5>
 <p> A quick way to submit debug script is to set values for the 
           properties "mapred.map.task.debug.script" and 
@@ -1999,17 +2004,17 @@ document.write("Last Published: " + document.lastModified);
 <span class="codefrag">$script $stdout $stderr $syslog $jobconf $program </span>  
           
 </p>
-<a name="N10BD1"></a><a name="Default+Behavior%3A"></a>
+<a name="N10BD7"></a><a name="Default+Behavior%3A"></a>
 <h5> Default Behavior: </h5>
 <p> For pipes, a default script is run to process core dumps under
           gdb, prints stack trace and gives info about running threads. </p>
-<a name="N10BDC"></a><a name="JobControl"></a>
+<a name="N10BE2"></a><a name="JobControl"></a>
 <h4>JobControl</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/jobcontrol/package-summary.html">
           JobControl</a> is a utility which encapsulates a set of Map-Reduce jobs
           and their dependencies.</p>
-<a name="N10BE9"></a><a name="Data+Compression"></a>
+<a name="N10BEF"></a><a name="Data+Compression"></a>
 <h4>Data Compression</h4>
 <p>Hadoop Map-Reduce provides facilities for the application-writer to
           specify compression for both intermediate map-outputs and the
@@ -2023,7 +2028,7 @@ document.write("Last Published: " + document.lastModified);
           codecs for reasons of both performance (zlib) and non-availability of
           Java libraries (lzo). More details on their usage and availability are
           available <a href="native_libraries.html">here</a>.</p>
-<a name="N10C09"></a><a name="Intermediate+Outputs"></a>
+<a name="N10C0F"></a><a name="Intermediate+Outputs"></a>
 <h5>Intermediate Outputs</h5>
 <p>Applications can control compression of intermediate map-outputs
             via the 
@@ -2044,7 +2049,7 @@ document.write("Last Published: " + document.lastModified);
             <a href="api/org/apache/hadoop/mapred/JobConf.html#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)">
             JobConf.setMapOutputCompressionType(SequenceFile.CompressionType)</a> 
             api.</p>
-<a name="N10C35"></a><a name="Job+Outputs"></a>
+<a name="N10C3B"></a><a name="Job+Outputs"></a>
 <h5>Job Outputs</h5>
 <p>Applications can control compression of job-outputs via the
             <a href="api/org/apache/hadoop/mapred/OutputFormatBase.html#setCompressOutput(org.apache.hadoop.mapred.JobConf,%20boolean)">
@@ -2064,7 +2069,7 @@ document.write("Last Published: " + document.lastModified);
 </div>
 
     
-<a name="N10C64"></a><a name="Example%3A+WordCount+v2.0"></a>
+<a name="N10C6A"></a><a name="Example%3A+WordCount+v2.0"></a>
 <h2 class="h3">Example: WordCount v2.0</h2>
 <div class="section">
 <p>Here is a more complete <span class="codefrag">WordCount</span> which uses many of the
@@ -2074,7 +2079,7 @@ document.write("Last Published: " + document.lastModified);
       <a href="quickstart.html#SingleNodeSetup">pseudo-distributed</a> or
       <a href="quickstart.html#Fully-Distributed+Operation">fully-distributed</a> 
       Hadoop installation.</p>
-<a name="N10C7E"></a><a name="Source+Code-N10C7E"></a>
+<a name="N10C84"></a><a name="Source+Code-N10C84"></a>
 <h3 class="h4">Source Code</h3>
 <table class="ForrestTable" cellspacing="1" cellpadding="4">
           
@@ -3284,7 +3289,7 @@ document.write("Last Published: " + document.lastModified);
 </tr>
         
 </table>
-<a name="N113E0"></a><a name="Sample+Runs"></a>
+<a name="N113E6"></a><a name="Sample+Runs"></a>
 <h3 class="h4">Sample Runs</h3>
 <p>Sample text-files as input:</p>
 <p>
@@ -3452,7 +3457,7 @@ document.write("Last Published: " + document.lastModified);
 <br>
         
 </p>
-<a name="N114B4"></a><a name="Highlights"></a>
+<a name="N114BA"></a><a name="Highlights"></a>
 <h3 class="h4">Highlights</h3>
 <p>The second version of <span class="codefrag">WordCount</span> improves upon the 
         previous one by using some features offered by the Map-Reduce framework:

ファイルの差分が大きいため隠しています
+ 1 - 1
docs/mapred_tutorial.pdf


+ 4 - 1
docs/native_libraries.html

@@ -150,7 +150,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>

+ 4 - 1
docs/quickstart.html

@@ -150,7 +150,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>

+ 888 - 0
docs/releasenotes.html

@@ -0,0 +1,888 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head>
+    <title>Hadoop 0.17.0 Release Notes</title></head>
+<body>
+<font face="sans-serif">
+    <h1>Hadoop 0.17.0 Release Notes</h1>
+
+These release notes include new developer and user facing incompatibilities, features, and major improvements.  The table below is sorted by Component.
+<ul><a name="changes">
+<h2>Changes Since Hadoop 0.16.4</h2>
+  <table border="1" width="100%" cellpadding="4">
+   <tbody><tr>
+    <td><b>Issue</b></td>
+    <td><b>Component</b></td>
+    <td><b>Notes</b></td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2828">HADOOP-2828</a>
+    </td>
+    <td>
+    conf
+    </td>
+    <td>
+    Remove these deprecated methods in
+    <tt>org.apache.hadoop.conf.Configuration</tt>:<br><tt><ul><li>
+    public Object getObject(String name) </li><li>
+    public void setObject(String name, Object value) </li><li>
+    public Object get(String name, Object defaultValue) </li><li>
+    public void set(String name, Object value)</li><li>public Iterator entries()
+    </li></ul></tt></td>
+   </tr>
+   <tr>
+    <td nowrap>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2410">HADOOP-2410</a>
+    </td>
+    <td>
+    contrib/ec2
+    </td>
+    <td>
+    The command <tt>hadoop-ec2
+    run</tt> has been replaced by <tt>hadoop-ec2 launch-cluster
+    &lt;group&gt; &lt;number of instances&gt;</tt>, and <tt>hadoop-ec2
+    start-hadoop</tt> has been removed since Hadoop is started on instance
+    start up. See <a href="http://wiki.apache.org/hadoop/AmazonEC2">http://wiki.apache.org/hadoop/AmazonEC2</a>
+    for details.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2796">HADOOP-2796</a>
+    </td>
+    <td>
+    contrib/hod
+    </td>
+    <td>
+    Added a provision to reliably detect a
+    failing script's exit code. When the HOD script option
+    returns a non-zero exit code, look for a <tt>script.exitcode</tt>
+    file written to the HOD cluster directory. If this file is present, it
+    means the script failed with the exit code given in the file.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2775">HADOOP-2775</a>
+    </td>
+    <td>
+    contrib/hod
+    </td>
+    <td>
+    Added A unit testing framework based on
+    pyunit to HOD. Developers contributing patches to HOD should now
+    contribute unit tests along with the patches when possible.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3137">HADOOP-3137</a>
+    </td>
+    <td>
+    contrib/hod
+    </td>
+    <td>
+    The HOD version is now the same as the Hadoop version.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2855">HADOOP-2855</a>
+    </td>
+    <td>
+    contrib/hod
+    </td>
+    <td>
+    HOD now handles relative
+    paths correctly for important HOD options such as the cluster directory,
+    tarball option, and script file.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2899">HADOOP-2899</a>
+    </td>
+    <td>
+    contrib/hod
+    </td>
+    <td>
+    HOD now cleans up the HOD generated mapred system directory
+    at cluster deallocation time.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2982">HADOOP-2982</a>
+    </td>
+    <td>
+    contrib/hod
+    </td>
+    <td>
+    The number of free nodes in the cluster
+    is computed using a better algorithm that filters out inconsistencies in
+    node status as reported by Torque.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2947">HADOOP-2947</a>
+    </td>
+    <td>
+    contrib/hod
+    </td>
+    <td>
+    The stdout and stderr streams of
+    daemons are redirected to files that are created under the hadoop log
+    directory. Users can now send a <tt>kill 3</tt> signal to the daemons to get stack traces
+    and thread dumps for debugging.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3168">HADOOP-3168</a>
+    </td>
+    <td>
+    contrib/streaming
+    </td>
+    <td>
+    Decreased the frequency of logging
+    in Hadoop streaming (from every 100 records to every 10,000 records).
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3040">HADOOP-3040</a>
+    </td>
+    <td>
+    contrib/streaming
+    </td>
+    <td>
+    Fixed a critical bug to restore important functionality in Hadoop streaming. If the first character on a line is
+    the separator, then an empty key is assumed and the whole line is the value.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2820">HADOOP-2820</a>
+    </td>
+    <td>
+    contrib/streaming
+    </td>
+    <td>
+    Removed these deprecated classes: <br><tt><ul><li>org.apache.hadoop.streaming.StreamLineRecordReader</li><li>org.apache.hadoop.streaming.StreamOutputFormat</li><li>org.apache.hadoop.streaming.StreamSequenceRecordReader</li></ul></tt></td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3280">HADOOP-3280</a>
+    </td>
+    <td>
+    contrib/streaming
+    </td>
+    <td>
+    Added the
+    <tt>mapred.child.ulimit</tt> configuration variable to limit the maximum virtual memory allocated to processes launched by the 
+Map-Reduce framework. This can be used to control both the Mapper/Reducer 
+tasks and applications using Hadoop pipes, Hadoop streaming etc. 
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2657">HADOOP-2657</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>Added the new API <tt>DFSOututStream.flush()</tt> to
+    flush all outstanding data to DataNodes.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2219">HADOOP-2219</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Added a new <tt>fs -count</tt> command for
+    counting the number of bytes, files, and directories under a given path. <br>
+    <br>
+    Added a new RPC <tt>getContentSummary(String path)</tt> to ClientProtocol.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2559">HADOOP-2559</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Changed DFS block placement to
+    allocate the first replica locally, the second off-rack, and the third
+    intra-rack from the second.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2758">HADOOP-2758</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Improved DataNode CPU usage by 50% while serving data to clients.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2634">HADOOP-2634</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Deprecated ClientProtocol's <tt>exists()</tt> method.  Use <tt>getFileInfo(String)</tt> instead.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2423">HADOOP-2423</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Improved <tt>FSDirectory.mkdirs(...)</tt> performance by about 50% as measured by the NNThroughputBenchmark.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3124">HADOOP-3124</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Made DataNode socket write timeout configurable, however the configuration variable is undocumented.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2470">HADOOP-2470</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Removed <tt>open()</tt> and <tt>isDir()</tt> methods from ClientProtocol without first deprecating. <br>
+    <br>
+    Removed deprecated <tt>getContentLength()</tt> from ClientProtocol.<br>
+    <br>
+    Deprecated <tt>isDirectory</tt> in DFSClient.  Use <tt>getFileStatus()</tt> instead.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2854">HADOOP-2854</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Removed deprecated method <tt>org.apache.hadoop.ipc.Server.getUserInfo()</tt>.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2239">HADOOP-2239</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Added a new FileSystem, HftpsFileSystem, that allows access to HDFS data over HTTPS.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-771">HADOOP-771</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Added a new method to <tt>FileSystem</tt> API, <tt>delete(path, boolean)</tt>, 
+    and deprecated the previous <tt>delete(path)</tt> method.
+    The new method recursively deletes files only if boolean is set to true.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3239">HADOOP-3239</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Modified <tt>org.apache.hadoop.dfs.FSDirectory.getFileInfo(String)</tt> to return null when a file is not
+    found instead of throwing FileNotFoundException.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3091">HADOOP-3091</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Enhanced <tt>hadoop dfs -put</tt> command to accept multiple
+    sources when destination is a directory.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2192">HADOOP-2192</a>
+    </td>
+    <td>
+    dfs
+    </td>
+    <td>
+    Modified <tt>hadoop dfs -mv</tt> to be closer in functionality to
+    the Linux <tt>mv</tt> command by removing unnecessary output and returning
+    an error message when moving non-existent files/directories.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-1985">HADOOP-1985</a>
+    </td>
+    <td>
+    dfs <br>
+    mapred
+    </td>
+    <td>
+    Added rack awareness for map tasks and moved the rack resolution logic to the 
+    NameNode and JobTracker. <p> The administrator can specify a
+    loadable class given by topology.node.switch.mapping.impl to specify the
+    class implementing the logic for rack resolution. The class must implement
+    a method - resolve(List&lt;String&gt; names), where names is the list of
+    DNS-names/IP-addresses that we want resolved. The return value is a list of
+    resolved network paths of the form /foo/rack, where rack is the rackID
+    where the node belongs to and foo is the switch where multiple racks are
+    connected, and so on. The default implementation of this class is packaged
+    along with hadoop and points to org.apache.hadoop.net.ScriptBasedMapping
+    and this class loads a script that can be used for rack resolution. The
+    script location is configurable. It is specified by
+    topology.script.file.name and defaults to an empty script. In the case
+    where the script name is empty, /default-rack is returned for all
+    dns-names/IP-addresses. The loadable topology.node.switch.mapping.impl provides
+    administrators flexibility to define how their site's node resolution
+    should happen. <br>
+    For mapred, one can also specify the level of the cache w.r.t the number of
+    levels in the resolved network path - defaults to two. This means that the
+    JobTracker will cache tasks at the host level and at the rack level. <br>
+    Known issue: the task caching will not work with levels greater than 2
+    (beyond racks). This bug is tracked in <a href="https://issues.apache.org/jira/browse/HADOOP-3296">HADOOP-3296</a>.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2063">HADOOP-2063</a>
+    </td>
+    <td>
+    fs
+    </td>
+    <td>
+    Added a new option <tt>-ignoreCrc</tt> to <tt>fs -get</tt> and <tt>fs -copyToLocal</tt>.  The option causes CRC checksums to be
+    ignored for this command so that corrupt files may be downloaded.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3001">HADOOP-3001</a>
+    </td>
+    <td>
+    fs
+    </td>
+    <td>
+    Added new Map/Reduce framework
+    counters that track the number of bytes read and written to HDFS, local,
+    KFS, and S3 file systems.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2027">HADOOP-2027</a>
+    </td>
+    <td>
+    fs
+    </td>
+    <td>
+    Added a new FileSystem method <tt>getFileBlockLocations</tt> to return the number of bytes in each block in a file
+    via a single rpc to the NameNode. Deprecated <tt>getFileCacheHints</tt>.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2839">HADOOP-2839</a>
+    </td>
+    <td>
+    fs
+    </td>
+    <td>
+    Removed deprecated method <tt>org.apache.hadoop.fs.FileSystem.globPaths()</tt>.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2563">HADOOP-2563</a>
+    </td>
+    <td>
+    fs
+    </td>
+    <td>
+    Removed deprecated method <tt>org.apache.hadoop.fs.FileSystem.listPaths()</tt>.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-1593">HADOOP-1593</a>
+    </td>
+    <td>
+    fs
+    </td>
+    <td>
+    Modified FSShell commands to accept non-default paths. Now you can use commands like <tt>hadoop dfs -ls hdfs://remotehost1:port/path</tt>
+    and <tt>hadoop dfs -ls hdfs://remotehost2:port/path</tt> without changing your Hadoop config.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3048">HADOOP-3048</a>
+    </td>
+    <td>
+    io
+    </td>
+    <td>
+    Added a new API and a default
+    implementation to convert and restore serializations of objects to strings.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3152">HADOOP-3152</a>
+    </td>
+    <td>
+    io
+    </td>
+    <td>
+    Added a static method
+    <tt>MapFile.setIndexInterval(Configuration, int interval)</tt> so that Map/Reduce
+    jobs using <tt>MapFileOutputFormat</tt> can set the index interval.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3073">HADOOP-3073</a>
+    </td>
+    <td>
+    ipc
+    </td>
+    <td>
+    <tt>SocketOutputStream.close()</tt> now closes the
+    underlying channel. This increases compatibility with
+    <tt>java.net.Socket.getOutputStream</tt>.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3041">HADOOP-3041</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Deprecated <tt>JobConf.setOutputPath</tt> and <tt>JobConf.getOutputPath</tt>.<p>
+    Deprecated <tt>OutputFormatBase</tt>. Added <tt>FileOutputFormat</tt>. Existing output
+    formats extending <tt>OutputFormatBase</tt> now extend <tt>FileOutputFormat</tt>. <p>
+    Added the following methods to <tt>FileOutputFormat</tt>:
+    <tt><ul>
+    <li>public static void setOutputPath(JobConf conf, Path outputDir)
+    <li>public static Path getOutputPath(JobConf conf)
+    <li>public static Path getWorkOutputPath(JobConf conf)
+    <li>static void setWorkOutputPath(JobConf conf, Path outputDir) 
+    </ul></tt>
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3204">HADOOP-3204</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Fixed <tt>ReduceTask.LocalFSMerger</tt> to handle errors and exceptions better. Prior to this all
+    exceptions except IOException would be silently ignored.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-1986">HADOOP-1986</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Programs that implement the raw
+    <tt>Mapper</tt> or <tt>Reducer</tt> interfaces will need modification to compile with this
+    release. For example, <p>
+    <pre>
+    class MyMapper implements Mapper { 
+        public void map(WritableComparable key, Writable val, 
+                    OutputCollector out, Reporter reporter) throws IOException { 
+            // ... 
+        } 
+        // ... 
+    }
+    </pre>
+    will need to be changed to refer to the parameterized type. For example: <p>
+    <pre>
+    class MyMapper implements Mapper&lt;WritableComparable, Writable, WritableComparable, Writable&gt; { 
+        public void map(WritableComparable key, Writable val, 
+                        OutputCollector&lt;WritableComparable, Writable&gt;
+                        out, Reporter reporter) throws IOException { 
+            // ... 
+        } 
+        // ... 
+    } 
+    </pre>
+    Similarly implementations of the following raw interfaces will need
+    modification:
+    <tt><ul>
+    <li>InputFormat
+    <li>OutputCollector 
+    <li>OutputFormat 
+    <li>Partitioner
+    <li>RecordReader 
+    <li>RecordWriter
+    </ul></tt>
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-910">HADOOP-910</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Reducers now perform merges of
+    shuffle data (both in-memory and on disk) while fetching map outputs.
+    Earlier, during shuffle they used to merge only the in-memory outputs.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2822">HADOOP-2822</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Removed the deprecated classes <tt>org.apache.hadoop.mapred.InputFormatBase</tt> 
+    and <tt>org.apache.hadoop.mapred.PhasedFileSystem</tt>.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2817">HADOOP-2817</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Removed the deprecated method 
+    <tt>org.apache.hadoop.mapred.ClusterStatus.getMaxTasks()</tt> 
+    and the deprecated configuration property <tt>mapred.tasktracker.tasks.maximum</tt>.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2825">HADOOP-2825</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Removed the deprecated method
+    <tt>org.apache.hadoop.mapred.MapOutputLocation.getFile(FileSystem fileSys, Path
+    localFilename, int reduce, Progressable pingee, int timeout)</tt>.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2818">HADOOP-2818</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Removed the deprecated methods
+    <tt>org.apache.hadoop.mapred.Counters.getDisplayName(String counter)</tt> and
+    <tt>org.apache.hadoop.mapred.Counters.getCounterNames()</tt>.
+    Undeprecated the method 
+    <tt>org.apache.hadoop.mapred.Counters.getCounter(String counterName)</tt>.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2826">HADOOP-2826</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Changed the signature of the method
+    <tt>public org.apache.hadoop.streaming.UTF8ByteArrayUtils.readLine(InputStream)</tt> to
+    <tt>UTF8ByteArrayUtils.readLine(LineReader, Text)</tt>. Since the old
+    signature is not deprecated, any code using the old method must be changed
+    to use the new method.
+    <p>
+    Removed the deprecated methods <tt>org.apache.hadoop.mapred.FileSplit.getFile()</tt>
+    and <tt>org.apache.hadoop.mapred.LineRecordReader.readLine(InputStream in,
+    OutputStream out)</tt>.
+    <p>
+    Made the constructor <tt>org.apache.hadoop.mapred.LineRecordReader.LineReader(InputStream in, Configuration
+    conf)</tt> public.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2819">HADOOP-2819</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Removed these deprecated methods from <tt>org.apache.hadoop.JobConf</tt>:
+    <tt><ul>
+    <li>public Class getInputKeyClass() 
+    <li>public void setInputKeyClass(Class theClass) 
+    <li>public Class getInputValueClass() 
+    <li>public void setInputValueClass(Class theClass) 
+    </ul></tt>
+    and undeprecated these methods:
+    <tt><ul>
+    <li>getSpeculativeExecution() 
+    <li>public void setSpeculativeExecution(boolean speculativeExecution)
+    </ul></tt>
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3093">HADOOP-3093</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Added the following public methods to <tt>org.apache.hadoop.conf.Configuration</tt>:
+    <tt><ul>
+    <li>String[] Configuration.getStrings(String name, String... defaultValue)
+    <li>void Configuration.setStrings(String name, String... values)
+    </ul></tt>
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2399">HADOOP-2399</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    The key and value objects that are given
+    to the Combiner and Reducer are now reused between calls. This is much more
+    efficient, but the user can not assume the objects are constant.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3162">HADOOP-3162</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Deprecated the public methods <tt>org.apache.hadoop.mapred.JobConf.setInputPath(Path)</tt> and
+    <tt>org.apache.hadoop.mapred.JobConf.addInputPath(Path)</tt>.
+    <p>
+    Added the following public methods to <tt>org.apache.hadoop.mapred.FileInputFormat</tt>:
+    <tt><ul>
+    <li>public static void setInputPaths(JobConf job, Path... paths); <br>
+    <li>public static void setInputPaths(JobConf job, String commaSeparatedPaths); <br>
+    <li>public static void addInputPath(JobConf job, Path path); <br>
+    <li>public static void addInputPaths(JobConf job, String commaSeparatedPaths); <br>
+    </ul></tt>
+    Earlier code calling <tt>JobConf.setInputPath(Path)</tt> and <tt>JobConf.addInputPath(Path)</tt>
+    should now call <tt>FileInputFormat.setInputPaths(JobConf, Path...)</tt> and
+    <tt>FileInputFormat.addInputPath(Path)</tt> respectively.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2178">HADOOP-2178</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Provided a new facility to
+    store job history on DFS. Cluster administrator can now provide either localFS
+    location or DFS location using configuration property
+    <tt>mapred.job.history.location</tt> to store job history. History will also
+    be logged in user specified location if the configuration property
+    <tt>mapred.job.history.user.location</tt> is specified.
+    <p>
+    Removed these classes and method:
+    <tt><ul>
+    <li>org.apache.hadoop.mapred.DefaultJobHistoryParser.MasterIndex
+    <li>org.apache.hadoop.mapred.DefaultJobHistoryParser.MasterIndexParseListener
+    <li>org.apache.hadoop.mapred.DefaultJobHistoryParser.parseMasterIndex
+    </ul></tt>
+    <p>
+    Changed the signature of the public method
+    <tt>org.apache.hadoop.mapred.DefaultJobHistoryParser.parseJobTasks(File
+    jobHistoryFile, JobHistory.JobInfo job)</tt> to
+    <tt>DefaultJobHistoryParser.parseJobTasks(String jobHistoryFile,
+    JobHistory.JobInfo job, FileSystem fs)</tt>. <p>
+    Changed the signature of the public method
+    <tt>org.apache.hadoop.mapred.JobHistory.parseHistory(File path, Listener l)</tt>
+    to <tt>JobHistory.parseHistoryFromFS(String path, Listener l, FileSystem fs)</tt>.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2055">HADOOP-2055</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Users are now provided the ability to specify what paths to ignore when processing the job input directory
+    (apart from the filenames that start with "_" and ".").
+    To do this, two new methods were defined: 
+    <tt><ul>
+    <li>FileInputFormat.setInputPathFilter(JobConf, PathFilter)
+    <li>FileInputFormat.getInputPathFilter(JobConf)
+    </ul></tt>
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2116">HADOOP-2116</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Restructured the local job directory on the tasktracker. Users are provided with a job-specific shared directory
+    (<tt>mapred-local/taskTracker/jobcache/$jobid/work</tt>) for use as scratch
+    space, through configuration property and system property
+    <tt>job.local.dir</tt>. The directory <tt>../work</tt> is no longer available from the task's current working directory.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-1622">HADOOP-1622</a>
+    </td>
+    <td>
+    mapred
+    </td>
+    <td>
+    Added new command line options for <tt>hadoop jar</tt> command:
+    <p>
+    <tt>hadoop jar -files &lt;comma separated list of files&gt; -libjars &lt;comma
+    separated list of jars&gt; -archives &lt;comma separated list of
+    archives&gt; </tt>
+    <p>
+    where the options have these meanings:
+    <p>
+    <ul>
+    <li><tt>-files</tt> option allows you to specify a comma separated list of paths which
+    would be present in the current working directory of your task <br>
+    <li><tt>-libjars</tt> option allows you to add jars to the classpaths of the maps and
+    reduces. <br>
+    <li><tt>-archives</tt> allows you to pass archives as arguments that are
+    unzipped/unjarred and a link with the name of the jar/zip is created in the
+    current working directory of tasks.
+    </ul>
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2823">HADOOP-2823</a>
+    </td>
+    <td>
+    record
+    </td>
+    <td>
+    Removed the deprecated methods in
+    <tt>org.apache.hadoop.record.compiler.generated.SimpleCharStream</tt>:
+    <tt><ul>
+    <li>public int getColumn() 
+    <li>and public int getLine() 
+    </ul></tt>
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2551">HADOOP-2551</a>
+    </td>
+    <td>
+    scripts
+    </td>
+    <td>
+    Introduced new environment variables to allow finer grained control of Java options passed to server and
+    client JVMs. See the new <tt>*_OPTS</tt> variables in <tt>conf/hadoop-env.sh</tt>.
+    </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-3099">HADOOP-3099</a>
+    </td>
+    <td>
+    util
+    </td>
+    <td>
+    Added a new <tt>-p</tt> option to <tt>distcp</tt> for preserving file and directory status:
+    <pre>
+    -p[rbugp] Preserve status
+        r: replication number 
+        b: block size 
+        u: user
+        g: group 
+        p: permission
+    </pre>
+    The <tt>-p</tt> option alone is equivalent to <tt>-prbugp</tt>.
+   </td>
+   </tr>
+   <tr>
+    <td>
+    <a href="https://issues.apache.org/jira/browse/HADOOP-2821">HADOOP-2821</a>
+    </td>
+    <td>
+    util
+    </td>
+    <td>
+    Removed the deprecated classes <tt>org.apache.hadoop.util.ShellUtil</tt> and <tt>org.apache.hadoop.util.ToolBase</tt>.
+    </td>
+   </tr>
+  </tbody></table>
+
+</ul>
+
+</body></html>

BIN
docs/skin/images/rc-b-l-15-1body-2menu-3menu.png


BIN
docs/skin/images/rc-b-r-15-1body-2menu-3menu.png


BIN
docs/skin/images/rc-b-r-5-1header-2tab-selected-3tab-selected.png


BIN
docs/skin/images/rc-t-l-5-1header-2searchbox-3searchbox.png


BIN
docs/skin/images/rc-t-l-5-1header-2tab-selected-3tab-selected.png


BIN
docs/skin/images/rc-t-l-5-1header-2tab-unselected-3tab-unselected.png


BIN
docs/skin/images/rc-t-r-15-1body-2menu-3menu.png


BIN
docs/skin/images/rc-t-r-5-1header-2searchbox-3searchbox.png


BIN
docs/skin/images/rc-t-r-5-1header-2tab-selected-3tab-selected.png


BIN
docs/skin/images/rc-t-r-5-1header-2tab-unselected-3tab-unselected.png


+ 4 - 1
docs/streaming.html

@@ -153,7 +153,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://hadoop.apache.org/core/mailing_lists.html">Mailing Lists</a>
 </div>
 <div class="menuitem">
-<a href="changes.html">Release Notes</a>
+<a href="releasenotes.html">Release Notes</a>
+</div>
+<div class="menuitem">
+<a href="changes.html">All Changes</a>
 </div>
 </div>
 <div id="credit"></div>

+ 1 - 0
src/docs/src/documentation/conf/cli.xconf

@@ -212,6 +212,7 @@
    <exclude pattern="**/"/>
    <exclude pattern="api/**"/>
    <exclude pattern="changes.html"/>
+   <exclude pattern="releasenotes.html"/>
 
 <!--
   This is a workaround for FOR-284 "link rewriting broken when

+ 3 - 1
src/docs/src/documentation/content/xdocs/site.xml

@@ -51,7 +51,8 @@ See http://forrest.apache.org/docs/linking.html for more info.
     <wiki      label="Wiki"               href="ext:wiki" />
     <faq       label="FAQ"                href="ext:faq" />
     <lists     label="Mailing Lists"      href="ext:lists" />
-    <changes   label="Release Notes"      href="ext:changes" />
+    <relnotes  label="Release Notes"      href="ext:relnotes" />
+    <changes   label="All Changes"        href="ext:changes" />
   </docs>
 
   <external-refs>
@@ -81,6 +82,7 @@ See http://forrest.apache.org/docs/linking.html for more info.
       <python href="http://www.python.org" />
       <twisted-python href="http://twistedmatrix.com/trac/" />
     </hod>
+    <relnotes href="releasenotes.html" />
     <changes href="changes.html" />
     <api href="api/">
       <index href="index.html" />

この差分においてかなりの量のファイルが変更されているため、一部のファイルを表示していません