
Merge -r 729055:729056 from trunk to 0.20 branch. Fixes: HADOOP-4920.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/branch-0.20@737857 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 16 years ago
commit 194b463480
100 changed files with 24 additions and 27873 deletions
  1. 2 0
      CHANGES.txt
  2. 22 28
      build.xml
  3. 0 170
      docs/ChangesFancyStyle.css
  4. 0 49
      docs/ChangesSimpleStyle.css
  5. 0 509
      docs/SLG_user_guide.html
  6. 0 100
      docs/SLG_user_guide.pdf
  7. 0 2
      docs/broken-links.xml
  8. 0 528
      docs/capacity_scheduler.html
  9. 0 140
      docs/capacity_scheduler.pdf
  10. 0 5161
      docs/changes.html
  11. 0 942
      docs/cluster_setup.html
  12. 0 151
      docs/cluster_setup.pdf
  13. 0 2
      docs/cn/broken-links.xml
  14. 0 730
      docs/cn/cluster_setup.html
  15. 0 209
      docs/cn/cluster_setup.pdf
  16. 0 1116
      docs/cn/commands_manual.html
  17. 0 261
      docs/cn/commands_manual.pdf
  18. 0 255
      docs/cn/core-default.html
  19. 0 563
      docs/cn/distcp.html
  20. 0 149
      docs/cn/distcp.pdf
  21. 0 1108
      docs/cn/hadoop-default.html
  22. 0 302
      docs/cn/hadoop_archives.html
  23. 0 137
      docs/cn/hadoop_archives.pdf
  24. 0 226
      docs/cn/hdfs-default.html
  25. 0 664
      docs/cn/hdfs_design.html
  26. 0 424
      docs/cn/hdfs_design.pdf
  27. 0 504
      docs/cn/hdfs_permissions_guide.html
  28. 0 129
      docs/cn/hdfs_permissions_guide.pdf
  29. 0 277
      docs/cn/hdfs_quota_admin_guide.html
  30. 0 47
      docs/cn/hdfs_quota_admin_guide.pdf
  31. 0 860
      docs/cn/hdfs_shell.html
  32. 0 347
      docs/cn/hdfs_shell.pdf
  33. 0 718
      docs/cn/hdfs_user_guide.html
  34. 0 195
      docs/cn/hdfs_user_guide.pdf
  35. 0 257
      docs/cn/hod.html
  36. 0 144
      docs/cn/hod.pdf
  37. 0 557
      docs/cn/hod_admin_guide.html
  38. 0 162
      docs/cn/hod_admin_guide.pdf
  39. 0 422
      docs/cn/hod_config_guide.html
  40. 0 140
      docs/cn/hod_config_guide.pdf
  41. 0 1251
      docs/cn/hod_user_guide.html
  42. 0 358
      docs/cn/hod_user_guide.pdf
  43. BIN
      docs/cn/images/built-with-forrest-button.png
  44. BIN
      docs/cn/images/core-logo.gif
  45. BIN
      docs/cn/images/favicon.ico
  46. BIN
      docs/cn/images/hadoop-logo.jpg
  47. BIN
      docs/cn/images/hdfsarchitecture.gif
  48. BIN
      docs/cn/images/hdfsdatanodes.gif
  49. BIN
      docs/cn/images/instruction_arrow.png
  50. 0 268
      docs/cn/index.html
  51. 0 160
      docs/cn/index.pdf
  52. 0 380
      docs/cn/linkmap.html
  53. 0 62
      docs/cn/linkmap.pdf
  54. 0 667
      docs/cn/mapred-default.html
  55. 0 3464
      docs/cn/mapred_tutorial.html
  56. 0 250
      docs/cn/mapred_tutorial.pdf
  57. 0 462
      docs/cn/native_libraries.html
  58. 0 107
      docs/cn/native_libraries.pdf
  59. 0 574
      docs/cn/quickstart.html
  60. 0 173
      docs/cn/quickstart.pdf
  61. 0 23
      docs/cn/skin/CommonMessages_de.xml
  62. 0 23
      docs/cn/skin/CommonMessages_en_US.xml
  63. 0 23
      docs/cn/skin/CommonMessages_es.xml
  64. 0 23
      docs/cn/skin/CommonMessages_fr.xml
  65. 0 166
      docs/cn/skin/basic.css
  66. 0 90
      docs/cn/skin/breadcrumbs-optimized.js
  67. 0 237
      docs/cn/skin/breadcrumbs.js
  68. 0 166
      docs/cn/skin/fontsize.js
  69. 0 40
      docs/cn/skin/getBlank.js
  70. 0 45
      docs/cn/skin/getMenu.js
  71. 0 1
      docs/cn/skin/images/README.txt
  72. BIN
      docs/cn/skin/images/add.jpg
  73. BIN
      docs/cn/skin/images/built-with-forrest-button.png
  74. BIN
      docs/cn/skin/images/chapter.gif
  75. BIN
      docs/cn/skin/images/chapter_open.gif
  76. 0 92
      docs/cn/skin/images/corner-imports.svg.xslt
  77. BIN
      docs/cn/skin/images/current.gif
  78. 0 28
      docs/cn/skin/images/dc.svg.xslt
  79. BIN
      docs/cn/skin/images/error.png
  80. BIN
      docs/cn/skin/images/external-link.gif
  81. BIN
      docs/cn/skin/images/fix.jpg
  82. BIN
      docs/cn/skin/images/forrest-credit-logo.png
  83. BIN
      docs/cn/skin/images/hack.jpg
  84. BIN
      docs/cn/skin/images/header_white_line.gif
  85. BIN
      docs/cn/skin/images/info.png
  86. BIN
      docs/cn/skin/images/instruction_arrow.png
  87. BIN
      docs/cn/skin/images/label.gif
  88. BIN
      docs/cn/skin/images/page.gif
  89. BIN
      docs/cn/skin/images/pdfdoc.gif
  90. BIN
      docs/cn/skin/images/poddoc.png
  91. 0 55
      docs/cn/skin/images/poddoc.svg.xslt
  92. BIN
      docs/cn/skin/images/printer.gif
  93. BIN
      docs/cn/skin/images/rc-b-l-15-1body-2menu-3menu.png
  94. BIN
      docs/cn/skin/images/rc-b-r-15-1body-2menu-3menu.png
  95. BIN
      docs/cn/skin/images/rc-b-r-5-1header-2tab-selected-3tab-selected.png
  96. BIN
      docs/cn/skin/images/rc-t-l-5-1header-2searchbox-3searchbox.png
  97. BIN
      docs/cn/skin/images/rc-t-l-5-1header-2tab-selected-3tab-selected.png
  98. BIN
      docs/cn/skin/images/rc-t-l-5-1header-2tab-unselected-3tab-unselected.png
  99. BIN
      docs/cn/skin/images/rc-t-r-15-1body-2menu-3menu.png
  100. BIN
      docs/cn/skin/images/rc-t-r-5-1header-2searchbox-3searchbox.png

+ 2 - 0
CHANGES.txt

@@ -314,6 +314,8 @@ Release 0.20.0 - Unreleased
     HADOOP-4939. Adds a test that would inject random failures for tasks in 
     large jobs and would also inject TaskTracker failures. (ddas)
 
+    HADOOP-4920.  Stop storing Forrest output in Subversion. (cutting)
+
   OPTIMIZATIONS
 
     HADOOP-3293. Fixes FileInputFormat to do provide locations for splits

+ 22 - 28
build.xml

@@ -40,11 +40,9 @@
   <property name="anttasks.dir" value="${basedir}/src/ant"/>
   <property name="lib.dir" value="${basedir}/lib"/>
   <property name="conf.dir" value="${basedir}/conf"/>
-  <property name="docs.dir" value="${basedir}/docs"/>
-  <property name="cndocs.dir" value="${basedir}/docs/cn"/>
   <property name="contrib.dir" value="${basedir}/src/contrib"/>
   <property name="docs.src" value="${basedir}/src/docs"/>
-  <property name="cndocs.src" value="${basedir}/src/docs/cn"/>
+  <property name="src.docs.cn" value="${basedir}/src/docs/cn"/>
   <property name="changes.src" value="${docs.src}/changes"/>
   <property name="c++.src" value="${basedir}/src/c++"/>
   <property name="c++.utils.src" value="${c++.src}/utils"/>
@@ -80,6 +78,7 @@
   <property name="build.c++.examples.pipes" 
             value="${build.c++}/examples/pipes"/>
   <property name="build.docs" value="${build.dir}/docs"/>
+  <property name="build.docs.cn" value="${build.dir}/docs/cn"/>
   <property name="build.javadoc" value="${build.docs}/api"/>
   <property name="build.javadoc.dev" value="${build.docs}/dev-api"/>
   <property name="build.encoding" value="ISO-8859-1"/>
@@ -137,7 +136,7 @@
   <property name="rat.reporting.classname" value="rat.Report"/>
 
   <property name="jdiff.build.dir" value="${build.docs}/jdiff"/>
-  <property name="jdiff.xml.dir" value="${docs.dir}/jdiff"/>
+  <property name="jdiff.xml.dir" value="${lib.dir}/jdiff"/>
   <property name="jdiff.stable" value="0.19.0"/>
   <property name="jdiff.stable.javadoc" 
             value="http://hadoop.apache.org/core/docs/r${jdiff.stable}/api/"/>
@@ -290,7 +289,7 @@
     </exec>
 	
    <exec executable="sh">
-       <arg line="src/fixFontsPath.sh ${cndocs.src}"/>
+       <arg line="src/fixFontsPath.sh ${src.docs.cn}"/>
    </exec>
   </target>
 
@@ -851,14 +850,14 @@
 	  failonerror="true">
       <env key="JAVA_HOME" value="${java5.home}"/>
     </exec>
-    <copy todir="${docs.dir}">
+    <copy todir="${build.docs}">
       <fileset dir="${docs.src}/build/site/" />
     </copy>
-    <style basedir="${core.src.dir}" destdir="${docs.dir}"
+    <style basedir="${core.src.dir}" destdir="${build.docs}"
            includes="core-default.xml" style="conf/configuration.xsl"/>
-    <style basedir="${hdfs.src.dir}" destdir="${docs.dir}"
+    <style basedir="${hdfs.src.dir}" destdir="${build.docs}"
            includes="hdfs-default.xml" style="conf/configuration.xsl"/>
-    <style basedir="${mapred.src.dir}" destdir="${docs.dir}"
+    <style basedir="${mapred.src.dir}" destdir="${build.docs}"
            includes="mapred-default.xml" style="conf/configuration.xsl"/>
     <antcall target="changes-to-html"/>
     <antcall target="cn-docs"/>
@@ -867,18 +866,18 @@
   <target name="cn-docs" depends="forrest.check, init" 
        description="Generate forrest-based Chinese documentation. To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line." 
         if="forrest.home">
-    <exec dir="${cndocs.src}" executable="${forrest.home}/bin/forrest" failonerror="true">
+    <exec dir="${src.docs.cn}" executable="${forrest.home}/bin/forrest" failonerror="true">
       <env key="LANG" value="en_US.utf8"/>
       <env key="JAVA_HOME" value="${java5.home}"/>
     </exec>
-    <copy todir="${cndocs.dir}">
-      <fileset dir="${cndocs.src}/build/site/" />
+    <copy todir="${build.docs.cn}">
+      <fileset dir="${src.docs.cn}/build/site/" />
     </copy>
-    <style basedir="${core.src.dir}" destdir="${cndocs.dir}"
+    <style basedir="${core.src.dir}" destdir="${build.docs.cn}"
            includes="core-default.xml" style="conf/configuration.xsl"/>
-    <style basedir="${hdfs.src.dir}" destdir="${cndocs.dir}"
+    <style basedir="${hdfs.src.dir}" destdir="${build.docs.cn}"
            includes="hdfs-default.xml" style="conf/configuration.xsl"/>
-    <style basedir="${mapred.src.dir}" destdir="${cndocs.dir}"
+    <style basedir="${mapred.src.dir}" destdir="${build.docs.cn}"
            includes="mapred-default.xml" style="conf/configuration.xsl"/>
     <antcall target="changes-to-html"/>
   </target>
@@ -976,15 +975,11 @@
        <group title="contrib: FailMon" packages="org.apache.hadoop.contrib.failmon*"/>
     </javadoc>
   </target>	
-<!--
-  <target name="jdiff.check" unless="jdiff.home">
-    <fail message="'jdiff.home' is not defined. Please pass -Djdiff.home=&lt;base of jdiff installation&gt; to Ant on the command-line." />
-  </target>
--->
-  <target name="api-xml" depends="javadoc,write-null">
+
+  <target name="api-xml" depends="ivy-retrieve-jdiff,javadoc,write-null">
     <javadoc>
        <doclet name="jdiff.JDiff"
-               path="${jdiff..jar}:${xerces.jar}">
+               path="${jdiff.jar}:${xerces.jar}">
          <param name="-apidir" value="${jdiff.xml.dir}"/>
          <param name="-apiname" value="hadoop ${version}"/>
        </doclet>
@@ -1029,11 +1024,11 @@
   </target>
 	
   <target name="changes-to-html" description="Convert CHANGES.txt into an html file">
-    <mkdir dir="${docs.dir}"/>
-    <exec executable="perl" input="CHANGES.txt" output="${docs.dir}/changes.html" failonerror="true">
+    <mkdir dir="${build.docs}"/>
+    <exec executable="perl" input="CHANGES.txt" output="${build.docs}/changes.html" failonerror="true">
       <arg value="${changes.src}/changes2html.pl"/>
     </exec>
-    <copy todir="${docs.dir}">
+    <copy todir="${build.docs}">
       <fileset dir="${changes.src}" includes="*.css"/>
     </copy>
   </target>
@@ -1043,7 +1038,7 @@
   <!-- ================================================================== -->
   <!--                                                                    -->
   <!-- ================================================================== -->
-  <target name="package" depends="compile, jar, javadoc, examples, tools-jar, jar-test, ant-tasks, package-librecordio"
+  <target name="package" depends="compile, jar, javadoc, docs, cn-docs, api-report, examples, tools-jar, jar-test, ant-tasks, package-librecordio"
 	  description="Build distribution">
     <mkdir dir="${dist.dir}"/>
     <mkdir dir="${dist.dir}/lib"/>
@@ -1095,7 +1090,6 @@
     </copy>
 
     <copy todir="${dist.dir}/docs">
-      <fileset dir="${docs.dir}" />
       <fileset dir="${build.docs}"/>
     </copy>
 
@@ -1184,7 +1178,7 @@
   <target name="clean" depends="clean-contrib" description="Clean.  Delete the build files, and their directories">
     <delete dir="${build.dir}"/>
     <delete dir="${docs.src}/build"/>
-    <delete dir="${cndocs.src}/build"/>
+    <delete dir="${src.docs.cn}/build"/>
   </target>
 
   <!-- ================================================================== -->
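
Taken together, the build.xml changes above move all generated documentation out of the versioned docs/ tree: Forrest output, the generated *-default.html pages, and changes.html are now written under ${build.dir}/docs (and ${build.dir}/docs/cn for the Chinese docs), and the package target copies them into the distribution from there. The following Ant fragment is only a sketch of that layout; the property names match the diff, but the target bodies are abridged and the project and dist names are made up.

<project name="docs-layout-sketch" default="package" basedir=".">
  <!-- Property names taken from the diff above; values abridged. -->
  <property name="build.dir" value="${basedir}/build"/>
  <property name="dist.dir" value="${build.dir}/dist-sketch"/>
  <property name="build.docs" value="${build.dir}/docs"/>
  <property name="build.docs.cn" value="${build.dir}/docs/cn"/>

  <!-- Stand-in for the real docs/cn-docs targets: generated output
       lands under the build tree, nothing is written back into docs/. -->
  <target name="docs">
    <mkdir dir="${build.docs}"/>
    <mkdir dir="${build.docs.cn}"/>
  </target>

  <!-- The distribution is populated from the build tree only. -->
  <target name="package" depends="docs">
    <mkdir dir="${dist.dir}/docs"/>
    <copy todir="${dist.dir}/docs">
      <fileset dir="${build.docs}"/>
    </copy>
  </target>
</project>

With that layout, deleting the checked-in docs/ tree (the remainder of this commit) is safe: running the package target regenerates everything the distribution ships.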

+ 0 - 170
docs/ChangesFancyStyle.css

@@ -1,170 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-/**
- * General
- */
-
-img { border: 0; }
-
-#content table {
-  border: 0;
-  width: 100%;
-}
-/*Hack to get IE to render the table at 100%*/
-* html #content table { margin-left: -3px; }
-
-#content th,
-#content td {
-  margin: 0;
-  padding: 0;
-  vertical-align: top;
-}
-
-.clearboth {
-  clear: both;
-}
-
-.note, .warning, .fixme {
-  border: solid black 1px;
-  margin: 1em 3em;
-}
-
-.note .label {
-  background: #369;
-  color: white;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.note .content {
-  background: #F0F0FF;
-  color: black;
-  line-height: 120%;
-  font-size: 90%;
-  padding: 5px 10px;
-}
-.warning .label {
-  background: #C00;
-  color: white;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.warning .content {
-  background: #FFF0F0;
-  color: black;
-  line-height: 120%;
-  font-size: 90%;
-  padding: 5px 10px;
-}
-.fixme .label {
-  background: #C6C600;
-  color: black;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.fixme .content {
-  padding: 5px 10px;
-}
-
-/**
- * Typography
- */
-
-body {
-  font-family: verdana, "Trebuchet MS", arial, helvetica, sans-serif;
-  font-size: 100%;
-}
-
-#content {
-  font-family: Georgia, Palatino, Times, serif;
-  font-size: 95%;
-}
-#tabs {
-  font-size: 70%;
-}
-#menu {
-  font-size: 80%;
-}
-#footer {
-  font-size: 70%;
-}
-
-h1, h2, h3, h4, h5, h6 {
-  font-family: "Trebuchet MS", verdana, arial, helvetica, sans-serif;
-  font-weight: bold;
-  margin-top: 1em;
-  margin-bottom: .5em;
-}
-
-h1 {
-    margin-top: 0;
-    margin-bottom: 1em;
-  font-size: 1.4em;
-  background-color: #73CAFF;
-}
-#content h1 {
-  font-size: 160%;
-  margin-bottom: .5em;
-}
-#menu h1 {
-  margin: 0;
-  padding: 10px;
-  background: #336699;
-  color: white;
-}
-h2 { 
-  font-size: 120%;
-  background-color: #73CAFF;
-}
-h3 { font-size: 100%; }
-h4 { font-size: 90%; }
-h5 { font-size: 80%; }
-h6 { font-size: 75%; }
-
-p {
-  line-height: 120%;
-  text-align: left;
-  margin-top: .5em;
-  margin-bottom: 1em;
-}
-
-#content li,
-#content th,
-#content td,
-#content li ul,
-#content li ol{
-  margin-top: .5em;
-  margin-bottom: .5em;
-}
-
-
-#content li li,
-#minitoc-area li{
-  margin-top: 0em;
-  margin-bottom: 0em;
-}
-
-#content .attribution {
-  text-align: right;
-  font-style: italic;
-  font-size: 85%;
-  margin-top: 1em;
-}
-
-.codefrag {
-  font-family: "Courier New", Courier, monospace;
-  font-size: 110%;
-}

+ 0 - 49
docs/ChangesSimpleStyle.css

@@ -1,49 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-body {
-  font-family: Courier New, monospace;
-  font-size: 10pt;
-}
-
-h1 {
-  font-family: Courier New, monospace;
-  font-size: 10pt;
-}
-
-h2 {
-  font-family: Courier New, monospace;
-  font-size: 10pt; 
-}
-
-h3 {
-  font-family: Courier New, monospace;
-  font-size: 10pt; 
-}
-
-a:link {
-  color: blue;
-}
-
-a:visited {
-  color: purple; 
-}
-
-li {
-  margin-top: 1em;
-  margin-bottom: 1em;
-}

+ 0 - 509
docs/SLG_user_guide.html

@@ -1,509 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title> HDFS Utilities: Synthetic Load Generator User Guide </title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">Project</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">Wiki</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.20 Documentation</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">Documentation</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">Overview</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">Hadoop Quick Start</a>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">Hadoop Cluster Setup</a>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Hadoop Map/Reduce Tutorial</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">Hadoop Command Guide</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">Hadoop FS Shell Guide</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">Hadoop DistCp Guide</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop Native Libraries</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Hadoop Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS User Guide</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS Architecture</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Admin Guide: Permissions</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS Admin Guide: Quotas</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">HDFS Utilities</div>
-</div>
-<div class="menuitem">
-<a href="libhdfs.html">HDFS C API</a>
-</div>
-<div class="menuitem">
-<a href="hod_user_guide.html">HOD User Guide</a>
-</div>
-<div class="menuitem">
-<a href="hod_admin_guide.html">HOD Admin Guide</a>
-</div>
-<div class="menuitem">
-<a href="hod_config_guide.html">HOD Config Guide</a>
-</div>
-<div class="menuitem">
-<a href="capacity_scheduler.html">Capacity Scheduler</a>
-</div>
-<div class="menuitem">
-<a href="vaidya.html">Hadoop Vaidya</a>
-</div>
-<div class="menuitem">
-<a href="api/index.html">API Docs</a>
-</div>
-<div class="menuitem">
-<a href="jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">Wiki</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">FAQ</a>
-</div>
-<div class="menuitem">
-<a href="releasenotes.html">Release Notes</a>
-</div>
-<div class="menuitem">
-<a href="changes.html">Change Log</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="SLG_user_guide.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1> HDFS Utilities: Synthetic Load Generator User Guide </h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#Description"> Description </a>
-</li>
-<li>
-<a href="#Synopsis"> Synopsis </a>
-</li>
-<li>
-<a href="#Test+Space+Population"> Test Space Population </a>
-<ul class="minitoc">
-<li>
-<a href="#Structure+Generator"> Structure Generator </a>
-</li>
-<li>
-<a href="#Test+Space+Generator"> Test Space Generator </a>
-</li>
-</ul>
-</li>
-</ul>
-</div>
-		
-<a name="N1000D"></a><a name="Description"></a>
-<h2 class="h3"> Description </h2>
-<div class="section">
-<p>
-        The synthetic load generator is a tool for testing NameNode behavior
-        under different client loads. The user can generate different mixes 
-        of read, write, and list requests by specifying the probabilities of
-        read and write. The user controls the intensity of the load by adjusting
-        parameters for the number of worker threads and the delay between 
-        operations. While load generators are running, the user can profile and
-        monitor the running NameNode. When a load generator exits, it
-        prints some NameNode statistics like the average execution time of each
-        kind of operation and the NameNode throughput.
-                       </p>
-</div>
-		
-<a name="N10017"></a><a name="Synopsis"></a>
-<h2 class="h3"> Synopsis </h2>
-<div class="section">
-<p>
-        
-<span class="codefrag">java LoadGenerator [options]</span>
-<br>
-                        
-</p>
-<p>
-        Options include:<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-readProbability &lt;read probability&gt;</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;the probability of the read operation;
-                default is 0.3333. </span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-writeProbability &lt;write probability&gt;</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;the probability of the write 
-                operations; default is 0.3333.</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-root &lt;test space root&gt;</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;the root of the test space;
-                default is /testLoadSpace.</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-maxDelayBetweenOps 
-                &lt;maxDelayBetweenOpsInMillis&gt;</span>
-<br> 
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;the maximum delay between two consecutive
-                operations in a thread; default is 0 indicating no delay.
-                </span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-numOfThreads &lt;numOfThreads&gt;</span>
-<br> 
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;the number of threads to spawn; 
-                default is 200.</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-elapsedTime &lt;elapsedTimeInSecs&gt;</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;the number of seconds that the program 
-                will run; A value of zero indicates that the program runs
-                forever. The default value is 0.</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-startTime &lt;startTimeInMillis&gt;</span>
-<br> 
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;the time that all worker threads 
-                start to run. By default it is 10 seconds after the main 
-                program starts running. This creates a barrier if more than
-                one load generator is running.
-        </span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-seed &lt;seed&gt;</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;the random generator seed for repeating 
-                requests to NameNode when running with a single thread;
-                default is the current time.</span>
-<br>
-			
-</p>
-<p>
-        After command line argument parsing, the load generator traverses 
-        the test space and builds a table of all directories and another table
-        of all files in the test space. It then waits until the start time to
-        spawn the number of worker threads as specified by the user. Each
-        thread sends a stream of requests to NameNode. At each iteration, 
-        it first decides if it is going to read a file, create a file, or
-        list a directory following the read and write probabilities specified
-        by the user. The listing probability is equal to 
-        <em>1-read probability-write probability</em>. When reading, 
-        it randomly picks a file in the test space and reads the entire file. 
-        When writing, it randomly picks a directory in the test space and 
-        creates a file there. To prevent two threads, whether in the same 
-        load generator or in different load generators, from creating the 
-        same file, the file name consists of the current machine's host name 
-        and the thread id. The length of the file follows a Gaussian 
-        distribution with an average size of 2 blocks and a standard 
-        deviation of 1. The new file is filled with byte 'a'. To keep the
-        test space from growing indefinitely, the file is deleted immediately
-        after the file creation completes. When listing, it randomly 
-        picks a directory in the test space and lists its content. 
-        After an operation completes, the thread pauses for a random 
-        amount of time in the range of [0, maxDelayBetweenOps] if the 
-        specified maximum delay is not zero. All threads are stopped when 
-        the specified elapsed time has passed. Before exiting, the program 
-        prints the average execution time for each kind of NameNode operation, 
-        and the number of requests served by the NameNode per second.
-                        </p>
-</div>
-                
-<a name="N10070"></a><a name="Test+Space+Population"></a>
-<h2 class="h3"> Test Space Population </h2>
-<div class="section">
-<p>
-        The user needs to populate a test space before she runs a 
-        load generator. The structure generator generates a random 
-        test space structure and the data generator creates the files 
-        and directories of the test space in Hadoop distributed file system.
-                        </p>
-<a name="N10079"></a><a name="Structure+Generator"></a>
-<h3 class="h4"> Structure Generator </h3>
-<p>
-        This tool generates a random namespace structure with the 
-        following constraints:
-                                </p>
-<ol>
-        
-<li>The number of subdirectories that a directory can have is 
-            a random number in [minWidth, maxWidth].</li>
-        
-<li>The maximum depth of each subdirectory is a random number in 
-            [2*maxDepth/3, maxDepth].</li>
-        
-<li>Files are randomly placed in leaf directories. The size of 
-            each file follows Gaussian distribution with an average size 
-            of 1 block and a standard deviation of 1.</li>
-                                        
-</ol>
-<p>
-        The generated namespace structure is described by two files in 
-        the output directory. Each line of the first file contains the 
-        full name of a leaf directory. Each line of the second file 
-        contains the full name of a file and its size, separated by a blank.
-                                </p>
-<p>
-        The synopsis of the command is
-                                </p>
-<p>
-        
-<span class="codefrag">java StructureGenerator [options]</span>
-                                
-</p>
-<p>
-        Options include:<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-maxDepth &lt;maxDepth&gt;</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;maximum depth of the directory tree; 
-                default is 5.</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-minWidth &lt;minWidth&gt;</span>
-<br> 
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;minimum number of subdirectories per 
-                directory; default is 1.</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-maxWidth &lt;maxWidth&gt;</span>
-<br> 
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;maximum number of subdirectories per 
-                directory; default is 5.</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-numOfFiles &lt;#OfFiles&gt;</span>
-<br> 
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;the total number of files in the test 
-                space; default is 10.</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-avgFileSize &lt;avgFileSizeInBlocks&gt;</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;average size of a file in blocks; default is 1.
-                </span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-outDir &lt;outDir&gt;</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;output directory; default is the 
-                current directory. </span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-seed &lt;seed&gt;</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;random number generator seed; 
-                default is the current time.</span>
-<br>
-                                
-</p>
-<a name="N100D8"></a><a name="Test+Space+Generator"></a>
-<h3 class="h4"> Test Space Generator </h3>
-<p>
-        This tool reads the directory structure and file structure from 
-        the input directory and creates the namespace in Hadoop distributed
-        file system. All files are filled with byte 'a'.
-                                </p>
-<p>
-        The synopsis of the command is
-                                </p>
-<p>
-        
-<span class="codefrag">java DataGenerator [options]</span>
-                                
-</p>
-<p>
-        Options include:<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-inDir &lt;inDir&gt;</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;input directory name where directory/file
-                structures are stored; default is the current directory.
-        </span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;-root &lt;test space root&gt;</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;the name of the root directory which the 
-                new namespace is going to be placed under; 
-                default is "/testLoadSpace".</span>
-<br>
-                                
-</p>
-</div>
-	
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2008 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 100
docs/SLG_user_guide.pdf


+ 0 - 2
docs/broken-links.xml

@@ -1,2 +0,0 @@
-<broken-links>
-</broken-links>

+ 0 - 528
docs/capacity_scheduler.html

@@ -1,528 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>Capacity Scheduler</title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">Project</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">Wiki</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.20 Documentation</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">Documentation</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">Overview</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">Hadoop Quick Start</a>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">Hadoop Cluster Setup</a>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Hadoop Map/Reduce Tutorial</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">Hadoop Command Guide</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">Hadoop FS Shell Guide</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">Hadoop DistCp Guide</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop Native Libraries</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Hadoop Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS User Guide</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS Architecture</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Admin Guide: Permissions</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS Admin Guide: Quotas</a>
-</div>
-<div class="menuitem">
-<a href="SLG_user_guide.html">HDFS Utilities</a>
-</div>
-<div class="menuitem">
-<a href="libhdfs.html">HDFS C API</a>
-</div>
-<div class="menuitem">
-<a href="hod_user_guide.html">HOD User Guide</a>
-</div>
-<div class="menuitem">
-<a href="hod_admin_guide.html">HOD Admin Guide</a>
-</div>
-<div class="menuitem">
-<a href="hod_config_guide.html">HOD Config Guide</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">Capacity Scheduler</div>
-</div>
-<div class="menuitem">
-<a href="vaidya.html">Hadoop Vaidya</a>
-</div>
-<div class="menuitem">
-<a href="api/index.html">API Docs</a>
-</div>
-<div class="menuitem">
-<a href="jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">Wiki</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">FAQ</a>
-</div>
-<div class="menuitem">
-<a href="releasenotes.html">Release Notes</a>
-</div>
-<div class="menuitem">
-<a href="changes.html">Change Log</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="capacity_scheduler.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>Capacity Scheduler</h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#Purpose">Purpose</a>
-</li>
-<li>
-<a href="#Features">Features</a>
-</li>
-<li>
-<a href="#Picking+a+task+to+run">Picking a task to run</a>
-</li>
-<li>
-<a href="#Reclaiming+capacity">Reclaiming capacity</a>
-</li>
-<li>
-<a href="#Installation">Installation</a>
-</li>
-<li>
-<a href="#Configuration">Configuration</a>
-<ul class="minitoc">
-<li>
-<a href="#Using+the+capacity+scheduler">Using the capacity scheduler</a>
-</li>
-<li>
-<a href="#Setting+up+queues">Setting up queues</a>
-</li>
-<li>
-<a href="#Configuring+properties+for+queues">Configuring properties for queues</a>
-</li>
-<li>
-<a href="#Reviewing+the+configuration+of+the+capacity+scheduler">Reviewing the configuration of the capacity scheduler</a>
-</li>
-</ul>
-</li>
-</ul>
-</div>
-  
-    
-<a name="N1000D"></a><a name="Purpose"></a>
-<h2 class="h3">Purpose</h2>
-<div class="section">
-<p>This document describes the Capacity Scheduler, a pluggable Map/Reduce scheduler for Hadoop which provides a way to share large clusters.</p>
-</div>
-    
-    
-<a name="N10017"></a><a name="Features"></a>
-<h2 class="h3">Features</h2>
-<div class="section">
-<p>The Capacity Scheduler supports the following features:</p>
-<ul>
-        
-<li>
-          Support for multiple queues, where a job is submitted to a queue.
-        </li>
-        
-<li>
-          Queues are guaranteed a fraction of the capacity of the grid (their 
- 	      'guaranteed capacity') in the sense that a certain capacity of 
- 	      resources will be at their disposal. All jobs submitted to a 
- 	      queue will have access to the capacity guaranteed to the queue.
-        </li>
-        
-<li>
-          Free resources can be allocated to any queue beyond its guaranteed 
-          capacity. These excess allocated resources can be reclaimed and made 
-          available to another queue in order to meet its capacity guarantee.
-        </li>
-        
-<li>
-          The scheduler guarantees that excess resources taken from a queue 
-          will be restored to it within N minutes of its need for them.
-        </li>
-        
-<li>
-          Queues optionally support job priorities (disabled by default).
-        </li>
-        
-<li>
-          Within a queue, jobs with higher priority will have access to the 
-          queue's resources before jobs with lower priority. However, once a 
-          job is running, it will not be preempted for a higher priority job.
-        </li>
-        
-<li>
-          In order to prevent one or more users from monopolizing its 
-          resources, each queue enforces a limit on the percentage of 
-          resources allocated to a user at any given time, if there is 
-          competition for them.  
-        </li>
-        
-<li>
-          Support for memory-intensive jobs, wherein a job can optionally 
-          specify higher memory-requirements than the default, and the tasks 
-          of the job will only be run on TaskTrackers that have enough memory 
-          to spare.
-        </li>
-      
-</ul>
-</div>
-    
-    
-<a name="N1003C"></a><a name="Picking+a+task+to+run"></a>
-<h2 class="h3">Picking a task to run</h2>
-<div class="section">
-<p>Note that many of these steps can be, and will be, enhanced over time
-      to provide better algorithms.</p>
-<p>Whenever a TaskTracker is free, the Capacity Scheduler first picks a 
-      queue that needs to reclaim resources the soonest (this is a queue
-      whose resources were temporarily being used by some other queue and now
-      needs access to those resources). If no such queue is found, it then picks
-      the queue which has the most free space (whose ratio of # of running slots to 
-      guaranteed capacity is the lowest).</p>
-<p>Once a queue is selected, the scheduler picks a job in the queue. Jobs
-      are sorted based on when they're submitted and their priorities (if the 
-      queue supports priorities). Jobs are considered in order, and a job is 
-      selected if its user is within the user-quota for the queue, i.e., the 
-      user is not already using queue resources above his/her limit. The 
-      scheduler also makes sure that there is enough free memory in the 
-      TaskTracker to run the job's task, in case the job has special memory
-      requirements.</p>
-<p>Once a job is selected, the scheduler picks a task to run. This logic 
-      to pick a task remains unchanged from earlier versions.</p>
-</div>
-    
-    
-<a name="N1004F"></a><a name="Reclaiming+capacity"></a>
-<h2 class="h3">Reclaiming capacity</h2>
-<div class="section">
-<p>Periodically, the scheduler determines:</p>
-<ul>
-	    
-<li>
-	      if a queue needs to reclaim capacity. This happens when a queue has
-	      at least one task pending and part of its guaranteed capacity is 
-	      being used by some other queue. If this happens, the scheduler notes
-	      the amount of resources it needs to reclaim for this queue within a 
-	      specified period of time (the reclaim time). 
-	    </li>
-	    
-<li>
-	      if a queue has not received all the resources it needed to reclaim,
-	      and its reclaim time is about to expire. In this case, the scheduler
-	      needs to kill tasks from queues running over capacity. This it does
-	      by killing the tasks that started the latest.
-	    </li>
-	  
-</ul>
-</div>
-
-    
-<a name="N10062"></a><a name="Installation"></a>
-<h2 class="h3">Installation</h2>
-<div class="section">
-<p>The capacity scheduler is available as a JAR file in the Hadoop
-        tarball under the <em>contrib/capacity-scheduler</em> directory. The name of 
-        the JAR file would be along the lines of hadoop-*-capacity-scheduler.jar.</p>
-<p>You can also build the scheduler from source by executing
-        <em>ant package</em>, in which case it would be available under
-        <em>build/contrib/capacity-scheduler</em>.</p>
-<p>To run the capacity scheduler in your Hadoop installation, you need 
-        to put it on the <em>CLASSPATH</em>. The easiest way is to copy the 
-        <span class="codefrag">hadoop-*-capacity-scheduler.jar</span> 
-        to <span class="codefrag">HADOOP_HOME/lib</span>. Alternatively, you can modify 
-        <em>HADOOP_CLASSPATH</em> to include this jar, in 
-        <span class="codefrag">conf/hadoop-env.sh</span>.</p>
-</div>
-
-    
-<a name="N1008A"></a><a name="Configuration"></a>
-<h2 class="h3">Configuration</h2>
-<div class="section">
-<a name="N10090"></a><a name="Using+the+capacity+scheduler"></a>
-<h3 class="h4">Using the capacity scheduler</h3>
-<p>
-          To make the Hadoop framework use the capacity scheduler, set up
-          the following property in the site configuration:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-            
-<tr>
-              
-<td colspan="1" rowspan="1">Property</td>
-              <td colspan="1" rowspan="1">Value</td>
-            
-</tr>
-            
-<tr>
-              
-<td colspan="1" rowspan="1">mapred.jobtracker.taskScheduler</td>
-              <td colspan="1" rowspan="1">org.apache.hadoop.mapred.CapacityTaskScheduler</td>
-            
-</tr>
-          
-</table>
-<a name="N100B7"></a><a name="Setting+up+queues"></a>
-<h3 class="h4">Setting up queues</h3>
-<p>
-          You can define multiple queues to which users can submit jobs with
-          the capacity scheduler. To define multiple queues, you should edit
-          the site configuration for Hadoop and modify the
-          <em>mapred.queue.names</em> property.
-        </p>
-<p>
-          You can also configure ACLs for controlling which users or groups
-          have access to the queues.
-        </p>
-<p>
-          For more details, refer to
-          <a href="cluster_setup.html#Configuring+the+Hadoop+Daemons">Cluster 
-          Setup</a> documentation.
-        </p>
-<a name="N100CE"></a><a name="Configuring+properties+for+queues"></a>
-<h3 class="h4">Configuring properties for queues</h3>
-<p>The capacity scheduler can be configured with several properties
-        for each queue that control the behavior of the scheduler. This
-        configuration is in the <em>conf/capacity-scheduler.xml</em>. By
-        default, the configuration is set up for one queue, named 
-        <em>default</em>.</p>
-<p>To specify a property for a queue that is defined in the site
-        configuration, you should use the property name as
-        <em>mapred.capacity-scheduler.queue.&lt;queue-name&gt;.&lt;property-name&gt;</em>.
-        </p>
-<p>For example, to define the property <em>guaranteed-capacity</em>
-        for queue named <em>research</em>, you should specify the property
-        name as 
-        <em>mapred.capacity-scheduler.queue.research.guaranteed-capacity</em>.
-        </p>
-<p>The properties defined for queues and their descriptions are
-        listed in the table below:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-          
-<tr>
-<th colspan="1" rowspan="1">Name</th><th colspan="1" rowspan="1">Description</th>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">mapred.capacity-scheduler.queue.&lt;queue-name&gt;.guaranteed-capacity</td>
-          	<td colspan="1" rowspan="1">Percentage of the number of slots in the cluster that are
-          	guaranteed to be available for jobs in this queue. 
-          	The sum of guaranteed capacities for all queues should be less 
-          	than or equal to 100.</td>
-          
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">mapred.capacity-scheduler.queue.&lt;queue-name&gt;.reclaim-time-limit</td>
-          	<td colspan="1" rowspan="1">The amount of time, in seconds, within which resources 
-          	lent to other queues will be reclaimed.</td>
-          
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">mapred.capacity-scheduler.queue.&lt;queue-name&gt;.supports-priority</td>
-          	<td colspan="1" rowspan="1">If true, priorities of jobs will be taken into account in scheduling 
-          	decisions.</td>
-          
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">mapred.capacity-scheduler.queue.&lt;queue-name&gt;.minimum-user-limit-percent</td>
-          	<td colspan="1" rowspan="1">Each queue enforces a limit on the percentage of resources 
-          	allocated to a user at any given time, if there is competition 
-          	for them. This user limit can vary between a minimum and maximum 
-          	value. The former depends on the number of users who have submitted
-          	jobs, and the latter is set to this property value. For example, 
-          	suppose the value of this property is 25. If two users have 
-          	submitted jobs to a queue, no single user can use more than 50% 
-          	of the queue resources. If a third user submits a job, no single 
-          	user can use more than 33% of the queue resources. With 4 or more 
-          	users, no user can use more than 25% of the queue's resources. A 
-          	value of 100 implies no user limits are imposed.</td>
-          
-</tr>
-        
-</table>
-<a name="N10130"></a><a name="Reviewing+the+configuration+of+the+capacity+scheduler"></a>
-<h3 class="h4">Reviewing the configuration of the capacity scheduler</h3>
-<p>
-          Once the installation and configuration is completed, you can review
-          it after starting the Map/Reduce cluster from the admin UI.
-        </p>
-<ul>
-          
-<li>Start the Map/Reduce cluster as usual.</li>
-          
-<li>Open the JobTracker web UI.</li>
-          
-<li>The queues you have configured should be listed under the <em>Scheduling
-              Information</em> section of the page.</li>
-          
-<li>The properties for the queues should be visible in the <em>Scheduling
-              Information</em> column against each queue.</li>
-        
-</ul>
-</div>
-  
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2008 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>
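
The capacity scheduler page deleted above documents configuration that still applies at deploy time; only the rendered HTML is leaving version control, since the docs build now regenerates it. As a rough illustration of the properties it describes, and reusing the queue name research that the page itself uses as an example, a conf/capacity-scheduler.xml entry might look like the following sketch; the values are placeholders, not recommendations.

<?xml version="1.0"?>
<!-- Hypothetical snippet; property names come from the table in the
     deleted page, the queue name and values are illustrative only. -->
<configuration>
  <property>
    <name>mapred.capacity-scheduler.queue.research.guaranteed-capacity</name>
    <value>20</value>
  </property>
  <property>
    <name>mapred.capacity-scheduler.queue.research.reclaim-time-limit</name>
    <value>300</value>
  </property>
  <property>
    <name>mapred.capacity-scheduler.queue.research.supports-priority</name>
    <value>false</value>
  </property>
  <property>
    <name>mapred.capacity-scheduler.queue.research.minimum-user-limit-percent</name>
    <value>25</value>
  </property>
</configuration>

For such a queue to be used, the page also requires mapred.jobtracker.taskScheduler to be set to org.apache.hadoop.mapred.CapacityTaskScheduler and the queue to be listed in mapred.queue.names in the site configuration.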

File diff suppressed because it is too large
+ 0 - 140
docs/capacity_scheduler.pdf


+ 0 - 5161
docs/changes.html

@@ -1,5161 +0,0 @@
-<!--
-**********************************************************
-** WARNING: This file is generated from CHANGES.txt by the 
-**          Perl script 'changes2html.pl'.
-**          Do *not* edit this file!
-**********************************************************
-          
-****************************************************************************
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-****************************************************************************
--->
-<html>
-<head>
-  <title>Hadoop Change Log</title>
-  <link rel="stylesheet" href="ChangesFancyStyle.css" title="Fancy">
-  <link rel="alternate stylesheet" href="ChangesSimpleStyle.css" title="Simple">
-  <META http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
-  <SCRIPT>
-    function toggleList(e) {
-      element = document.getElementById(e).style;
-      element.display == 'none' ? element.display = 'block' : element.display='none';
-    }
-    function collapse() {
-      for (var i = 0; i < document.getElementsByTagName("ul").length; i++) {
-        var list = document.getElementsByTagName("ul")[i];
-        if (list.id != 'release_0.20.0_-_unreleased_' && list.id != 'release_0.19.1_-_unreleased_') {
-          list.style.display = "none";
-        }
-      }
-      for (var i = 0; i < document.getElementsByTagName("ol").length; i++) {
-        document.getElementsByTagName("ol")[i].style.display = "none"; 
-      }
-    }
-    window.onload = collapse;
-  </SCRIPT>
-</head>
-<body>
-
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Scalable Computing Platform"></a>
-<h1>Hadoop Change Log</h1>
-
-<h2><a href="javascript:toggleList('release_0.20.0_-_unreleased_')">Release 0.20.0 - Unreleased
-</a></h2>
-<ul id="release_0.20.0_-_unreleased_">
-  <li><a href="javascript:toggleList('release_0.20.0_-_unreleased_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(15)
-    <ol id="release_0.20.0_-_unreleased_._incompatible_changes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4210">HADOOP-4210</a>. Fix findbugs warnings for equals implementations of mapred ID
-classes. Removed public, static ID::read and ID::forName; made ID an
-abstract class.<br />(Suresh Srinivas via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4253">HADOOP-4253</a>. Fix various warnings generated by findbugs.
-Following deprecated methods in RawLocalFileSystem are removed:
- public String getName()
- public void lock(Path p, boolean shared)
- public void release(Path p)<br />(Suresh Srinivas via johan)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4618">HADOOP-4618</a>. Move http server from FSNamesystem into NameNode.
-FSNamesystem.getNameNodeInfoPort() is removed.
-FSNamesystem.getDFSNameNodeMachine() and FSNamesystem.getDFSNameNodePort()
-  replaced by FSNamesystem.getDFSNameNodeAddress().
-NameNode(bindAddress, conf) is removed.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4567">HADOOP-4567</a>. GetFileBlockLocations returns the NetworkTopology
-information of the machines where the blocks reside.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4435">HADOOP-4435</a>. The JobTracker WebUI displays the amount of heap memory
-in use.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4628">HADOOP-4628</a>. Move Hive into a standalone subproject.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4188">HADOOP-4188</a>. Removes task's dependency on concrete filesystems.<br />(Sharad Agarwal via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1650">HADOOP-1650</a>. Upgrade to Jetty 6.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3986">HADOOP-3986</a>. Remove static Configuration from JobClient. (Amareshwari
-Sriramadasu via cdouglas)
-  JobClient::setCommandLineConfig is removed
-  JobClient::getCommandLineConfig is removed
-  JobShell, TestJobShell classes are removed
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4422">HADOOP-4422</a>. S3 file systems should not create bucket.<br />(David Phillips via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4035">HADOOP-4035</a>. Support memory based scheduling in capacity scheduler.<br />(Vinod Kumar Vavilapalli via yhemanth)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3497">HADOOP-3497</a>. Fix bug in overly restrictive file globbing with a
-PathFilter.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4445">HADOOP-4445</a>. Replace running task counts with running task
-percentage in capacity scheduler UI.<br />(Sreekanth Ramakrishnan via
-yhemanth)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4631">HADOOP-4631</a>. Splits the configuration into three parts - one for core,
-one for mapred and the last one for HDFS.<br />(Sharad Agarwal via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3344">HADOOP-3344</a>. Fix libhdfs build to use autoconf and build the same
-architecture (32 vs 64 bit) of the JVM running Ant.  The libraries for
-pipes, utils, and libhdfs are now all in c++/&lt;os_osarch_jvmdatamodel&gt;/lib.<br />(Giridharan Kesavan via nigel)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.20.0_-_unreleased_._new_features_')">  NEW FEATURES
-</a>&nbsp;&nbsp;&nbsp;(10)
-    <ol id="release_0.20.0_-_unreleased_._new_features_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4575">HADOOP-4575</a>. Add a proxy service for relaying HsftpFileSystem requests.
-Includes client authentication via user certificates and config-based
-access control.<br />(Kan Zhang via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4661">HADOOP-4661</a>. Add DistCh, a new tool for distributed ch{mod,own,grp}.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4709">HADOOP-4709</a>. Add several new features and bug fixes to Chukwa.
-  Added Hadoop Infrastructure Care Center (UI for visualize data collected
-                                           by Chukwa)
-  Added FileAdaptor for streaming small file in one chunk
-  Added compression to archive and demux output
-  Added unit tests and validation for agent, collector, and demux map
-    reduce job
-  Added database loader for loading demux output (sequence file) to jdbc
-    connected database
-  Added algorithm to distribute collector load more evenly<br />(Jerome Boulon, Eric Yang, Andy Konwinski, Ariel Rabkin via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4179">HADOOP-4179</a>. Add Vaidya tool to analyze map/reduce job logs for performanc
-problems.<br />(Suhas Gogate via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4029">HADOOP-4029</a>. Add NameNode storage information to the dfshealth page and
-move DataNode information to a separated page.<br />(Boris Shkolnik via
-szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4348">HADOOP-4348</a>. Add service-level authorization for Hadoop.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4826">HADOOP-4826</a>. Introduce admin command saveNamespace.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3063">HADOOP-3063</a>  BloomMapFile - fail-fast version of MapFile for sparsely
-populated key space<br />(Andrzej Bialecki via stack)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1230">HADOOP-1230</a>. Add new map/reduce API and deprecate the old one. Generally,
-the old code should work without problem. The new api is in
-org.apache.hadoop.mapreduce and the old classes in org.apache.hadoop.mapred
-are deprecated. Differences in the new API:
-  1. All of the methods take Context objects that allow us to add new
-     methods without breaking compatibility.
-  2. Mapper and Reducer now have a "run" method that is called once and
-     contains the control loop for the task, which lets applications
-     replace it.
-  3. Mapper and Reducer by default are Identity Mapper and Reducer.
-  4. The FileOutputFormats use part-r-00000 for the output of reduce 0 and
-     part-m-00000 for the output of map 0.
-  5. The reduce grouping comparator now uses the raw compare instead of
-     object compare.
-  6. The number of maps in FileInputFormat is controlled by min and max
-     split size rather than min size and the desired number of maps.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3305">HADOOP-3305</a>.  Use Ivy to manage dependencies.<br />(Giridharan Kesavan
-and Steve Loughran via cutting)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.20.0_-_unreleased_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(48)
-    <ol id="release_0.20.0_-_unreleased_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4749">HADOOP-4749</a>. Added a new counter REDUCE_INPUT_BYTES.<br />(Yongqiang He via
-zshao)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4234">HADOOP-4234</a>. Fix KFS "glue" layer to allow applications to interface
-with multiple KFS metaservers.<br />(Sriram Rao via lohit)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4245">HADOOP-4245</a>. Update to latest version of KFS "glue" library jar.<br />(Sriram Rao via lohit)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4244">HADOOP-4244</a>. Change test-patch.sh to check Eclipse classpath no matter
-it is run by Hudson or not.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3180">HADOOP-3180</a>. Add name of missing class to WritableName.getClass
-IOException.<br />(Pete Wyckoff via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4178">HADOOP-4178</a>. Make the capacity scheduler's default values configurable.<br />(Sreekanth Ramakrishnan via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4262">HADOOP-4262</a>. Generate better error message when client exception has null
-message.<br />(stevel via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4226">HADOOP-4226</a>. Refactor and document LineReader to make it more readily
-understandable.<br />(Yuri Pradkin via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4238">HADOOP-4238</a>. When listing jobs, if scheduling information isn't available
-print NA instead of empty output.<br />(Sreekanth Ramakrishnan via johan)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4284">HADOOP-4284</a>. Support filters that apply to all requests, or global filters,
-to HttpServer.<br />(Kan Zhang via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4276">HADOOP-4276</a>. Improve the hashing functions and deserialization of the
-mapred ID classes.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4485">HADOOP-4485</a>. Add a compile-native ant task, as a shorthand.<br />(enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4454">HADOOP-4454</a>. Allow # comments in slaves file.<br />(Rama Ramasamy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3461">HADOOP-3461</a>. Remove hdfs.StringBytesWritable.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4437">HADOOP-4437</a>. Use Halton sequence instead of java.util.Random in
-PiEstimator.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4572">HADOOP-4572</a>. Change INode and its sub-classes to package private.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4187">HADOOP-4187</a>. Does a runtime lookup for JobConf/JobConfigurable, and if
-found, invokes the appropriate configure method.<br />(Sharad Agarwal via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4453">HADOOP-4453</a>. Improve ssl configuration and handling in HsftpFileSystem,
-particularly when used with DistCp.<br />(Kan Zhang via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4583">HADOOP-4583</a>. Several code optimizations in HDFS.<br />(Suresh Srinivas via
-szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3923">HADOOP-3923</a>. Remove org.apache.hadoop.mapred.StatusHttpServer.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4622">HADOOP-4622</a>. Explicitly specify interpretor for non-native
-pipes binaries.<br />(Fredrik Hedberg via johan)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4505">HADOOP-4505</a>. Add a unit test to test faulty setup task and cleanup
-task killing the job.<br />(Amareshwari Sriramadasu via johan)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4608">HADOOP-4608</a>. Don't print a stack trace when the example driver gets an
-unknown program to run.<br />(Edward Yoon via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4645">HADOOP-4645</a>. Package HdfsProxy contrib project without the extra level
-of directories.<br />(Kan Zhang via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4126">HADOOP-4126</a>. Allow access to HDFS web UI on EC2<br />(tomwhite via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4612">HADOOP-4612</a>. Removes RunJar's dependency on JobClient.<br />(Sharad Agarwal via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4185">HADOOP-4185</a>. Adds setVerifyChecksum() method to FileSystem.<br />(Sharad Agarwal via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4523">HADOOP-4523</a>. Prevent too many tasks scheduled on a node from bringing
-it down by monitoring for cumulative memory usage across tasks.<br />(Vinod Kumar Vavilapalli via yhemanth)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4640">HADOOP-4640</a>. Adds an input format that can split lzo compressed
-text files.<br />(johan)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4666">HADOOP-4666</a>. Launch reduces only after a few maps have run in the
-Fair Scheduler.<br />(Matei Zaharia via johan)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4339">HADOOP-4339</a>. Remove redundant calls from FileSystem/FsShell when
-generating/processing ContentSummary.<br />(David Phillips via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2774">HADOOP-2774</a>. Add counters tracking records spilled to disk in MapTask and
-ReduceTask.<br />(Ravi Gummadi via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4513">HADOOP-4513</a>. Initialize jobs asynchronously in the capacity scheduler.<br />(Sreekanth Ramakrishnan via yhemanth)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4649">HADOOP-4649</a>. Improve abstraction for spill indices.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3770">HADOOP-3770</a>. Add gridmix2, an iteration on the gridmix benchmark.<br />(Runping
-Qi via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4708">HADOOP-4708</a>. Add support for dfsadmin commands in TestCLI.<br />(Boris Shkolnik
-via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4758">HADOOP-4758</a>. Add a splitter for metrics contexts to support more than one
-type of collector.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4722">HADOOP-4722</a>. Add tests for dfsadmin quota error messages.<br />(Boris Shkolnik
-via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4690">HADOOP-4690</a>.  fuse-dfs - create source file/function + utils + config +
-main source files.<br />(pete wyckoff via mahadev)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3750">HADOOP-3750</a>. Fix and enforce module dependencies.<br />(Sharad Agarwal via
-tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4747">HADOOP-4747</a>. Speed up FsShell::ls by removing redundant calls to the
-filesystem.<br />(David Phillips via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4305">HADOOP-4305</a>. Improves the blacklisting strategy, whereby, tasktrackers
-that are blacklisted are not given tasks to run from other jobs, subject
-to the following conditions (all must be met):
-1) The TaskTracker has been blacklisted by at least 4 jobs (configurable)
-2) The TaskTracker has been blacklisted at least 50% more often than
-   the average (configurable)
-3) The cluster has less than 50% trackers blacklisted
-Once in 24 hours, a TaskTracker blacklisted for all jobs is given a chance.
-Restarting the TaskTracker moves it out of the blacklist.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4688">HADOOP-4688</a>. Modify the MiniMRDFSSort unit test to spill multiple times,
-exercising the map-side merge code.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4737">HADOOP-4737</a>. Adds the KILLED notification when jobs get killed.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4728">HADOOP-4728</a>. Add a test exercising different namenode configurations.<br />(Boris Shkolnik via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4807">HADOOP-4807</a>. Adds JobClient commands to get the active/blacklisted tracker names.
-Also adds commands to display running/completed task attempt IDs.<br />(ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4699">HADOOP-4699</a>. Remove checksum validation from map output servlet.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4838">HADOOP-4838</a>. Added a registry to automate metrics and mbeans management.<br />(Sanjay Radia via acmurthy)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.20.0_-_unreleased_._optimizations_')">  OPTIMIZATIONS
-</a>&nbsp;&nbsp;&nbsp;(2)
-    <ol id="release_0.20.0_-_unreleased_._optimizations_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3293">HADOOP-3293</a>. Fixes FileInputFormat to do provide locations for splits
-based on the rack/host that has the most number of bytes.<br />(Jothi Padmanabhan via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4683">HADOOP-4683</a>. Fixes Reduce shuffle scheduler to invoke getMapCompletionEvents
-in a separate thread.<br />(Jothi Padmanabhan via ddas)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.20.0_-_unreleased_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(46)
-    <ol id="release_0.20.0_-_unreleased_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4204">HADOOP-4204</a>. Fix findbugs warnings related to unused variables, naive
-Number subclass instantiation, Map iteration, and badly scoped inner
-classes.<br />(Suresh Srinivas via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4207">HADOOP-4207</a>. Update derby jar file to release 10.4.2 release.<br />(Prasad Chakka via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4325">HADOOP-4325</a>. SocketInputStream.read() should return -1 in case EOF.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4408">HADOOP-4408</a>. FsAction functions need not create new objects.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4440">HADOOP-4440</a>.  TestJobInProgressListener tests for jobs killed in queued
-state<br />(Amar Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4346">HADOOP-4346</a>. Implement blocking connect so that Hadoop is not affected
-by selector problem with JDK default implementation.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4388">HADOOP-4388</a>. If there are invalid blocks in the transfer list, Datanode
-should handle them and keep transferring the remaining blocks.<br />(Suresh
-Srinivas via szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4587">HADOOP-4587</a>. Fix a typo in Mapper javadoc.<br />(Koji Noguchi via szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4530">HADOOP-4530</a>. In fsck, HttpServletResponse sendError fails with
-IllegalStateException.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4377">HADOOP-4377</a>. Fix a race condition in directory creation in
-NativeS3FileSystem.<br />(David Phillips via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4621">HADOOP-4621</a>. Fix javadoc warnings caused by duplicate jars.<br />(Kan Zhang via
-cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4566">HADOOP-4566</a>. Deploy new hive code to support more types.<br />(Zheng Shao via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4571">HADOOP-4571</a>. Add chukwa conf files to svn:ignore list.<br />(Eric Yang via
-szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4589">HADOOP-4589</a>. Correct PiEstimator output messages and improve the code
-readability.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4650">HADOOP-4650</a>. Correct a mismatch between the default value of
-local.cache.size in the config and the source.<br />(Jeff Hammerbacher via
-cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4606">HADOOP-4606</a>. Fix cygpath error if the log directory does not exist.<br />(szetszwo via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4141">HADOOP-4141</a>. Fix bug in ScriptBasedMapping causing potential infinite
-loop on misconfigured hadoop-site.<br />(Aaron Kimball via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4691">HADOOP-4691</a>. Correct a link in the javadoc of IndexedSortable.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4598">HADOOP-4598</a>. '-setrep' command skips under-replicated blocks.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4429">HADOOP-4429</a>. Set defaults for user, group in UnixUserGroupInformation so
-login fails more predictably when misconfigured.<br />(Alex Loddengaard via
-cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4676">HADOOP-4676</a>. Fix broken URL in blacklisted tasktrackers page.<br />(Amareshwari
-Sriramadasu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3422">HADOOP-3422</a>  Ganglia counter metrics are all reported with the metric
-name "value", so the counter values can not be seen.<br />(Jason Attributor
-and Brian Bockelman via stack)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4704">HADOOP-4704</a>. Fix javadoc typos "the the".<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4677">HADOOP-4677</a>. Fix semantics of FileSystem::getBlockLocations to return
-meaningful values.<br />(Hong Tang via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4669">HADOOP-4669</a>. Use correct operator when evaluating whether access time is
-enabled<br />(Dhruba Borthakur via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4732">HADOOP-4732</a>. Pass connection and read timeouts in the correct order when
-setting up fetch in reduce.<br />(Amareshwari Sriramadasu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4558">HADOOP-4558</a>. Fix capacity reclamation in capacity scheduler.<br />(Amar Kamat via yhemanth)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4770">HADOOP-4770</a>. Fix rungridmix_2 script to work with RunJar.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4738">HADOOP-4738</a>. When using git, the saveVersion script will use only the
-commit hash for the version and not the message, which requires escaping.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4576">HADOOP-4576</a>. Show pending job count instead of task count in the UI per
-queue in capacity scheduler.<br />(Sreekanth Ramakrishnan via yhemanth)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4623">HADOOP-4623</a>. Maintain running tasks even if speculative execution is off.<br />(Amar Kamat via yhemanth)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4786">HADOOP-4786</a>. Fix broken compilation error in
-TestTrackerBlacklistAcrossJobs.<br />(yhemanth)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4785">HADOOP-4785</a>. Fixes theJobTracker heartbeat to not make two calls to
-System.currentTimeMillis().<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4792">HADOOP-4792</a>. Add generated Chukwa configuration files to version control
-ignore lists.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4796">HADOOP-4796</a>. Fix Chukwa test configuration, remove unused components.<br />(Eric
-Yang via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4708">HADOOP-4708</a>. Add binaries missed in the initial checkin for Chukwa.<br />(Eric
-Yang via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4805">HADOOP-4805</a>. Remove black list collector from Chukwa Agent HTTP Sender.<br />(Eric Yang via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4837">HADOOP-4837</a>. Move HADOOP_CONF_DIR configuration to chukwa-env.sh<br />(Jerome
-Boulon via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4825">HADOOP-4825</a>. Use ps instead of jps for querying process status in Chukwa.<br />(Eric Yang via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4844">HADOOP-4844</a>. Fixed javadoc for
-org.apache.hadoop.fs.permission.AccessControlException to document that
-it's deprecated in favour of
-org.apache.hadoop.security.AccessControlException.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4706">HADOOP-4706</a>. Close the underlying output stream in
-IFileOutputStream::close.<br />(Jothi Padmanabhan via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4855">HADOOP-4855</a>. Fixed command-specific help messages for refreshServiceAcl in
-DFSAdmin and MRAdmin.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4820">HADOOP-4820</a>. Remove unused method FSNamesystem::deleteInSafeMode.<br />(Suresh
-Srinivas via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4698">HADOOP-4698</a>. Lower io.sort.mb to 10 in the tests and raise the junit memory
-limit to 512m from 256m.<br />(Nigel Daley via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4860">HADOOP-4860</a>. Split TestFileTailingAdapters into three separate tests to
-avoid contention.<br />(Eric Yang via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3921">HADOOP-3921</a>. Fixed clover (code coverage) target to work with JDK 6.<br />(tomwhite via nigel)</li>
-    </ol>
-  </li>
-</ul>
-<h2><a href="javascript:toggleList('release_0.19.1_-_unreleased_')">Release 0.19.1 - Unreleased
-</a></h2>
-<ul id="release_0.19.1_-_unreleased_">
-  <li><a href="javascript:toggleList('release_0.19.1_-_unreleased_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(1)
-    <ol id="release_0.19.1_-_unreleased_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4739">HADOOP-4739</a>. Fix spelling and grammar, improve phrasing of some sections in
-mapred tutorial.<br />(Vivek Ratan via cdouglas)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.19.1_-_unreleased_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(6)
-    <ol id="release_0.19.1_-_unreleased_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4697">HADOOP-4697</a>. Fix getBlockLocations in KosmosFileSystem to handle multiple
-blocks correctly.<br />(Sriram Rao via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4420">HADOOP-4420</a>. Add null checks for job, caused by invalid job IDs.<br />(Aaron Kimball via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4632">HADOOP-4632</a>. Fix TestJobHistoryVersion to use test.build.dir instead of the
-current working directory for scratch space.<br />(Amar Kamat via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4508">HADOOP-4508</a>. Fix FSDataOutputStream.getPos() for append.<br />(dhruba via
-szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4727">HADOOP-4727</a>. Fix a group checking bug in fill_stat_structure(...) in
-fuse-dfs.<br />(Brian Bockelman via szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4836">HADOOP-4836</a>. Correct typos in mapred related documentation.<br />(Jord? Polo
-via szetszwo)</li>
-    </ol>
-  </li>
-</ul>
-<h2><a href="javascript:toggleList('older')">Older Releases</a></h2>
-<ul id="older">
-<h3><a href="javascript:toggleList('release_0.19.0_-_2008-11-18_')">Release 0.19.0 - 2008-11-18
-</a></h3>
-<ul id="release_0.19.0_-_2008-11-18_">
-  <li><a href="javascript:toggleList('release_0.19.0_-_2008-11-18_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(23)
-    <ol id="release_0.19.0_-_2008-11-18_._incompatible_changes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3595">HADOOP-3595</a>. Remove deprecated methods for mapred.combine.once
-functionality, which was necessary to provide backwards-compatible
-combiner semantics for 0.18.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3667">HADOOP-3667</a>. Remove the following deprecated methods from JobConf:
-  addInputPath(Path)
-  getInputPaths()
-  getMapOutputCompressionType()
-  getOutputPath()
-  getSystemDir()
-  setInputPath(Path)
-  setMapOutputCompressionType(CompressionType style)
-  setOutputPath(Path)<br />(Amareshwari Sriramadasu via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3652">HADOOP-3652</a>. Remove deprecated class OutputFormatBase.<br />(Amareshwari Sriramadasu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2885">HADOOP-2885</a>. Break the hadoop.dfs package into separate packages under
-hadoop.hdfs that reflect whether they are client, server, protocol,
-etc. DistributedFileSystem and DFSClient have moved and are now
-considered package private.<br />(Sanjay Radia via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2325">HADOOP-2325</a>.  Require Java 6.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-372">HADOOP-372</a>.  Add support for multiple input paths with a different
-InputFormat and Mapper for each path.<br />(Chris Smith via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1700">HADOOP-1700</a>.  Support appending to file in HDFS.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3792">HADOOP-3792</a>. Make FsShell -test consistent with unix semantics, returning
-zero for true and non-zero for false.<br />(Ben Slusky via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3664">HADOOP-3664</a>. Remove the deprecated method InputFormat.validateInput,
-which is no longer needed.<br />(tomwhite via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3549">HADOOP-3549</a>. Give more meaningful errno's in libhdfs. In particular,
-EACCES is returned for permission problems.<br />(Ben Slusky via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4036">HADOOP-4036</a>. ResourceStatus was added to TaskTrackerStatus by <a href="http://issues.apache.org/jira/browse/HADOOP-3759">HADOOP-3759</a>,
-so increment the InterTrackerProtocol version.<br />(Hemanth Yamijala via
-omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3150">HADOOP-3150</a>. Moves task promotion to tasks. Defines a new interface for
-committing output files. Moves job setup to jobclient, and moves jobcleanup
-to a separate task.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3446">HADOOP-3446</a>. Keep map outputs in memory during the reduce. Remove
-fs.inmemory.size.mb and replace with properties defining in memory map
-output retention during the shuffle and reduce relative to maximum heap
-usage.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3245">HADOOP-3245</a>. Adds the feature for supporting JobTracker restart. Running
-jobs can be recovered from the history file. The history file format has
-been modified to support recovery. The task attempt ID now has the
-JobTracker start time to distinguish attempts of the same TIP across
-restarts.<br />(Amar Ramesh Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4007">HADOOP-4007</a>. REMOVE DFSFileInfo - FileStatus is sufficient.<br />(Sanjay Radia via hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3722">HADOOP-3722</a>. Fixed Hadoop Streaming and Hadoop Pipes to use the Tool
-interface and GenericOptionsParser.<br />(Enis Soztutar via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2816">HADOOP-2816</a>. Cluster summary at name node web reports the space
-utilization as:
-Configured Capacity: capacity of all the data directories - Reserved space
-Present Capacity: Space available for DFS, i.e. remaining + used space
-DFS Used%: DFS used space/Present Capacity<br />(Suresh Srinivas via hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3938">HADOOP-3938</a>. Disk space quotas for HDFS. This is similar to namespace
-quotas in 0.18.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4293">HADOOP-4293</a>. Make Configuration Writable and remove unreleased
-WritableJobConf. Configuration.write is renamed to writeXml.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4281">HADOOP-4281</a>. Change dfsadmin to report available disk space in a format
-consistent with the web interface as defined in <a href="http://issues.apache.org/jira/browse/HADOOP-2816">HADOOP-2816</a>.<br />(Suresh
-Srinivas via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4430">HADOOP-4430</a>. Further change the cluster summary at name node web that was
-changed in <a href="http://issues.apache.org/jira/browse/HADOOP-2816">HADOOP-2816</a>:
-  Non DFS Used - This indicates the disk space taken by non-DFS files from
-                 the Configured capacity
-  DFS Used % - DFS Used % of Configured Capacity
-  DFS Remaining % - Remaining % of Configured Capacity available for DFS use
-DFS command line report reflects the same change. Config parameter
-dfs.datanode.du.pct is no longer used and is removed from the
-hadoop-default.xml.<br />(Suresh Srinivas via hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4116">HADOOP-4116</a>. Balancer should provide better resource management.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4599">HADOOP-4599</a>. BlocksMap and BlockInfo made package private.<br />(shv)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.19.0_-_2008-11-18_._new_features_')">  NEW FEATURES
-</a>&nbsp;&nbsp;&nbsp;(39)
-    <ol id="release_0.19.0_-_2008-11-18_._new_features_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3341">HADOOP-3341</a>. Allow streaming jobs to specify the field separator for map
-and reduce input and output. The new configuration values are:
-  stream.map.input.field.separator
-  stream.map.output.field.separator
-  stream.reduce.input.field.separator
-  stream.reduce.output.field.separator
-All of them default to "\t".<br />(Zheng Shao via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3479">HADOOP-3479</a>. Defines the configuration file for the resource manager in
-Hadoop. You can configure various parameters related to scheduling, such
-as queues and queue properties here. The properties for a queue follow a
-naming convention,such as, hadoop.rm.queue.queue-name.property-name.<br />(Hemanth Yamijala via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3149">HADOOP-3149</a>. Adds a way in which map/reducetasks can create multiple
-outputs.<br />(Alejandro Abdelnur via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3714">HADOOP-3714</a>.  Add a new contrib, bash-tab-completion, which enables
-bash tab completion for the bin/hadoop script. See the README file
-in the contrib directory for the installation.<br />(Chris Smith via enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3730">HADOOP-3730</a>. Adds a new JobConf constructor that disables loading
-default configurations.<br />(Alejandro Abdelnur via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3772">HADOOP-3772</a>. Add a new Hadoop Instrumentation api for the JobTracker and
-the TaskTracker, refactor Hadoop Metrics as an implementation of the api.<br />(Ari Rabkin via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2302">HADOOP-2302</a>. Provides a comparator for numerical sorting of key fields.<br />(ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-153">HADOOP-153</a>. Provides a way to skip bad records.<br />(Sharad Agarwal via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-657">HADOOP-657</a>. Free disk space should be modelled and used by the scheduler
-to make scheduling decisions.<br />(Ari Rabkin via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3719">HADOOP-3719</a>. Initial checkin of Chukwa, which is a data collection and
-analysis framework.<br />(Jerome Boulon, Andy Konwinski, Ari Rabkin,
-and Eric Yang)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3873">HADOOP-3873</a>. Add -filelimit and -sizelimit options to distcp to cap the
-number of files/bytes copied in a particular run to support incremental
-updates and mirroring. (TszWo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3585">HADOOP-3585</a>. FailMon package for hardware failure monitoring and
-analysis of anomalies.<br />(Ioannis Koltsidas via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1480">HADOOP-1480</a>. Add counters to the C++ Pipes API.<br />(acmurthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3854">HADOOP-3854</a>. Add support for pluggable servlet filters in the HttpServers.
-(Tsz Wo (Nicholas) Sze via omalley)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3759">HADOOP-3759</a>. Provides ability to run memory intensive jobs without
-affecting other running tasks on the nodes.<br />(Hemanth Yamijala via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3746">HADOOP-3746</a>. Add a fair share scheduler.<br />(Matei Zaharia via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3754">HADOOP-3754</a>. Add a thrift interface to access HDFS.<br />(dhruba via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3828">HADOOP-3828</a>. Provides a way to write skipped records to DFS.<br />(Sharad Agarwal via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3948">HADOOP-3948</a>. Separate name-node edits and fsimage directories.<br />(Lohit Vijayarenu via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3939">HADOOP-3939</a>. Add an option to DistCp to delete files at the destination
-not present at the source. (Tsz Wo (Nicholas) Sze via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3601">HADOOP-3601</a>. Add a new contrib module for Hive, which is a sql-like
-query processing tool that uses map/reduce.<br />(Ashish Thusoo via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3866">HADOOP-3866</a>. Added sort and multi-job updates in the JobTracker web ui.<br />(Craig Weisenfluh via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3698">HADOOP-3698</a>. Add access control to control who is allowed to submit or
-modify jobs in the JobTracker.<br />(Hemanth Yamijala via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1869">HADOOP-1869</a>. Support access times for HDFS files.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3941">HADOOP-3941</a>. Extend FileSystem API to return file-checksums.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3581">HADOOP-3581</a>. Prevents memory intensive user tasks from taking down
-nodes.<br />(Vinod K V via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3970">HADOOP-3970</a>. Provides a way to recover counters written to JobHistory.<br />(Amar Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3702">HADOOP-3702</a>. Adds ChainMapper and ChainReducer classes allow composing
-chains of Maps and Reduces in a single Map/Reduce job, something like
-MAP+ / REDUCE MAP*.<br />(Alejandro Abdelnur via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3445">HADOOP-3445</a>. Add capacity scheduler that provides guaranteed capacities to
-queues as a percentage of the cluster.<br />(Vivek Ratan via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3992">HADOOP-3992</a>. Add a synthetic load generation facility to the test
-directory.<br />(hairong via szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3981">HADOOP-3981</a>. Implement a distributed file checksum algorithm in HDFS
-and change DistCp to use file checksum for comparing src and dst files<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3829">HADOOP-3829</a>. Narrown down skipped records based on user acceptable value.<br />(Sharad Agarwal via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3930">HADOOP-3930</a>. Add common interfaces for the pluggable schedulers and the
-cli &amp; gui clients.<br />(Sreekanth Ramakrishnan via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4176">HADOOP-4176</a>. Implement getFileChecksum(Path) in HftpFileSystem.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-249">HADOOP-249</a>. Reuse JVMs across Map-Reduce Tasks.
-Configuration changes to hadoop-default.xml:
-  add mapred.job.reuse.jvm.num.tasks<br />(Devaraj Das via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4070">HADOOP-4070</a>. Provide a mechanism in Hive for registering UDFs from the
-query language.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2536">HADOOP-2536</a>. Implement a JDBC based database input and output formats to
-allow Map-Reduce applications to work with databases.<br />(Fredrik Hedberg and
-Enis Soztutar via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3019">HADOOP-3019</a>. A new library to support total order partitions.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3924">HADOOP-3924</a>. Added a 'KILLED' job status.<br />(Subramaniam Krishnan via
-acmurthy)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.19.0_-_2008-11-18_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(78)
-    <ol id="release_0.19.0_-_2008-11-18_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4205">HADOOP-4205</a>. hive: metastore and ql to use the refactored SerDe library.<br />(zshao)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4106">HADOOP-4106</a>. libhdfs: add time, permission and user attribute support
-(part 2).<br />(Pete Wyckoff through zshao)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4104">HADOOP-4104</a>. libhdfs: add time, permission and user attribute support.<br />(Pete Wyckoff through zshao)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3908">HADOOP-3908</a>. libhdfs: better error message if llibhdfs.so doesn't exist.<br />(Pete Wyckoff through zshao)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3732">HADOOP-3732</a>. Delay intialization of datanode block verification till
-the verification thread is started.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1627">HADOOP-1627</a>. Various small improvements to 'dfsadmin -report' output.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3577">HADOOP-3577</a>. Tools to inject blocks into name node and simulated
-data nodes for testing.<br />(Sanjay Radia via hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2664">HADOOP-2664</a>. Add a lzop compatible codec, so that files compressed by lzop
-may be processed by map/reduce.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3655">HADOOP-3655</a>. Add additional ant properties to control junit.<br />(Steve
-Loughran via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3543">HADOOP-3543</a>. Update the copyright year to 2008.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3587">HADOOP-3587</a>. Add a unit test for the contrib/data_join framework.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3402">HADOOP-3402</a>. Add terasort example program<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3660">HADOOP-3660</a>. Add replication factor for injecting blocks in simulated
-datanodes.<br />(Sanjay Radia via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3684">HADOOP-3684</a>. Add a cloning function to the contrib/data_join framework
-permitting users to define a more efficient method for cloning values from
-the reduce than serialization/deserialization.<br />(Runping Qi via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3478">HADOOP-3478</a>. Improves the handling of map output fetching. Now the
-randomization is by the hosts (and not the map outputs themselves).<br />(Jothi Padmanabhan via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3617">HADOOP-3617</a>. Removed redundant checks of accounting space in MapTask and
-makes the spill thread persistent so as to avoid creating a new one for
-each spill.<br />(Chris Douglas via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3412">HADOOP-3412</a>. Factor the scheduler out of the JobTracker and make
-it pluggable.<br />(Tom White and Brice Arnould via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3756">HADOOP-3756</a>. Minor. Remove unused dfs.client.buffer.dir from
-hadoop-default.xml.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3747">HADOOP-3747</a>. Adds counter suport for MultipleOutputs.<br />(Alejandro Abdelnur via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3169">HADOOP-3169</a>. LeaseChecker daemon should not be started in DFSClient
-constructor. (TszWo (Nicholas), SZE via hairong)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3824">HADOOP-3824</a>. Move base functionality of StatusHttpServer to a core
-package. (TszWo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3646">HADOOP-3646</a>. Add a bzip2 compatible codec, so bzip compressed data
-may be processed by map/reduce.<br />(Abdul Qadeer via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3861">HADOOP-3861</a>. MapFile.Reader and Writer should implement Closeable.<br />(tomwhite via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3791">HADOOP-3791</a>. Introduce generics into ReflectionUtils.<br />(Chris Smith via
-cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3694">HADOOP-3694</a>. Improve unit test performance by changing
-MiniDFSCluster to listen only on 127.0.0.1.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3620">HADOOP-3620</a>. Namenode should synchronously resolve a datanode's network
-location when the datanode registers.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3860">HADOOP-3860</a>. NNThroughputBenchmark is extended with rename and delete
-benchmarks.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3892">HADOOP-3892</a>. Include unix group name in JobConf.<br />(Matei Zaharia via johan)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3875">HADOOP-3875</a>. Change the time period between heartbeats to be relative to
-the end of the heartbeat rpc, rather than the start. This causes better
-behavior if the JobTracker is overloaded.<br />(acmurthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3853">HADOOP-3853</a>. Move multiple input format (<a href="http://issues.apache.org/jira/browse/HADOOP-372">HADOOP-372</a>) extension to
-library package.<br />(tomwhite via johan)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-9">HADOOP-9</a>. Use roulette scheduling for temporary space when the size
-is not known.<br />(Ari Rabkin via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3202">HADOOP-3202</a>. Use recursive delete rather than FileUtil.fullyDelete.<br />(Amareshwari Sriramadasu via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3368">HADOOP-3368</a>. Remove common-logging.properties from conf.<br />(Steve Loughran
-via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3851">HADOOP-3851</a>. Fix spelling mistake in FSNamesystemMetrics.<br />(Steve Loughran
-via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3780">HADOOP-3780</a>. Remove asynchronous resolution of network topology in the
-JobTracker<br />(Amar Kamat via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3852">HADOOP-3852</a>. Add ShellCommandExecutor.toString method to make nicer
-error messages.<br />(Steve Loughran via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3844">HADOOP-3844</a>. Include message of local exception in RPC client failures.<br />(Steve Loughran via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3935">HADOOP-3935</a>. Split out inner classes from DataNode.java.<br />(johan)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3905">HADOOP-3905</a>. Create generic interfaces for edit log streams.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3062">HADOOP-3062</a>. Add metrics to DataNode and TaskTracker to record network
-traffic for HDFS reads/writes and MR shuffling.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3742">HADOOP-3742</a>. Remove HDFS from public java doc and add javadoc-dev for
-generative javadoc for developers.<br />(Sanjay Radia via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3944">HADOOP-3944</a>. Improve documentation for public TupleWritable class in
-join package.<br />(Chris Douglas via enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2330">HADOOP-2330</a>. Preallocate HDFS transaction log to improve performance.<br />(dhruba and hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3965">HADOOP-3965</a>. Convert DataBlockScanner into a package private class.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3488">HADOOP-3488</a>. Prevent hadoop-daemon from rsync'ing log files<br />(Stefan
-Groschupf and Craig Macdonald via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3342">HADOOP-3342</a>. Change the kill task actions to require http post instead of
-get to prevent accidental crawls from triggering it.<br />(enis via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3937">HADOOP-3937</a>. Limit the job name in the job history filename to 50
-characters.<br />(Matei Zaharia via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3943">HADOOP-3943</a>. Remove unnecessary synchronization in
-NetworkTopology.pseudoSortByDistance.<br />(hairong via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3498">HADOOP-3498</a>. File globbing alternation should be able to span path
-components.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3361">HADOOP-3361</a>. Implement renames for NativeS3FileSystem.<br />(Albert Chern via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3605">HADOOP-3605</a>. Make EC2 scripts show an error message if AWS_ACCOUNT_ID is
-unset.<br />(Al Hoang via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4147">HADOOP-4147</a>. Remove unused class JobWithTaskContext from class
-JobInProgress.<br />(Amareshwari Sriramadasu via johan)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4151">HADOOP-4151</a>. Add a byte-comparable interface that both Text and
-BytesWritable implement.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4174">HADOOP-4174</a>. Move fs image/edit log methods from ClientProtocol to
-NamenodeProtocol.<br />(shv via szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4181">HADOOP-4181</a>. Include a .gitignore and saveVersion.sh change to support
-developing under git.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4186">HADOOP-4186</a>. Factor LineReader out of LineRecordReader.<br />(tomwhite via
-omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4184">HADOOP-4184</a>. Break the module dependencies between core, hdfs, and
-mapred.<br />(tomwhite via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4075">HADOOP-4075</a>. test-patch.sh now spits out ant commands that it runs.<br />(Ramya R via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4117">HADOOP-4117</a>. Improve configurability of Hadoop EC2 instances.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2411">HADOOP-2411</a>. Add support for larger CPU EC2 instance types.<br />(Chris K Wensel via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4083">HADOOP-4083</a>. Changed the configuration attribute queue.name to
-mapred.job.queue.name.<br />(Hemanth Yamijala via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4194">HADOOP-4194</a>. Added the JobConf and JobID to job-related methods in
-JobTrackerInstrumentation for better metrics.<br />(Mac Yang via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3975">HADOOP-3975</a>. Change test-patch script to report working the dir
-modifications preventing the suite from being run.<br />(Ramya R via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4124">HADOOP-4124</a>. Added a command-line switch to allow users to set job
-priorities, also allow it to be manipulated via the web-ui.<br />(Hemanth
-Yamijala via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2165">HADOOP-2165</a>. Augmented JobHistory to include the URIs to the tasks'
-userlogs.<br />(Vinod Kumar Vavilapalli via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4062">HADOOP-4062</a>. Remove the synchronization on the output stream when a
-connection is closed and also remove an undesirable exception when
-a client is stopped while there is no pending RPC request.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4227">HADOOP-4227</a>. Remove the deprecated class org.apache.hadoop.fs.ShellCommand.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4006">HADOOP-4006</a>. Clean up FSConstants and move some of the constants to
-better places.<br />(Sanjay Radia via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4279">HADOOP-4279</a>. Trace the seeds of random sequences in append unit tests to
-make intermittent failures reproducible.<br />(szetszwo via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4209">HADOOP-4209</a>. Remove the change to the format of task attempt id by
-incrementing the task attempt numbers by 1000 when the job restarts.<br />(Amar Kamat via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4301">HADOOP-4301</a>. Adds forrest doc for the skip bad records feature.<br />(Sharad Agarwal via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4354">HADOOP-4354</a>. Separate TestDatanodeDeath.testDatanodeDeath() into 4 tests.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3790">HADOOP-3790</a>. Add more unit tests for testing HDFS file append.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4321">HADOOP-4321</a>. Include documentation for the capacity scheduler.<br />(Hemanth
-Yamijala via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4424">HADOOP-4424</a>. Change menu layout for Hadoop documentation (Boris Shkolnik
-via cdouglas).
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4438">HADOOP-4438</a>. Update forrest documentation to include missing FsShell
-commands.<br />(Suresh Srinivas via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4105">HADOOP-4105</a>.  Add forrest documentation for libhdfs.<br />(Pete Wyckoff via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4510">HADOOP-4510</a>. Make getTaskOutputPath public.<br />(Chris Wensel via omalley)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.19.0_-_2008-11-18_._optimizations_')">  OPTIMIZATIONS
-</a>&nbsp;&nbsp;&nbsp;(11)
-    <ol id="release_0.19.0_-_2008-11-18_._optimizations_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3556">HADOOP-3556</a>. Removed lock contention in MD5Hash by changing the
-singleton MessageDigester by an instance per Thread using
-ThreadLocal.<br />(Iv?n de Prado via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3328">HADOOP-3328</a>. When client is writing data to DFS, only the last
-datanode in the pipeline needs to verify the checksum. Saves around
-30% CPU on intermediate datanodes.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3863">HADOOP-3863</a>. Use a thread-local string encoder rather than a static one
-that is protected by a lock.<br />(acmurthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3864">HADOOP-3864</a>. Prevent the JobTracker from locking up when a job is being
-initialized.<br />(acmurthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3816">HADOOP-3816</a>. Faster directory listing in KFS.<br />(Sriram Rao via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2130">HADOOP-2130</a>. Pipes submit job should have both blocking and non-blocking
-versions.<br />(acmurthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3769">HADOOP-3769</a>. Make the SampleMapper and SampleReducer from
-GenericMRLoadGenerator public, so they can be used in other contexts.<br />(Lingyun Yang via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3514">HADOOP-3514</a>. Inline the CRCs in intermediate files as opposed to reading
-them from a different .crc file.<br />(Jothi Padmanabhan via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3638">HADOOP-3638</a>. Caches the iFile index files in memory to reduce seeks<br />(Jothi Padmanabhan via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4225">HADOOP-4225</a>. FSEditLog.logOpenFile() should persist accessTime
-rather than modificationTime.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4380">HADOOP-4380</a>. Made several new classes (Child, JVMId,
-JobTrackerInstrumentation, QueueManager, ResourceEstimator,
-TaskTrackerInstrumentation, and TaskTrackerMetricsInst) in
-org.apache.hadoop.mapred  package private instead of public.<br />(omalley)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.19.0_-_2008-11-18_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(152)
-    <ol id="release_0.19.0_-_2008-11-18_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3563">HADOOP-3563</a>.  Refactor the distributed upgrade code so that it is
-easier to identify datanode and namenode related code.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3640">HADOOP-3640</a>. Fix the read method in the NativeS3InputStream.<br />(tomwhite via
-omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3711">HADOOP-3711</a>. Fixes the Streaming input parsing to properly find the
-separator.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3725">HADOOP-3725</a>. Prevent TestMiniMRMapDebugScript from swallowing exceptions.<br />(Steve Loughran via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3726">HADOOP-3726</a>. Throw exceptions from TestCLI setup and teardown instead of
-swallowing them.<br />(Steve Loughran via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3721">HADOOP-3721</a>. Refactor CompositeRecordReader and related mapred.join classes
-to make them clearer.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3720">HADOOP-3720</a>. Re-read the config file when dfsadmin -refreshNodes is invoked
-so dfs.hosts and dfs.hosts.exclude are observed.<br />(lohit vijayarenu via
-cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3485">HADOOP-3485</a>. Allow writing to files over fuse.<br />(Pete Wyckoff via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3723">HADOOP-3723</a>. The flags to the libhdfs.create call can be treated as
-a bitmask.<br />(Pete Wyckoff via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3643">HADOOP-3643</a>. Filter out completed tasks when asking for running tasks in
-the JobTracker web/ui.<br />(Amar Kamat via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3777">HADOOP-3777</a>. Ensure that Lzo compressors/decompressors correctly handle the
-case where native libraries aren't available.<br />(Chris Douglas via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3728">HADOOP-3728</a>. Fix SleepJob so that it doesn't depend on temporary files,
-this ensures we can now run more than one instance of SleepJob
-simultaneously.<br />(Chris Douglas via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3795">HADOOP-3795</a>. Fix saving image files on Namenode with different checkpoint
-stamps.<br />(Lohit Vijayarenu via mahadev)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3624">HADOOP-3624</a>. Improving createeditslog to create tree directory structure.<br />(Lohit Vijayarenu via mahadev)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3778">HADOOP-3778</a>. DFSInputStream.seek() did not retry in case of some errors.<br />(LN via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3661">HADOOP-3661</a>. The handling of moving files deleted through fuse-dfs to
-Trash was made similar to the behaviour of the dfs shell.<br />(Pete Wyckoff via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3819">HADOOP-3819</a>. Unset LANG and LC_CTYPE in saveVersion.sh to make it
-compatible with non-English locales.<br />(Rong-En Fan via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3848">HADOOP-3848</a>. Cache calls to getSystemDir in the TaskTracker instead of
-calling it for each task start.<br />(acmurthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3131">HADOOP-3131</a>. Fix reduce progress reporting for compressed intermediate
-data.<br />(Matei Zaharia via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3796">HADOOP-3796</a>. fuse-dfs configuration is implemented as file system
-mount options.<br />(Pete Wyckoff via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3836">HADOOP-3836</a>. Fix TestMultipleOutputs to correctly clean up.<br />(Alejandro
-Abdelnur via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3805">HADOOP-3805</a>. Improve fuse-dfs write performance.<br />(Pete Wyckoff via zshao)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3846">HADOOP-3846</a>. Fix unit test CreateEditsLog to generate paths correctly.<br />(Lohit Vjayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3904">HADOOP-3904</a>. Fix unit tests using the old dfs package name.
-(TszWo (Nicholas), SZE via johan)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3319">HADOOP-3319</a>. Fix some HOD error messages to go stderr instead of
-stdout.<br />(Vinod Kumar Vavilapalli via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3907">HADOOP-3907</a>. Move INodeDirectoryWithQuota to its own .java file.
-(Tsz Wo (Nicholas), SZE via hairong)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3919">HADOOP-3919</a>. Fix attribute name in hadoop-default for
-mapred.jobtracker.instrumentation.<br />(Ari Rabkin via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3903">HADOOP-3903</a>. Change the package name for the servlets to be hdfs instead of
-dfs. (Tsz Wo (Nicholas) Sze via omalley)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3773">HADOOP-3773</a>. Change Pipes to set the default map output key and value
-types correctly.<br />(Koji Noguchi via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3952">HADOOP-3952</a>. Fix compilation error in TestDataJoin referencing dfs package.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3951">HADOOP-3951</a>. Fix package name for FSNamesystem logs and modify other
-hard-coded Logs to use the class name.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3889">HADOOP-3889</a>. Improve error reporting from HftpFileSystem, handling in
-DistCp. (Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3946">HADOOP-3946</a>. Fix TestMapRed after hadoop-3664.<br />(tomwhite via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3949">HADOOP-3949</a>. Remove duplicate jars from Chukwa.<br />(Jerome Boulon via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3933">HADOOP-3933</a>. DataNode sometimes sends up to io.byte.per.checksum bytes
-more than required to client.<br />(Ning Li via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3962">HADOOP-3962</a>. Shell command "fs -count" should support paths with different
-file systems. (Tsz Wo (Nicholas), SZE via mahadev)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3957">HADOOP-3957</a>. Fix javac warnings in DistCp and TestCopyFiles. (Tsz Wo
-(Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3958">HADOOP-3958</a>. Fix TestMapRed to check the success of test-job.<br />(omalley via
-acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3985">HADOOP-3985</a>. Fix TestHDFSServerPorts to use random ports.<br />(Hairong Kuang
-via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3964">HADOOP-3964</a>. Fix javadoc warnings introduced by FailMon.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3785">HADOOP-3785</a>. Fix FileSystem cache to be case-insensitive for scheme and
-authority.<br />(Bill de hOra via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3506">HADOOP-3506</a>. Fix a rare NPE caused by error handling in S3.<br />(Tom White via
-cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3705">HADOOP-3705</a>. Fix mapred.join parser to accept InputFormats named with
-underscore and static, inner classes.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4023">HADOOP-4023</a>. Fix javadoc warnings introduced when the HDFS javadoc was
-made private.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4030">HADOOP-4030</a>. Remove lzop from the default list of codecs.<br />(Arun Murthy via
-cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3961">HADOOP-3961</a>. Fix task disk space requirement estimates for virtual
-input jobs. Delays limiting task placement until after 10% of the maps
-have finished.<br />(Ari Rabkin via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2168">HADOOP-2168</a>. Fix problem with C++ record reader's progress not being
-reported to framework.<br />(acmurthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3966">HADOOP-3966</a>. Copy findbugs generated output files to PATCH_DIR while
-running test-patch.<br />(Ramya R via lohit)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4037">HADOOP-4037</a>. Fix the eclipse plugin for versions of kfs and log4j.<br />(nigel
-via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3950">HADOOP-3950</a>. Cause the Mini MR cluster to wait for task trackers to
-register before continuing.<br />(enis via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3910">HADOOP-3910</a>. Remove unused ClusterTestDFSNamespaceLogging and
-ClusterTestDFS. (Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3954">HADOOP-3954</a>. Disable record skipping by default.<br />(Sharad Agarwal via
-cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4050">HADOOP-4050</a>. Fix TestFairScheduler to use absolute paths for the work
-directory.<br />(Matei Zaharia via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4069">HADOOP-4069</a>. Keep temporary test files from TestKosmosFileSystem under
-test.build.data instead of /tmp.<br />(lohit via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4078">HADOOP-4078</a>. Create test files for TestKosmosFileSystem in separate
-directory under test.build.data.<br />(lohit)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3968">HADOOP-3968</a>. Fix getFileBlockLocations calls to use FileStatus instead
-of Path reflecting the new API.<br />(Pete Wyckoff via lohit)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3963">HADOOP-3963</a>. libhdfs does not exit on its own, instead it returns error
-to the caller and behaves as a true library.<br />(Pete Wyckoff via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4100">HADOOP-4100</a>. Removes the cleanupTask scheduling from the Scheduler
-implementations and moves it to the JobTracker.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4097">HADOOP-4097</a>. Make hive work well with speculative execution turned on.<br />(Joydeep Sen Sarma via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4113">HADOOP-4113</a>. Changes to libhdfs to not exit on its own, rather return
-an error code to the caller.<br />(Pete Wyckoff via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4054">HADOOP-4054</a>. Remove duplicate lease removal during edit log loading.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4071">HADOOP-4071</a>. FSNameSystem.isReplicationInProgress should add an
-underReplicated block to the neededReplication queue using method
-"add" not "update".<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4154">HADOOP-4154</a>. Fix type warnings in WritableUtils.<br />(szetszwo via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4133">HADOOP-4133</a>. Log files generated by Hive should reside in the
-build directory.<br />(Prasad Chakka via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4094">HADOOP-4094</a>. Hive now has hive-default.xml and hive-site.xml similar
-to core hadoop.<br />(Prasad Chakka via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4112">HADOOP-4112</a>. Handles cleanupTask in JobHistory<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3831">HADOOP-3831</a>. Very slow reading clients sometimes failed while reading.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4155">HADOOP-4155</a>. Use JobTracker's start time while initializing JobHistory's
-JobTracker Unique String.<br />(lohit)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4099">HADOOP-4099</a>. Fix null pointer when using HFTP from an 0.18 server.<br />(dhruba via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3570">HADOOP-3570</a>. Includes user specified libjar files in the client side
-classpath.<br />(Sharad Agarwal via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4129">HADOOP-4129</a>. Changed memory limits of TaskTracker and Tasks to be in
-KiloBytes rather than bytes.<br />(Vinod Kumar Vavilapalli via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4139">HADOOP-4139</a>. Optimize Hive multi group-by.<br />(Namin Jain via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3911">HADOOP-3911</a>. Add a check to fsck options to make sure -files is not
-the first option to resolve conflicts with GenericOptionsParser<br />(lohit)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3623">HADOOP-3623</a>. Refactor LeaseManager.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4125">HADOOP-4125</a>. Handles Reduce cleanup tip on the web ui.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4087">HADOOP-4087</a>. Hive Metastore API for php and python clients.<br />(Prasad Chakka via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4197">HADOOP-4197</a>. Update DATA_TRANSFER_VERSION for <a href="http://issues.apache.org/jira/browse/HADOOP-3981">HADOOP-3981</a>.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4138">HADOOP-4138</a>. Refactor the Hive SerDe library to better structure
-the interfaces to the serializer and de-serializer.<br />(Zheng Shao via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4195">HADOOP-4195</a>. Close compressor before returning to codec pool.<br />(acmurthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2403">HADOOP-2403</a>. Escapes some special characters before logging to
-history files.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4200">HADOOP-4200</a>. Fix a bug in the test-patch.sh script.<br />(Ramya R via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4084">HADOOP-4084</a>. Add explain plan capabilities to Hive Query Language.<br />(Ashish Thusoo via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4121">HADOOP-4121</a>. Preserve cause for exception if the initialization of
-HistoryViewer for JobHistory fails.<br />(Amareshwari Sri Ramadasu via
-acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4213">HADOOP-4213</a>. Fixes NPE in TestLimitTasksPerJobTaskScheduler.<br />(Sreekanth Ramakrishnan via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4077">HADOOP-4077</a>. Setting access and modification time for a file
-requires write permissions on the file.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3592">HADOOP-3592</a>. Fix a couple of possible file leaks in FileUtil<br />(Bill de hOra via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4120">HADOOP-4120</a>. Hive interactive shell records the time taken by a
-query.<br />(Raghotham Murthy via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4090">HADOOP-4090</a>. The hive scripts pick up hadoop from HADOOP_HOME
-and then the path.<br />(Raghotham Murthy via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4242">HADOOP-4242</a>. Remove extra ";" in FSDirectory that blocks compilation
-in some IDEs.<br />(szetszwo via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4249">HADOOP-4249</a>. Fix eclipse path to include the hsqldb.jar.<br />(szetszwo via
-omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4247">HADOOP-4247</a>. Move InputSampler into org.apache.hadoop.mapred.lib, so that
-examples.jar doesn't depend on tools.jar.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4269">HADOOP-4269</a>. Fix the deprecation of LineReader by extending the new class
-into the old name and deprecating it. Also update the tests to test the
-new class.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4280">HADOOP-4280</a>. Fix conversions between seconds in C and milliseconds in
-Java for access times for files.<br />(Pete Wyckoff via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4254">HADOOP-4254</a>. -setSpaceQuota command does not convert "TB" extenstion to
-terabytes properly. Implementation now uses StringUtils for parsing this.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4259">HADOOP-4259</a>. Findbugs should run over tools.jar also.<br />(cdouglas via
-omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4275">HADOOP-4275</a>. Move public method isJobValidName from JobID to a private
-method in JobTracker.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4173">HADOOP-4173</a>. fix failures in TestProcfsBasedProcessTree and
-TestTaskTrackerMemoryManager tests. ProcfsBasedProcessTree and
-memory management in TaskTracker are disabled on Windows.<br />(Vinod K V via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4189">HADOOP-4189</a>. Fixes the history blocksize &amp; intertracker protocol version
-issues introduced as part of <a href="http://issues.apache.org/jira/browse/HADOOP-3245">HADOOP-3245</a>.<br />(Amar Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4190">HADOOP-4190</a>. Fixes the backward compatibility issue with Job History.
-introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-3245">HADOOP-3245</a> and <a href="http://issues.apache.org/jira/browse/HADOOP-2403">HADOOP-2403</a>.<br />(Amar Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4237">HADOOP-4237</a>. Fixes the TestStreamingBadRecords.testNarrowDown testcase.<br />(Sharad Agarwal via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4274">HADOOP-4274</a>. Capacity scheduler accidently modifies the underlying
-data structures when browing the job lists.<br />(Hemanth Yamijala via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4309">HADOOP-4309</a>. Fix eclipse-plugin compilation.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4232">HADOOP-4232</a>. Fix race condition in JVM reuse when multiple slots become
-free.<br />(ddas via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4302">HADOOP-4302</a>. Fix a race condition in TestReduceFetch that can yield false
-negatives.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3942">HADOOP-3942</a>. Update distcp documentation to include features introduced in
-<a href="http://issues.apache.org/jira/browse/HADOOP-3873">HADOOP-3873</a>, <a href="http://issues.apache.org/jira/browse/HADOOP-3939">HADOOP-3939</a>. (Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4319">HADOOP-4319</a>. fuse-dfs dfs_read function returns as many bytes as it is
-told to read unless end-of-file is reached.<br />(Pete Wyckoff via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4246">HADOOP-4246</a>. Ensure we have the correct lower bound on the number of
-retries for fetching map-outputs; also fixed the case where the reducer
-automatically kills on too many unique map-outputs could not be fetched
-for small jobs.<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4163">HADOOP-4163</a>. Report FSErrors from map output fetch threads instead of
-merely logging them.<br />(Sharad Agarwal via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4261">HADOOP-4261</a>. Adds a setup task for jobs. This is required so that we
-don't set up jobs that haven't been inited yet (since init could lead
-to job failure). Only after the init has successfully happened do we
-launch the setupJob task.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4256">HADOOP-4256</a>. Removes Completed and Failed Job tables from
-jobqueue_details.jsp.<br />(Sreekanth Ramakrishnan via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4267">HADOOP-4267</a>. Occasional exceptions during shutting down HSQLDB is logged
-but not rethrown.<br />(enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4018">HADOOP-4018</a>. The number of tasks for a single job cannot exceed a
-pre-configured maximum value.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4288">HADOOP-4288</a>. Fixes a NPE problem in CapacityScheduler.<br />(Amar Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4014">HADOOP-4014</a>. Create hard links with 'fsutil hardlink' on Windows.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4393">HADOOP-4393</a>. Merged org.apache.hadoop.fs.permission.AccessControlException
-and org.apache.hadoop.security.AccessControlIOException into a single
-class hadoop.security.AccessControlException.<br />(omalley via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4287">HADOOP-4287</a>. Fixes an issue to do with maintaining counts of running/pending
-maps/reduces.<br />(Sreekanth Ramakrishnan via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4361">HADOOP-4361</a>. Makes sure that jobs killed from command line are killed
-fast (i.e., there is a slot to run the cleanup task soon).<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4400">HADOOP-4400</a>. Add "hdfs://" to fs.default.name on quickstart.html.<br />(Jeff Hammerbacher via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4378">HADOOP-4378</a>. Fix TestJobQueueInformation to use SleepJob rather than
-WordCount via TestMiniMRWithDFS.<br />(Sreekanth Ramakrishnan via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4376">HADOOP-4376</a>. Fix formatting in hadoop-default.xml for
-hadoop.http.filter.initializers.<br />(Enis Soztutar via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4410">HADOOP-4410</a>. Adds an extra arg to the API FileUtil.makeShellPath to
-determine whether to canonicalize file paths or not.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4236">HADOOP-4236</a>. Ensure un-initialized jobs are killed correctly on
-user-demand.<br />(Sharad Agarwal via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4373">HADOOP-4373</a>. Fix calculation of Guaranteed Capacity for the
-capacity-scheduler.<br />(Hemanth Yamijala via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4053">HADOOP-4053</a>. Schedulers must be notified when jobs complete.<br />(Amar Kamat via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4335">HADOOP-4335</a>. Fix FsShell -ls for filesystems without owners/groups.<br />(David
-Phillips via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4426">HADOOP-4426</a>. TestCapacityScheduler broke due to the two commits <a href="http://issues.apache.org/jira/browse/HADOOP-4053">HADOOP-4053</a>
-and <a href="http://issues.apache.org/jira/browse/HADOOP-4373">HADOOP-4373</a>. This patch fixes that.<br />(Hemanth Yamijala via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4418">HADOOP-4418</a>. Updates documentation in forrest for Mapred, streaming and pipes.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3155">HADOOP-3155</a>. Ensure that there is only one thread fetching
-TaskCompletionEvents on TaskTracker re-init.<br />(Dhruba Borthakur via
-acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4425">HADOOP-4425</a>. Fix EditLogInputStream to overload the bulk read method.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4427">HADOOP-4427</a>. Adds the new queue/job commands to the manual.<br />(Sreekanth Ramakrishnan via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4278">HADOOP-4278</a>. Increase debug logging for unit test TestDatanodeDeath.
-Fix the case when primary is dead.<br />(dhruba via szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4423">HADOOP-4423</a>. Keep block length when the block recovery is triggered by
-append.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4449">HADOOP-4449</a>. Fix dfsadmin usage.<br />(Raghu Angadi via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4455">HADOOP-4455</a>. Added TestSerDe so that unit tests can run successfully.<br />(Ashish Thusoo via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4457">HADOOP-4457</a>. Fixes an input split logging problem introduced by
-<a href="http://issues.apache.org/jira/browse/HADOOP-3245">HADOOP-3245</a>.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4464">HADOOP-4464</a>. Separate out TestFileCreationClient from TestFileCreation.
-(Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4404">HADOOP-4404</a>. saveFSImage() removes files from a storage directory that do
-not correspond to its type.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4149">HADOOP-4149</a>. Fix handling of updates to the job priority, by changing the
-list of jobs to be keyed by the priority, submit time, and job tracker id.<br />(Amar Kamat via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4296">HADOOP-4296</a>. Fix job client failures by not retiring a job as soon as it
-is finished.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4439">HADOOP-4439</a>. Remove configuration variables that aren't usable yet, in
-particular mapred.tasktracker.tasks.maxmemory and mapred.task.max.memory.<br />(Hemanth Yamijala via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4230">HADOOP-4230</a>. Fix for serde2 interface, limit operator, select * operator,
-UDF trim functions and sampling.<br />(Ashish Thusoo via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4358">HADOOP-4358</a>. No need to truncate access time in INode. Also fixes NPE
-in CreateEditsLog.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4387">HADOOP-4387</a>. TestHDFSFileSystemContract fails on windows nightly builds.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4466">HADOOP-4466</a>. Ensure that SequenceFileOutputFormat isn't tied to Writables
-and can be used with other Serialization frameworks.<br />(Chris Wensel via
-acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4525">HADOOP-4525</a>. Fix ipc.server.ipcnodelay originally missed in in <a href="http://issues.apache.org/jira/browse/HADOOP-2232">HADOOP-2232</a>.<br />(cdouglas via Clint Morgan)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4498">HADOOP-4498</a>. Ensure that JobHistory correctly escapes the job name so that
-regex patterns work.<br />(Chris Wensel via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4446">HADOOP-4446</a>. Modify guaranteed capacity labels in capacity scheduler's UI
-to reflect the information being displayed.<br />(Sreekanth Ramakrishnan via
-yhemanth)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4282">HADOOP-4282</a>. Some user facing URLs are not filtered by user filters.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4595">HADOOP-4595</a>. Fixes two race conditions - one to do with updating free slot count,
-and another to do with starting the MapEventsFetcher thread.<br />(ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4552">HADOOP-4552</a>. Fix a deadlock in RPC server.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4471">HADOOP-4471</a>. Sort running jobs by priority in the capacity scheduler.<br />(Amar Kamat via yhemanth)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4500">HADOOP-4500</a>. Fix MultiFileSplit to get the FileSystem from the relevant
-path rather than the JobClient.<br />(Joydeep Sen Sarma via cdouglas)</li>
-    </ol>
-  </li>
-</ul>
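
Editor's note on the HADOOP-3556 entry above: the fix removes lock contention by giving each thread its own message digester instead of sharing one behind a lock. A minimal, hypothetical Java sketch of that ThreadLocal pattern (not the actual MD5Hash code; the class and method names here are illustrative only) could look like this:

```java
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class PerThreadDigest {
  // Each thread lazily gets its own MessageDigest, so no shared lock is needed.
  private static final ThreadLocal<MessageDigest> DIGESTER =
      new ThreadLocal<MessageDigest>() {
        @Override
        protected MessageDigest initialValue() {
          try {
            return MessageDigest.getInstance("MD5");
          } catch (NoSuchAlgorithmException e) {
            throw new RuntimeException("MD5 not available", e);
          }
        }
      };

  public static byte[] md5(byte[] data) {
    MessageDigest md = DIGESTER.get();
    md.reset(); // reuse the per-thread instance between calls
    return md.digest(data);
  }
}
```

The real change was made in Hadoop's MD5Hash class; this sketch only shows the general per-thread-instance technique the entry describes.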
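Editor's note on the HADOOP-4083 entry above: after the rename, jobs select a queue through the mapred.job.queue.name property. A hedged illustration follows; the queue name "default" and the example class are assumptions, not taken from this changelog:

```java
import org.apache.hadoop.mapred.JobConf;

public class QueueConfigExample {
  public static void main(String[] args) {
    // Illustrative only: point the job at a queue named "default".
    JobConf conf = new JobConf(QueueConfigExample.class);
    conf.set("mapred.job.queue.name", "default");
    System.out.println("queue = " + conf.get("mapred.job.queue.name"));
  }
}
```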
-<h3><a href="javascript:toggleList('release_0.18.3_-_unreleased_')">Release 0.18.3 - Unreleased
-</a></h3>
-<ul id="release_0.18.3_-_unreleased_">
-  <li><a href="javascript:toggleList('release_0.18.3_-_unreleased_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(2)
-    <ol id="release_0.18.3_-_unreleased_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4150">HADOOP-4150</a>. Include librecordio in hadoop releases.<br />(Giridharan Kesavan
-via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4668">HADOOP-4668</a>. Improve documentation for setCombinerClass to clarify the
-restrictions on combiners.<br />(omalley)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.18.3_-_unreleased_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(29)
-    <ol id="release_0.18.3_-_unreleased_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4499">HADOOP-4499</a>. DFSClient should invoke checksumOk only once.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4597">HADOOP-4597</a>. Calculate mis-replicated blocks when safe-mode is turned
-off manually.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3121">HADOOP-3121</a>. lsr should keep listing the remaining items but not
-terminate if there is any IOException.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4610">HADOOP-4610</a>. Always calculate mis-replicated blocks when safe-mode is
-turned off.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3883">HADOOP-3883</a>. Limit namenode to assign at most one generation stamp for
-a particular block within a short period.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4556">HADOOP-4556</a>. Block went missing.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4643">HADOOP-4643</a>. NameNode should exclude excessive replicas when counting
-live replicas for a block.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4703">HADOOP-4703</a>. Should not wait for proxy forever in lease recovering.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4647">HADOOP-4647</a>. NamenodeFsck should close the DFSClient it has created.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4616">HADOOP-4616</a>. Fuse-dfs can handle bad values from FileSystem.read call.<br />(Pete Wyckoff via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4061">HADOOP-4061</a>. Throttle Datanode decommission monitoring in Namenode.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4659">HADOOP-4659</a>. Root cause of connection failure is being lost to code that
-uses it for delaying startup.<br />(Steve Loughran and Hairong via hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4614">HADOOP-4614</a>. Lazily open segments when merging map spills to avoid using
-too many file descriptors.<br />(Yuri Pradkin via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4257">HADOOP-4257</a>. The DFS client should pick only one datanode as the candidate
-to initiate lease recovery.  (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4713">HADOOP-4713</a>. Fix librecordio to handle records larger than 64k.<br />(Christian
-Kunz via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4635">HADOOP-4635</a>. Fix a memory leak in fuse dfs.<br />(pete wyckoff via mahadev)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4714">HADOOP-4714</a>. Report status between merges and make the number of records
-between progress reports configurable.<br />(Jothi Padmanabhan via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4726">HADOOP-4726</a>. Fix documentation typos "the the".<br />(Edward J. Yoon via
-szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4679">HADOOP-4679</a>. Datanode prints tons of log messages: waiting for threadgroup
-to exit, active threads is XX.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4746">HADOOP-4746</a>. Job output directory should be normalized.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4717">HADOOP-4717</a>. Removal of default port# in NameNode.getUri() causes a
-map/reduce job failed to prompt temporary output.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4778">HADOOP-4778</a>. Check for zero size block meta file when updating a block.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4742">HADOOP-4742</a>. Replica gets deleted by mistake.<br />(Wang Xu via hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4702">HADOOP-4702</a>. Failed block replication leaves an incomplete block in
-receiver's tmp data directory.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4613">HADOOP-4613</a>. Fix block browsing on Web UI.<br />(Johan Oskarsson via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4806">HADOOP-4806</a>. HDFS rename should not use src path as a regular expression.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4795">HADOOP-4795</a>. Prevent lease monitor getting into an infinite loop when
-leases and the namespace tree does not match.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4620">HADOOP-4620</a>. Fixes Streaming to handle well the cases of map/reduce with empty
-input/output.<br />(Ravi Gummadi via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4857">HADOOP-4857</a>. Fixes TestUlimit to have exactly 1 map in the jobs spawned.<br />(Ravi Gummadi via ddas)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.18.2_-_2008-11-03_')">Release 0.18.2 - 2008-11-03
-</a></h3>
-<ul id="release_0.18.2_-_2008-11-03_">
-  <li><a href="javascript:toggleList('release_0.18.2_-_2008-11-03_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(16)
-    <ol id="release_0.18.2_-_2008-11-03_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3614">HADOOP-3614</a>. Fix a bug that Datanode may use an old GenerationStamp to get
-meta file.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4314">HADOOP-4314</a>. Simulated datanodes should not include blocks that are still
-being written in their block report.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4228">HADOOP-4228</a>. dfs datanode metrics, bytes_read and bytes_written, overflow
-due to incorrect type used.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4395">HADOOP-4395</a>. The FSEditLog loading is incorrect for the case OP_SET_OWNER.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4351">HADOOP-4351</a>. FSNamesystem.getBlockLocationsInternal throws
-ArrayIndexOutOfBoundsException.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4403">HADOOP-4403</a>. Make TestLeaseRecovery and TestFileCreation more robust.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4292">HADOOP-4292</a>. Do not support append() for LocalFileSystem.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4399">HADOOP-4399</a>. Make fuse-dfs multi-thread access safe.<br />(Pete Wyckoff via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4369">HADOOP-4369</a>. Use setMetric(...) instead of incrMetric(...) for metrics
-averages.<br />(Brian Bockelman via szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4469">HADOOP-4469</a>. Rename and add the ant task jar file to the tar file.<br />(nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3914">HADOOP-3914</a>. DFSClient sends Checksum Ok only once for a block.<br />(Christian Kunz via hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4467">HADOOP-4467</a>. SerializationFactory now uses the current context ClassLoader
-allowing for user supplied Serialization instances.<br />(Chris Wensel via
-acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4517">HADOOP-4517</a>. Release FSDataset lock before joining ongoing create threads.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4526">HADOOP-4526</a>. fsck failing with NullPointerException.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4483">HADOOP-4483</a> Honor the max parameter in DatanodeDescriptor.getBlockArray(..)<br />(Ahad Rana and Hairong Kuang via szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4340">HADOOP-4340</a>. Correctly set the exit code from JobShell.main so that the
-'hadoop jar' command returns the right code to the user.<br />(acmurthy)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.18.2_-_2008-11-03_._new_features_')">  NEW FEATURES
-</a>&nbsp;&nbsp;&nbsp;(1)
-    <ol id="release_0.18.2_-_2008-11-03_._new_features_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2421">HADOOP-2421</a>.  Add jdiff output to documentation, listing all API
-changes from the prior release.<br />(cutting)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.18.1_-_2008-09-17_')">Release 0.18.1 - 2008-09-17
-</a></h3>
-<ul id="release_0.18.1_-_2008-09-17_">
-  <li><a href="javascript:toggleList('release_0.18.1_-_2008-09-17_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(1)
-    <ol id="release_0.18.1_-_2008-09-17_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3934">HADOOP-3934</a>. Upgrade log4j to 1.2.15.<br />(omalley)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.18.1_-_2008-09-17_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(5)
-    <ol id="release_0.18.1_-_2008-09-17_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3995">HADOOP-3995</a>. In case of quota failure on HDFS, rename does not restore
-source filename.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3821">HADOOP-3821</a>. Prevent SequenceFile and IFile from duplicating codecs in
-CodecPool when closed more than once.<br />(Arun Murthy via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4040">HADOOP-4040</a>. Remove coded default of the IPC idle connection timeout
-from the TaskTracker, which was causing HDFS client connections to not be
-collected.<br />(ddas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4046">HADOOP-4046</a>. Made WritableComparable's constructor protected instead of
-private to re-enable class derivation.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3940">HADOOP-3940</a>. Fix in-memory merge condition to wait when there are no map
-outputs or when the final map outputs are being fetched without contention.<br />(cdouglas)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.18.0_-_2008-08-19_')">Release 0.18.0 - 2008-08-19
-</a></h3>
-<ul id="release_0.18.0_-_2008-08-19_">
-  <li><a href="javascript:toggleList('release_0.18.0_-_2008-08-19_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(23)
-    <ol id="release_0.18.0_-_2008-08-19_._incompatible_changes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2703">HADOOP-2703</a>.  The default options to fsck skips checking files
-that are being written to. The output of fsck is incompatible
-with previous release.<br />(lohit vijayarenu via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2865">HADOOP-2865</a>. FsShell.ls() printout format changed to print file names
-at the end of the line.<br />(Edward J. Yoon via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3283">HADOOP-3283</a>. The Datanode has a RPC server. It currently supports
-two RPCs: the first RPC retrieves the metadata about a block and the
-second RPC sets the generation stamp of an existing block.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2797">HADOOP-2797</a>. Code related to upgrading to 0.14 (Block CRCs) is
-removed. As result, upgrade to 0.18 or later from 0.13 or earlier
-is not supported. If upgrading from 0.13 or earlier is required,
-please upgrade to an intermediate version (0.14-0.17) and then
-to this version.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-544">HADOOP-544</a>. This issue introduces new classes JobID, TaskID and
-TaskAttemptID, which should be used instead of their string counterparts.
-Functions in JobClient, TaskReport, RunningJob, jobcontrol.Job and
-TaskCompletionEvent that use string arguments are deprecated in favor
-of the corresponding ones that use ID objects. Applications can use
-xxxID.toString() and xxxID.forName() methods to convert/restore objects
-to/from strings.<br />(Enis Soztutar via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2188">HADOOP-2188</a>. RPC client sends a ping rather than throw timeouts.
-RPC server does not throw away old RPCs. If clients and the server are on
-different versions, they are not able to function well. In addition,
-The property ipc.client.timeout is removed from the default hadoop
-configuration. It also removes metrics RpcOpsDiscardedOPsNum.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2181">HADOOP-2181</a>. This issue adds logging for input splits in Jobtracker log
-and jobHistory log. Also adds web UI for viewing input splits in job UI
-and history UI.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3226">HADOOP-3226</a>. Run combiners multiple times over map outputs as they
-are merged in both the map and the reduce tasks.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3329">HADOOP-3329</a>.  DatanodeDescriptor objects should not be stored in the
-fsimage.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2656">HADOOP-2656</a>.  The Block object has a generation stamp inside it.
-Existing blocks get a generation stamp of 0. This is needed to support
-appends.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3390">HADOOP-3390</a>. Removed deprecated ClientProtocol.abandonFileInProgress().
-(Tsz Wo (Nicholas), SZE via rangadi)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3405">HADOOP-3405</a>. Made some map/reduce internal classes non-public:
-MapTaskStatus, ReduceTaskStatus, JobSubmissionProtocol,
-CompletedJobStatusStore.<br />(enis via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3265">HADOOP-3265</a>. Removed depcrecated API getFileCacheHints().<br />(Lohit Vijayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3310">HADOOP-3310</a>. The namenode instructs the primary datanode to do lease
-recovery. The block gets a new  generation stamp.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2909">HADOOP-2909</a>. Improve IPC idle connection management. Property
-ipc.client.maxidletime is removed from the default configuration,
-instead it is defined as twice the value of ipc.client.connection.maxidletime.
-A connection with outstanding requests won't be treated as idle.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3459">HADOOP-3459</a>. Change in the output format of dfs -ls to more closely match
-/bin/ls. New format is: perm repl owner group size date name<br />(Mukund Madhugiri via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3113">HADOOP-3113</a>. An fsync invoked on a HDFS file really really
-persists data! The datanode moves blocks in the tmp directory to
-the real block directory on a datanode-restart.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3452">HADOOP-3452</a>. Change fsck to return non-zero status for a corrupt
-FileSystem.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3193">HADOOP-3193</a>. Include the address of the client that found the corrupted
-block in the log. Also include a CorruptedBlocks metric to track the size
-of the corrupted block map.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3512">HADOOP-3512</a>. Separate out the tools into a tools jar.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3598">HADOOP-3598</a>. Ensure that temporary task-output directories are not created
-if they are not necessary e.g. for Maps with no side-effect files.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3665">HADOOP-3665</a>. Modify WritableComparator so that it only creates instances
-of the keytype if the type does not define a WritableComparator. Calling
-the superclass compare will throw a NullPointerException. Also define
-a RawComparator for NullWritable and permit it to be written as a key
-to SequenceFiles.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3673">HADOOP-3673</a>. Avoid deadlock caused by DataNode RPC receoverBlock().
-(Tsz Wo (Nicholas), SZE via rangadi)
-</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.18.0_-_2008-08-19_._new_features_')">  NEW FEATURES
-</a>&nbsp;&nbsp;&nbsp;(25)
-    <ol id="release_0.18.0_-_2008-08-19_._new_features_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3074">HADOOP-3074</a>. Provides a UrlStreamHandler for DFS and other FS,
-relying on FileSystem<br />(taton)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2585">HADOOP-2585</a>. Name-node imports namespace data from a recent checkpoint
-accessible via a NFS mount.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3061">HADOOP-3061</a>. Writable types for doubles and bytes.<br />(Andrzej
-Bialecki via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2857">HADOOP-2857</a>. Allow libhdfs to set jvm options.<br />(Craig Macdonald
-via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3317">HADOOP-3317</a>. Add default port for HDFS namenode.  The port in
-"hdfs:" URIs now defaults to 8020, so that one may simply use URIs
-of the form "hdfs://example.com/dir/file".<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2019">HADOOP-2019</a>. Adds support for .tar, .tgz and .tar.gz files in
-DistributedCache<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3058">HADOOP-3058</a>. Add FSNamesystem status metrics.<br />(Lohit Vjayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1915">HADOOP-1915</a>. Allow users to specify counters via strings instead
-of enumerations.<br />(tomwhite via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2065">HADOOP-2065</a>. Delay invalidating corrupt replicas of block until its
-is removed from under replicated state. If all replicas are found to
-be corrupt, retain all copies and mark the block as corrupt.<br />(Lohit Vjayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3221">HADOOP-3221</a>. Adds org.apache.hadoop.mapred.lib.NLineInputFormat, which
-splits files into splits each of N lines. N can be specified by
-configuration property "mapred.line.input.format.linespermap", which
-defaults to 1.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3336">HADOOP-3336</a>. Direct a subset of annotated FSNamesystem calls for audit
-logging.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3400">HADOOP-3400</a>. A new API FileSystem.deleteOnExit() that facilitates
-handling of temporary files in HDFS.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4">HADOOP-4</a>.  Add fuse-dfs to contrib, permitting one to mount an
-HDFS filesystem on systems that support FUSE, e.g., Linux.<br />(Pete Wyckoff via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3246">HADOOP-3246</a>. Add FTPFileSystem.<br />(Ankur Goel via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3250">HADOOP-3250</a>. Extend FileSystem API to allow appending to files.
-(Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3177">HADOOP-3177</a>. Implement Syncable interface for FileSystem.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1328">HADOOP-1328</a>. Implement user counters in streaming.<br />(tomwhite via
-omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3187">HADOOP-3187</a>. Quotas for namespace management.<br />(Hairong Kuang via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3307">HADOOP-3307</a>. Support for Archives in Hadoop.<br />(Mahadev Konar via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3460">HADOOP-3460</a>. Add SequenceFileAsBinaryOutputFormat to permit direct
-writes of serialized data.<br />(Koji Noguchi via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3230">HADOOP-3230</a>. Add ability to get counter values from command
-line.<br />(tomwhite via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-930">HADOOP-930</a>. Add support for native S3 files.<br />(tomwhite via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3502">HADOOP-3502</a>. Quota API needs documentation in Forrest.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3413">HADOOP-3413</a>. Allow SequenceFile.Reader to use serialization
-framework.<br />(tomwhite via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3541">HADOOP-3541</a>. Import of the namespace from a checkpoint documented
-in hadoop user guide.<br />(shv)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.18.0_-_2008-08-19_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(47)
-    <ol id="release_0.18.0_-_2008-08-19_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3677">HADOOP-3677</a>. Simplify generation stamp upgrade by making is a
-local upgrade on datandodes. Deleted distributed upgrade.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2928">HADOOP-2928</a>. Remove deprecated FileSystem.getContentLength().<br />(Lohit Vijayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3130">HADOOP-3130</a>. Make the connect timeout smaller for getFile.<br />(Amar Ramesh Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3160">HADOOP-3160</a>. Remove deprecated exists() from ClientProtocol and
-FSNamesystem.<br />(Lohit Vijayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2910">HADOOP-2910</a>. Throttle IPC Clients during bursts of requests or
-server slowdown. Clients retry connection for up to 15 minutes
-when socket connection times out.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3295">HADOOP-3295</a>. Allow TextOutputFormat to use configurable spearators.
-(Zheng Shao via cdouglas).
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3308">HADOOP-3308</a>. Improve QuickSort by excluding values eq the pivot from the
-partition.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2461">HADOOP-2461</a>. Trim property names in configuration.
-(Tsz Wo (Nicholas), SZE via shv)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2799">HADOOP-2799</a>. Deprecate o.a.h.io.Closable in favor of java.io.Closable.
-(Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3345">HADOOP-3345</a>. Enhance the hudson-test-patch target to cleanup messages,
-fix minor defects, and add eclipse plugin and python unit tests.<br />(nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3144">HADOOP-3144</a>. Improve robustness of LineRecordReader by defining a maximum
-line length (mapred.linerecordreader.maxlength), thereby avoiding reading
-too far into the following split.<br />(Zheng Shao via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3334">HADOOP-3334</a>. Move lease handling from FSNamesystem into a seperate class.
-(Tsz Wo (Nicholas), SZE via rangadi)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3332">HADOOP-3332</a>. Reduces the amount of logging in Reducer's shuffle phase.<br />(Devaraj Das)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3355">HADOOP-3355</a>. Enhances Configuration class to accept hex numbers for getInt
-and getLong.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3350">HADOOP-3350</a>. Add an argument to distcp to permit the user to limit the
-number of maps.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3013">HADOOP-3013</a>. Add corrupt block reporting to fsck.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3377">HADOOP-3377</a>. Remove TaskRunner::replaceAll and replace with equivalent
-String::replace.<br />(Brice Arnould via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3398">HADOOP-3398</a>. Minor improvement to a utility function in that participates
-in backoff calculation.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3381">HADOOP-3381</a>. Clear referenced when directories are deleted so that
-effect of memory leaks are not multiplied.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2867">HADOOP-2867</a>. Adds the task's CWD to its LD_LIBRARY_PATH.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3232">HADOOP-3232</a>. DU class runs the 'du' command in a seperate thread so
-that it does not block user. DataNode misses heartbeats in large
-nodes otherwise.<br />(Johan Oskarsson via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3035">HADOOP-3035</a>. During block transfers between datanodes, the receiving
-datanode, now can report corrupt replicas received from src node to
-the namenode.<br />(Lohit Vijayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3434">HADOOP-3434</a>. Retain the cause of the bind failure in Server::bind.<br />(Steve Loughran via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3429">HADOOP-3429</a>. Increases the size of the buffers used for the communication
-for Streaming jobs.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3486">HADOOP-3486</a>. Change default for initial block report to 0 seconds
-and document it.<br />(Sanjay Radia via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3448">HADOOP-3448</a>. Improve the text in the assertion making sure the
-layout versions are consistent in the data node.<br />(Steve Loughran
-via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2095">HADOOP-2095</a>. Improve the Map-Reduce shuffle/merge by cutting down
-buffer-copies; changed intermediate sort/merge to use the new IFile format
-rather than SequenceFiles, and compression of map-outputs is now
-implemented by compressing the entire file rather than using SequenceFile
-compression. The shuffle has also been changed to use a simple byte-buffer
-manager rather than the InMemoryFileSystem.
-Configuration changes to hadoop-default.xml:
-  deprecated mapred.map.output.compression.type<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-236">HADOOP-236</a>. JobTacker now refuses connection from a task tracker with a
-different version number.<br />(Sharad Agarwal via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3427">HADOOP-3427</a>. Improves the shuffle scheduler. It now waits for notifications
-from shuffle threads when it has scheduled enough, before scheduling more.<br />(ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2393">HADOOP-2393</a>. Moves the handling of dir deletions in the tasktracker to
-a separate thread.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3501">HADOOP-3501</a>. Deprecate InMemoryFileSystem.<br />(cutting via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3366">HADOOP-3366</a>. Stall the shuffle while in-memory merge is in progress.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2916">HADOOP-2916</a>. Refactor src structure, but leave package structure alone.<br />(Raghu Angadi via mukund)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3492">HADOOP-3492</a>. Add forrest documentation for user archives.<br />(Mahadev Konar via hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3467">HADOOP-3467</a>. Improve documentation for FileSystem::deleteOnExit.
-(Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3379">HADOOP-3379</a>. Documents stream.non.zero.exit.status.is.failure for Streaming.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3096">HADOOP-3096</a>. Improves documentation about the Task Execution Environment in
-the Map-Reduce tutorial.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2984">HADOOP-2984</a>. Add forrest documentation for DistCp.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3406">HADOOP-3406</a>. Add forrest documentation for Profiling.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2762">HADOOP-2762</a>. Add forrest documentation for controls of memory limits on
-hadoop daemons and Map-Reduce tasks.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3535">HADOOP-3535</a>. Fix documentation and name of IOUtils.close to
-reflect that it should only be used in cleanup contexts.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3593">HADOOP-3593</a>. Updates the mapred tutorial.<br />(ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3547">HADOOP-3547</a>. Documents the way in which native libraries can be distributed
-via the DistributedCache.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3606">HADOOP-3606</a>. Updates the Streaming doc.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3532">HADOOP-3532</a>. Add jdiff reports to the build scripts.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3100">HADOOP-3100</a>. Develop tests to test the DFS command line interface.<br />(mukund)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3688">HADOOP-3688</a>. Fix up HDFS docs.<br />(Robert Chansler via hairong)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.18.0_-_2008-08-19_._optimizations_')">  OPTIMIZATIONS
-</a>&nbsp;&nbsp;&nbsp;(10)
-    <ol id="release_0.18.0_-_2008-08-19_._optimizations_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3274">HADOOP-3274</a>. The default constructor of BytesWritable creates empty
-byte array. (Tsz Wo (Nicholas), SZE via shv)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3272">HADOOP-3272</a>. Remove redundant copy of Block object in BlocksMap.<br />(Lohit Vjayarenu via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3164">HADOOP-3164</a>. Reduce DataNode CPU usage by using FileChannel.tranferTo().
-On Linux DataNode takes 5 times less CPU while serving data. Results may
-vary on other platforms.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3248">HADOOP-3248</a>. Optimization of saveFSImage.<br />(Dhruba via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3297">HADOOP-3297</a>. Fetch more task completion events from the job
-tracker and task tracker.<br />(ddas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3364">HADOOP-3364</a>. Faster image and log edits loading.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3369">HADOOP-3369</a>. Fast block processing during name-node startup.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1702">HADOOP-1702</a>. Reduce buffer copies when data is written to DFS.
-DataNodes take 30% less CPU while writing data.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3095">HADOOP-3095</a>. Speed up split generation in the FileInputSplit,
-especially for non-HDFS file systems. Deprecates
-InputFormat.validateInput.<br />(tomwhite via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3552">HADOOP-3552</a>. Add forrest documentation for Hadoop commands.<br />(Sharad Agarwal via cdouglas)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.18.0_-_2008-08-19_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(144)
-    <ol id="release_0.18.0_-_2008-08-19_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2905">HADOOP-2905</a>. 'fsck -move' triggers NPE in NameNode.<br />(Lohit Vjayarenu via rangadi)</li>
-      <li>Increment ClientProtocol.versionID missed by <a href="http://issues.apache.org/jira/browse/HADOOP-2585">HADOOP-2585</a>.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3254">HADOOP-3254</a>. Restructure internal namenode methods that process
-heartbeats to use well-defined BlockCommand object(s) instead of
-using the base java Object. (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3176">HADOOP-3176</a>.  Change lease record when a open-for-write-file
-gets renamed.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3269">HADOOP-3269</a>.  Fix a case when namenode fails to restart
-while processing a lease record.  (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3282">HADOOP-3282</a>. Port issues in TestCheckpoint resolved.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3268">HADOOP-3268</a>. file:// URLs issue in TestUrlStreamHandler under Windows.<br />(taton)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3127">HADOOP-3127</a>. Deleting files in trash should really remove them.<br />(Brice Arnould via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3300">HADOOP-3300</a>. Fix locking of explicit locks in NetworkTopology.<br />(tomwhite via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3270">HADOOP-3270</a>. Constant DatanodeCommands are stored in static final
-immutable variables for better code clarity.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2793">HADOOP-2793</a>. Fix broken links for worst performing shuffle tasks in
-the job history page.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3313">HADOOP-3313</a>. Avoid unnecessary calls to System.currentTimeMillis
-in RPC::Invoker.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3318">HADOOP-3318</a>. Recognize "Darwin" as an alias for "Mac OS X" to
-support Soylatte.<br />(Sam Pullara via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3301">HADOOP-3301</a>. Fix misleading error message when S3 URI hostname
-contains an underscore.<br />(tomwhite via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3338">HADOOP-3338</a>. Fix Eclipse plugin to compile after <a href="http://issues.apache.org/jira/browse/HADOOP-544">HADOOP-544</a> was
-committed. Updated all references to use the new JobID representation.<br />(taton via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3337">HADOOP-3337</a>. Loading FSEditLog was broken by <a href="http://issues.apache.org/jira/browse/HADOOP-3283">HADOOP-3283</a> since it
-changed Writable serialization of DatanodeInfo. This patch handles it.
-(Tsz Wo (Nicholas), SZE via rangadi)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3101">HADOOP-3101</a>. Prevent JobClient from throwing an exception when printing
-usage.<br />(Edward J. Yoon via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3119">HADOOP-3119</a>. Update javadoc for Text::getBytes to better describe its
-behavior.<br />(Tim Nelson via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2294">HADOOP-2294</a>. Fix documentation in libhdfs to refer to the correct free
-function.<br />(Craig Macdonald via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3335">HADOOP-3335</a>. Prevent the libhdfs build from deleting the wrong
-files on make clean.<br />(cutting via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2930">HADOOP-2930</a>. Make {start,stop}-balancer.sh work even if hadoop-daemon.sh
-is not in the PATH.<br />(Spiros Papadimitriou via hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3085">HADOOP-3085</a>. Catch Exception in metrics util classes to ensure that
-misconfigured metrics don't prevent others from updating.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3299">HADOOP-3299</a>. CompositeInputFormat should configure the sub-input
-formats.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3309">HADOOP-3309</a>. Lower io.sort.mb and fs.inmemory.size.mb for MiniMRDFSSort
-unit test so it passes on Windows.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3348">HADOOP-3348</a>. TestUrlStreamHandler should set URLStreamFactory after
-DataNodes are initialized.<br />(Lohit Vijayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3371">HADOOP-3371</a>. Ignore InstanceAlreadyExistsException from
-MBeanUtil::registerMBean.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3349">HADOOP-3349</a>. A file rename was incorrectly changing the name inside a
-lease record. (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3365">HADOOP-3365</a>. Removes an unnecessary copy of the key from SegmentDescriptor
-to MergeQueue.<br />(Devaraj Das)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3388">HADOOP-3388</a>. Fix for TestDatanodeBlockScanner to handle blocks with
-generation stamps in them.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3203">HADOOP-3203</a>. Fixes TaskTracker::localizeJob to pass correct file sizes
-for the jarfile and the jobfile.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3391">HADOOP-3391</a>. Fix a findbugs warning introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-3248">HADOOP-3248</a><br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3393">HADOOP-3393</a>. Fix datanode shutdown to call DataBlockScanner::shutdown and
-close its log, even if the scanner thread is not running.<br />(lohit vijayarenu
-via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3399">HADOOP-3399</a>. A debug message was logged at info level.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3396">HADOOP-3396</a>. TestDatanodeBlockScanner occationally fails.<br />(Lohit Vijayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3339">HADOOP-3339</a>. Some of the failures on 3rd datanode in DFS write pipelie
-are not detected properly. This could lead to hard failure of client's
-write operation.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3409">HADOOP-3409</a>. Namenode should save the root inode into fsimage.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3296">HADOOP-3296</a>. Fix task cache to work for more than two levels in the cache
-hierarchy. This also adds a new counter to track cache hits at levels
-greater than two.<br />(Amar Kamat via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3375">HADOOP-3375</a>. Lease paths were sometimes not removed from
-LeaseManager.sortedLeasesByPath. (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3424">HADOOP-3424</a>. Values returned by getPartition should be checked to
-make sure they are in the range 0 to #reduces - 1<br />(cdouglas via
-omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3408">HADOOP-3408</a>. Change FSNamesystem to send its metrics as integers to
-accommodate collectors that don't support long values.<br />(lohit vijayarenu
-via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3403">HADOOP-3403</a>. Fixes a problem in the JobTracker to do with handling of lost
-tasktrackers.<br />(Arun Murthy via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1318">HADOOP-1318</a>. Completed maps are not failed if the number of reducers are
-zero. (Amareshwari Sriramadasu via ddas).
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3351">HADOOP-3351</a>. Fixes the history viewer tool to not do huge StringBuffer
-allocations.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3419">HADOOP-3419</a>. Fixes TestFsck to wait for updates to happen before
-checking results to make the test more reliable.<br />(Lohit Vijaya
-Renu via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3259">HADOOP-3259</a>. Makes failure to read system properties due to a
-security manager non-fatal.<br />(Edward Yoon via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3451">HADOOP-3451</a>. Update libhdfs to use FileSystem::getFileBlockLocations
-instead of removed getFileCacheHints.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3401">HADOOP-3401</a>. Update FileBench to set the new
-"mapred.work.output.dir" property to work post-3041.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2669">HADOOP-2669</a>. DFSClient locks pendingCreates appropriately.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3410">HADOOP-3410</a>. Fix KFS implemenation to return correct file
-modification time.<br />(Sriram Rao via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3340">HADOOP-3340</a>. Fix DFS metrics for BlocksReplicated, HeartbeatsNum, and
-BlockReportsAverageTime.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3435">HADOOP-3435</a>. Remove the assuption in the scripts that bash is at
-/bin/bash and fix the test patch to require bash instead of sh.<br />(Brice Arnould via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3471">HADOOP-3471</a>. Fix spurious errors from TestIndexedSort and add additional
-logging to let failures be reproducible.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3443">HADOOP-3443</a>. Avoid copying map output across partitions when renaming a
-single spill.<br />(omalley via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3454">HADOOP-3454</a>. Fix Text::find to search only valid byte ranges.<br />(Chad Whipkey
-via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3417">HADOOP-3417</a>. Removes the static configuration variable,
-commandLineConfig from JobClient. Moves the cli parsing from
-JobShell to GenericOptionsParser.  Thus removes the class
-org.apache.hadoop.mapred.JobShell.<br />(Amareshwari Sriramadasu via
-ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2132">HADOOP-2132</a>. Only RUNNING/PREP jobs can be killed.<br />(Jothi Padmanabhan
-via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3476">HADOOP-3476</a>. Code cleanup in fuse-dfs.<br />(Peter Wyckoff via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2427">HADOOP-2427</a>. Ensure that the cwd of completed tasks is cleaned-up
-correctly on task-completion.<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2565">HADOOP-2565</a>. Remove DFSPath cache of FileStatus.
-(Tsz Wo (Nicholas), SZE via hairong)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3326">HADOOP-3326</a>. Cleanup the local-fs and in-memory merge in the ReduceTask by
-spawing only one thread each for the on-disk and in-memory merge.<br />(Sharad Agarwal via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3493">HADOOP-3493</a>. Fix TestStreamingFailure to use FileUtil.fullyDelete to
-ensure correct cleanup.<br />(Lohit Vijayarenu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3455">HADOOP-3455</a>. Fix NPE in ipc.Client in case of connection failure and
-improve its synchronization.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3240">HADOOP-3240</a>. Fix a testcase to not create files in the current directory.
-Instead the file is created in the test directory<br />(Mahadev Konar via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3496">HADOOP-3496</a>.  Fix failure in TestHarFileSystem.testArchives due to change
-in <a href="http://issues.apache.org/jira/browse/HADOOP-3095">HADOOP-3095</a>.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3135">HADOOP-3135</a>. Get the system directory from the JobTracker instead of from
-the conf.<br />(Subramaniam Krishnan via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3503">HADOOP-3503</a>. Fix a race condition when client and namenode start
-simultaneous recovery of the same block.  (dhruba &amp; Tsz Wo
-(Nicholas), SZE)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3440">HADOOP-3440</a>. Fixes DistributedCache to not create symlinks for paths which
-don't have fragments even when createSymLink is true.<br />(Abhijit Bagri via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3463">HADOOP-3463</a>. Hadoop-daemons script should cd to $HADOOP_HOME.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3489">HADOOP-3489</a>. Fix NPE in SafeModeMonitor.<br />(Lohit Vijayarenu via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3509">HADOOP-3509</a>. Fix NPE in FSNamesystem.close. (Tsz Wo (Nicholas), SZE via
-shv)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3491">HADOOP-3491</a>. Name-node shutdown causes InterruptedException in
-ResolutionMonitor.<br />(Lohit Vijayarenu via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3511">HADOOP-3511</a>. Fixes namenode image to not set the root's quota to an
-invalid value when the quota was not saved in the image.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3516">HADOOP-3516</a>. Ensure the JobClient in HadoopArchives is initialized
-with a configuration.<br />(Subramaniam Krishnan via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3513">HADOOP-3513</a>. Improve NNThroughputBenchmark log messages.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3519">HADOOP-3519</a>.  Fix NPE in DFS FileSystem rename.<br />(hairong via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3528">HADOOP-3528</a>. Metrics FilesCreated and files_deleted metrics
-do not match.<br />(Lohit via Mahadev)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3418">HADOOP-3418</a>. When a directory is deleted, any leases that point to files
-in the subdirectory are removed. (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3542">HADOOP-3542</a>. Diables the creation of _logs directory for the archives
-directory.<br />(Mahadev Konar via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3544">HADOOP-3544</a>. Fixes a documentation issue for hadoop archives.<br />(Mahadev Konar via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3517">HADOOP-3517</a>. Fixes a problem in the reducer due to which the last InMemory
-merge may be missed.<br />(Arun Murthy via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3548">HADOOP-3548</a>. Fixes build.xml to copy all *.jar files to the dist.<br />(Owen O'Malley via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3363">HADOOP-3363</a>. Fix unformatted storage detection in FSImage.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3560">HADOOP-3560</a>. Fixes a problem to do with split creation in archives.<br />(Mahadev Konar via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3545">HADOOP-3545</a>. Fixes a overflow problem in archives.<br />(Mahadev Konar via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3561">HADOOP-3561</a>. Prevent the trash from deleting its parent directories.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3575">HADOOP-3575</a>. Fix the clover ant target after package refactoring.<br />(Nigel Daley via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3539">HADOOP-3539</a>.  Fix the tool path in the bin/hadoop script under
-cygwin. (Tsz Wo (Nicholas), Sze via omalley)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3520">HADOOP-3520</a>.  TestDFSUpgradeFromImage triggers a race condition in the
-Upgrade Manager. Fixed.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3586">HADOOP-3586</a>. Provide deprecated, backwards compatibile semantics for the
-combiner to be run once and only once on each record.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3533">HADOOP-3533</a>. Add deprecated methods to provide API compatibility
-between 0.18 and 0.17. Remove the deprecated methods in trunk.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3580">HADOOP-3580</a>. Fixes a problem to do with specifying a har as an input to
-a job.<br />(Mahadev Konar via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3333">HADOOP-3333</a>. Don't assign a task to a tasktracker that it failed to
-execute earlier (used to happen in the case of lost tasktrackers where
-the tasktracker would reinitialize and bind to a different port).<br />(Jothi Padmanabhan and Arun Murthy via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3534">HADOOP-3534</a>. Log IOExceptions that happen in closing the name
-system when the NameNode shuts down. (Tsz Wo (Nicholas) Sze via omalley)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3546">HADOOP-3546</a>. TaskTracker re-initialization gets stuck in cleaning up.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3576">HADOOP-3576</a>. Fix NullPointerException when renaming a directory
-to its subdirectory. (Tsz Wo (Nicholas), SZE via hairong)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3320">HADOOP-3320</a>. Fix NullPointerException in NetworkTopology.getDistance().<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3569">HADOOP-3569</a>. KFS input stream read() now correctly reads 1 byte
-instead of 4.<br />(Sriram Rao via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3599">HADOOP-3599</a>. Fix JobConf::setCombineOnceOnly to modify the instance rather
-than a parameter.<br />(Owen O'Malley via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3590">HADOOP-3590</a>. Null pointer exception in JobTracker when the task tracker is
-not yet resolved.<br />(Amar Ramesh Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3603">HADOOP-3603</a>. Fix MapOutputCollector to spill when io.sort.spill.percent is
-1.0 and to detect spills when emitted records write no data.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3615">HADOOP-3615</a>. Set DatanodeProtocol.versionID to the correct value.
-(Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3559">HADOOP-3559</a>. Fix the libhdfs test script and config to work with the
-current semantics.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3480">HADOOP-3480</a>.  Need to update Eclipse template to reflect current trunk.<br />(Brice Arnould via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3588">HADOOP-3588</a>. Fixed usability issues with archives.<br />(mahadev)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3635">HADOOP-3635</a>. Uncaught exception in DataBlockScanner.
-(Tsz Wo (Nicholas), SZE via hairong)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3639">HADOOP-3639</a>. Exception when closing DFSClient while multiple files are
-open.<br />(Benjamin Gufler via hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3572">HADOOP-3572</a>. SetQuotas usage interface has some minor bugs.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3649">HADOOP-3649</a>. Fix bug in removing blocks from the corrupted block map.<br />(Lohit Vijayarenu via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3604">HADOOP-3604</a>. Work around a JVM synchronization problem observed while
-retrieving the address of direct buffers from compression code by obtaining
-a lock during this call.<br />(Arun C Murthy via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3683">HADOOP-3683</a>. Fix dfs metrics to count file listings rather than files
-listed.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3597">HADOOP-3597</a>. Fix SortValidator to use filesystems other than the default as
-input. Validation job still runs on default fs.<br />(Jothi Padmanabhan via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3693">HADOOP-3693</a>. Fix archives, distcp and native library documentation to
-conform to style guidelines.<br />(Amareshwari Sriramadasu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3653">HADOOP-3653</a>. Fix test-patch target to properly account for Eclipse
-classpath jars.<br />(Brice Arnould via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3692">HADOOP-3692</a>. Fix documentation for Cluster setup and Quick start guides.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3691">HADOOP-3691</a>. Fix streaming and tutorial docs.<br />(Jothi Padmanabhan via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3630">HADOOP-3630</a>. Fix NullPointerException in CompositeRecordReader from empty
-sources<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3706">HADOOP-3706</a>. Fix a ClassLoader issue in the mapred.join Parser that
-prevents it from loading user-specified InputFormats.<br />(Jingkei Ly via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3718">HADOOP-3718</a>. Fix KFSOutputStream::write(int) to output a byte instead of
-an int, per the OutputStream contract.<br />(Sriram Rao via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3647">HADOOP-3647</a>. Add debug logs to help track down a very occassional,
-hard-to-reproduce, bug in shuffle/merge on the reducer.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3716">HADOOP-3716</a>. Prevent listStatus in KosmosFileSystem from returning
-null for valid, empty directories.<br />(Sriram Rao via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3752">HADOOP-3752</a>. Fix audit logging to record rename events.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3737">HADOOP-3737</a>. Fix CompressedWritable to call Deflater::end to release
-compressor memory.<br />(Grant Glouser via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3670">HADOOP-3670</a>. Fixes JobTracker to clear out split bytes when no longer
-required.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3755">HADOOP-3755</a>. Update gridmix to work with HOD 0.4<br />(Runping Qi via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3743">HADOOP-3743</a>. Fix -libjars, -files, -archives options to work even if
-user code does not implement the Tool interface.<br />(Amareshwari Sriramadasu via mahadev)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3774">HADOOP-3774</a>. Fix typos in shell output. (Tsz Wo (Nicholas), SZE via
-cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3762">HADOOP-3762</a>. Fixed FileSystem cache to work with the default port.<br />(cutting
-via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3798">HADOOP-3798</a>. Fix tests compilation.<br />(Mukund Madhugiri via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3794">HADOOP-3794</a>. Return modification time instead of zero for KosmosFileSystem.<br />(Sriram Rao via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3806">HADOOP-3806</a>. Remove debug statement to stdout from QuickSort.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3776">HADOOP-3776</a>. Fix NPE at NameNode when datanode reports a block after it is
-deleted at NameNode.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3537">HADOOP-3537</a>. Disallow adding a datanode to a network topology when its
-network location is not resolved.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3571">HADOOP-3571</a>. Fix bug in block removal used in lease recovery.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3645">HADOOP-3645</a>. MetricsTimeVaryingRate returns wrong value for
-metric_avg_time.<br />(Lohit Vijayarenu via hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3521">HADOOP-3521</a>. Reverted the missing cast to float for sending Counters' values
-to Hadoop metrics which was removed by <a href="http://issues.apache.org/jira/browse/HADOOP-544">HADOOP-544</a>.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3820">HADOOP-3820</a>. Fixes two problems in the gridmix-env - a syntax error, and a
-wrong definition of USE_REAL_DATASET by default.<br />(Arun Murthy via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3724">HADOOP-3724</a>. Fixes two problems related to storing and recovering lease
-in the fsimage.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3827">HADOOP-3827</a>.  Fixed compression of empty map-outputs.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3865">HADOOP-3865</a>. Remove reference to FSNamesystem from metrics preventing
-garbage collection.<br />(Lohit Vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3884">HADOOP-3884</a>.  Fix so that Eclipse plugin builds against recent
-Eclipse releases.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3837">HADOOP-3837</a>. Streaming jobs report progress status.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3897">HADOOP-3897</a>. Fix a NPE in secondary namenode.<br />(Lohit Vijayarenu via
-cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3901">HADOOP-3901</a>. Fix bin/hadoop to correctly set classpath under cygwin.
-(Tsz Wo (Nicholas) Sze via omalley)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3947">HADOOP-3947</a>. Fix a problem in tasktracker reinitialization.<br />(Amareshwari Sriramadasu via ddas)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.17.3_-_unreleased_')">Release 0.17.3 - Unreleased
-</a></h3>
-<ul id="release_0.17.3_-_unreleased_">
-  <li><a href="javascript:toggleList('release_0.17.3_-_unreleased_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(1)
-    <ol id="release_0.17.3_-_unreleased_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4164">HADOOP-4164</a>. Chinese translation of the documentation.<br />(Xuebing Yan via
-omalley)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.17.3_-_unreleased_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(4)
-    <ol id="release_0.17.3_-_unreleased_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4277">HADOOP-4277</a>. Checksum verification was mistakenly disabled for
-LocalFileSystem.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4271">HADOOP-4271</a>. Checksum input stream can sometimes return invalid
-data to the user.<br />(Ning Li via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4318">HADOOP-4318</a>. DistCp should use absolute paths for cleanup.<br />(szetszwo)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-4326">HADOOP-4326</a>. ChecksumFileSystem does not override create(...) correctly.<br />(szetszwo)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.17.2_-_2008-08-11_')">Release 0.17.2 - 2008-08-11
-</a></h3>
-<ul id="release_0.17.2_-_2008-08-11_">
-  <li><a href="javascript:toggleList('release_0.17.2_-_2008-08-11_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(12)
-    <ol id="release_0.17.2_-_2008-08-11_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3678">HADOOP-3678</a>. Avoid spurious exceptions logged at DataNode when clients
-read from DFS.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3707">HADOOP-3707</a>. NameNode keeps a count of number of blocks scheduled
-to be written to a datanode and uses it to avoid allocating more
-blocks than a datanode can hold.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3760">HADOOP-3760</a>. Fix a bug with HDFS file close() mistakenly introduced
-by <a href="http://issues.apache.org/jira/browse/HADOOP-3681">HADOOP-3681</a>.<br />(Lohit Vijayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3681">HADOOP-3681</a>. DFSClient can get into an infinite loop while closing
-a file if there are some errors.<br />(Lohit Vijayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3002">HADOOP-3002</a>. Hold off block removal while in safe mode.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3685">HADOOP-3685</a>. Unbalanced replication target.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3758">HADOOP-3758</a>. Shutdown datanode on version mismatch instead of retrying
-continuously, preventing excessive logging at the namenode.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3633">HADOOP-3633</a>. Correct exception handling in DataXceiveServer, and throttle
-the number of xceiver threads in a data-node.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3370">HADOOP-3370</a>. Ensure that the TaskTracker.runningJobs data-structure is
-correctly cleaned-up on task completion.<br />(Zheng Shao via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3813">HADOOP-3813</a>. Fix task-output clean-up on HDFS to use the recursive
-FileSystem.delete rather than the FileUtil.fullyDelete.<br />(Amareshwari
-Sri Ramadasu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3859">HADOOP-3859</a>. Allow the maximum number of xceivers in the data node to
-be configurable.<br />(Johan Oskarsson via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3931">HADOOP-3931</a>. Fix corner case in the map-side sort that causes some values
-to be counted as too large and causes premature spills to disk. Some values
-will also bypass the combiner incorrectly.<br />(cdouglas via omalley)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.17.1_-_2008-06-23_')">Release 0.17.1 - 2008-06-23
-</a></h3>
-<ul id="release_0.17.1_-_2008-06-23_">
-  <li><a href="javascript:toggleList('release_0.17.1_-_2008-06-23_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(1)
-    <ol id="release_0.17.1_-_2008-06-23_._incompatible_changes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3565">HADOOP-3565</a>. Fix the Java serialization, which is not enabled by
-default, to clear the state of the serializer between objects.<br />(tomwhite via omalley)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.17.1_-_2008-06-23_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(2)
-    <ol id="release_0.17.1_-_2008-06-23_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3522">HADOOP-3522</a>. Improve documentation on reduce pointing out that
-input keys and values will be reused.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3487">HADOOP-3487</a>. Balancer uses thread pools for managing its threads;
-therefore provides better resource management.<br />(hairong)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.17.1_-_2008-06-23_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(8)
-    <ol id="release_0.17.1_-_2008-06-23_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2159">HADOOP-2159</a> Namenode stuck in safemode. The counter blockSafe should
-not be decremented for invalid blocks.<br />(hairong)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3472">HADOOP-3472</a> MapFile.Reader getClosest() function returns incorrect results
-when before is true<br />(Todd Lipcon via Stack)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3442">HADOOP-3442</a>. Limit recursion depth on the stack for QuickSort to prevent
-StackOverflowErrors. To avoid O(n*n) cases, when partitioning depth exceeds
-a multiple of log(n), change to HeapSort.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3477">HADOOP-3477</a>. Fix build to not package contrib/*/bin twice in
-distributions.<br />(Adam Heath via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3475">HADOOP-3475</a>. Fix MapTask to correctly size the accounting allocation of
-io.sort.mb.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3550">HADOOP-3550</a>. Fix the serialization data structures in MapTask where the
-value lengths are incorrectly calculated.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3526">HADOOP-3526</a>. Fix contrib/data_join framework by cloning values retained
-in the reduce.<br />(Spyros Blanas via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1979">HADOOP-1979</a>. Speed up fsck by adding a buffered stream.<br />(Lohit
-Vijaya Renu via omalley)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.17.0_-_2008-05-18_')">Release 0.17.0 - 2008-05-18
-</a></h3>
-<ul id="release_0.17.0_-_2008-05-18_">
-  <li><a href="javascript:toggleList('release_0.17.0_-_2008-05-18_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(26)
-    <ol id="release_0.17.0_-_2008-05-18_._incompatible_changes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2786">HADOOP-2786</a>.  Move hbase out of hadoop core
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2345">HADOOP-2345</a>.  New HDFS transactions to support appending
-to files.  Disk layout version changed from -11 to -12.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2192">HADOOP-2192</a>. Error messages from "dfs mv" command improved.<br />(Mahadev Konar via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1902">HADOOP-1902</a>. "dfs du" command without any arguments operates on the
-current working directory.<br />(Mahadev Konar via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2873">HADOOP-2873</a>.  Fixed bad disk format introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-2345">HADOOP-2345</a>.
-Disk layout version changed from -12 to -13. See changelist 630992<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1985">HADOOP-1985</a>.  This addresses rack-awareness for Map tasks and for
-HDFS in a uniform way.<br />(ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1986">HADOOP-1986</a>.  Add support for a general serialization mechanism for
-Map Reduce.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-771">HADOOP-771</a>. FileSystem.delete() takes an explicit parameter that
-specifies whether a recursive delete is intended.<br />(Mahadev Konar via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2470">HADOOP-2470</a>. Remove getContentLength(String), open(String, long, long)
-and isDir(String) from ClientProtocol. ClientProtocol version changed
-from 26 to 27. (Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2822">HADOOP-2822</a>. Remove deprecated code for classes InputFormatBase and
-PhasedFileSystem.<br />(Amareshwari Sriramadasu via enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2116">HADOOP-2116</a>. Changes the layout of the task execution directory.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2828">HADOOP-2828</a>. The following deprecated methods in Configuration.java
-have been removed
-    getObject(String name)
-    setObject(String name, Object value)
-    get(String name, Object defaultValue)
-    set(String name, Object value)
-    Iterator entries()<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2824">HADOOP-2824</a>. Removes one deprecated constructor from MiniMRCluster.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2823">HADOOP-2823</a>. Removes deprecated methods getColumn(), getLine() from
-org.apache.hadoop.record.compiler.generated.SimpleCharStream.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3060">HADOOP-3060</a>. Removes one unused constructor argument from MiniMRCluster.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2854">HADOOP-2854</a>. Remove deprecated o.a.h.ipc.Server::getUserInfo().<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2563">HADOOP-2563</a>. Remove deprecated FileSystem::listPaths.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2818">HADOOP-2818</a>.  Remove deprecated methods in Counters.<br />(Amareshwari Sriramadasu via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2831">HADOOP-2831</a>. Remove deprecated o.a.h.dfs.INode::getAbsoluteName()<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2839">HADOOP-2839</a>. Remove deprecated FileSystem::globPaths.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2634">HADOOP-2634</a>. Deprecate ClientProtocol::exists.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2410">HADOOP-2410</a>.  Make EC2 cluster nodes more independent of each other.
-Multiple concurrent EC2 clusters are now supported, and nodes may be
-added to a cluster on the fly with new nodes starting in the same EC2
-availability zone as the cluster.  Ganglia monitoring and large
-instance sizes have also been added.<br />(Chris K Wensel via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2826">HADOOP-2826</a>. Deprecated FileSplit.getFile(), LineRecordReader.readLine().<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3239">HADOOP-3239</a>. getFileInfo() returns null for non-existing files instead
-of throwing FileNotFoundException.<br />(Lohit Vijayarenu via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3266">HADOOP-3266</a>. Removed HOD changes from CHANGES.txt, as they are now inside
-src/contrib/hod<br />(Hemanth Yamijala via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3280">HADOOP-3280</a>. Separate the configuration of the virtual memory size
-(mapred.child.ulimit) from the jvm heap size, so that 64 bit
-streaming applications are supported even when running with 32 bit
-jvms.<br />(acmurthy via omalley)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.17.0_-_2008-05-18_._new_features_')">  NEW FEATURES
-</a>&nbsp;&nbsp;&nbsp;(12)
-    <ol id="release_0.17.0_-_2008-05-18_._new_features_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1398">HADOOP-1398</a>.  Add HBase in-memory block cache.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2178">HADOOP-2178</a>.  Job History on DFS.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2063">HADOOP-2063</a>. A new parameter to dfs -get command to fetch a file
-even if it is corrupted.  (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2219">HADOOP-2219</a>. A new command "df -count" that counts the number of
-files and directories.  (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2906">HADOOP-2906</a>. Add an OutputFormat capable of using keys, values, and
-config params to map records to different output files.<br />(Runping Qi via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2346">HADOOP-2346</a>. Utilities to support timeout while writing to sockets.
-DFSClient and DataNode sockets have 10min write timeout.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2951">HADOOP-2951</a>.  Add a contrib module that provides a utility to
-build or update Lucene indexes using Map/Reduce.<br />(Ning Li via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1622">HADOOP-1622</a>.  Allow multiple jar files for map reduce.<br />(Mahadev Konar via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2055">HADOOP-2055</a>. Allows users to set PathFilter on the FileInputFormat.<br />(Alejandro Abdelnur via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2551">HADOOP-2551</a>. More environment variables like HADOOP_NAMENODE_OPTS
-for better control of HADOOP_OPTS for each component.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3001">HADOOP-3001</a>. Add job counters that measure the number of bytes
-read and written to HDFS, S3, KFS, and local file systems.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3048">HADOOP-3048</a>.  A new Interface and a default implementation to convert
-and restore serializations of objects to/from strings.<br />(enis)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.17.0_-_2008-05-18_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(29)
-    <ol id="release_0.17.0_-_2008-05-18_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2655">HADOOP-2655</a>. Copy on write for data and metadata files in the
-presence of snapshots. Needed for supporting appends to HDFS
-files.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1967">HADOOP-1967</a>.  When a Path specifies the same scheme as the default
-FileSystem but no authority, the default FileSystem's authority is
-used.  Also add warnings for old-format FileSystem names, accessor
-methods for fs.default.name, and check for null authority in HDFS.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2895">HADOOP-2895</a>. Let the profiling string be configurable.<br />(Martin Traverso via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-910">HADOOP-910</a>. Enables Reduces to do merges for the on-disk map output files
-in parallel with their copying.<br />(Amar Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-730">HADOOP-730</a>. Use rename rather than copy for local renames.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2810">HADOOP-2810</a>. Updated the Hadoop Core logo.<br />(nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2057">HADOOP-2057</a>.  Streaming should optionally treat a non-zero exit status
-of a child process as a failed task.<br />(Rick Cox via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2765">HADOOP-2765</a>. Enables specifying ulimits for streaming/pipes tasks<br />(ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2888">HADOOP-2888</a>. Make gridmix scripts more readily configurable and amenable
-to automated execution.<br />(Mukund Madhugiri via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2908">HADOOP-2908</a>.  A document that describes the DFS Shell command.<br />(Mahadev Konar via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2981">HADOOP-2981</a>.  Update README.txt to reflect the upcoming use of
-cryptography.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2804">HADOOP-2804</a>.  Add support to publish CHANGES.txt as HTML when running
-the Ant 'docs' target.<br />(nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2559">HADOOP-2559</a>. Change DFS block placement to allocate the first replica
-locally, the second off-rack, and the third intra-rack from the
-second.<br />(lohit vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2939">HADOOP-2939</a>. Make the automated patch testing process an executable
-Ant target, test-patch.<br />(nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2239">HADOOP-2239</a>. Add HsftpFileSystem to permit transferring files over ssl.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2886">HADOOP-2886</a>.  Track individual RPC metrics.<br />(girish vaitheeswaran via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2373">HADOOP-2373</a>. Improvement in safe-mode reporting.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3091">HADOOP-3091</a>. Modify FsShell command -put to accept multiple sources.<br />(Lohit Vijaya Renu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3092">HADOOP-3092</a>. Show counter values from job -status command.<br />(Tom White via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1228">HADOOP-1228</a>.  Ant task to generate Eclipse project files.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3093">HADOOP-3093</a>. Adds Configuration.getStrings(name, default-value) and
-the corresponding setStrings.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3106">HADOOP-3106</a>. Adds documentation in forrest for debugging.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3099">HADOOP-3099</a>. Add an option to distcp to preserve user, group, and
-permission information. (Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2841">HADOOP-2841</a>. Unwrap AccessControlException and FileNotFoundException
-from RemoteException for DFSClient.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3152">HADOOP-3152</a>.  Make index interval configuable when using
-MapFileOutputFormat for map-reduce job.<br />(Rong-En Fan via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3143">HADOOP-3143</a>. Decrease number of slaves from 4 to 3 in TestMiniMRDFSSort,
-as Hudson generates false negatives under the current load.<br />(Nigel Daley via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3174">HADOOP-3174</a>. Illustrative example for MultipleFileInputFormat.<br />(Enis
-Soztutar via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2993">HADOOP-2993</a>. Clarify the usage of JAVA_HOME in the Quick Start guide.<br />(acmurthy via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3124">HADOOP-3124</a>. Make DataNode socket write timeout configurable.<br />(rangadi)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.17.0_-_2008-05-18_._optimizations_')">  OPTIMIZATIONS
-</a>&nbsp;&nbsp;&nbsp;(12)
-    <ol id="release_0.17.0_-_2008-05-18_._optimizations_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2790">HADOOP-2790</a>.  Fixed inefficient method hasSpeculativeTask by removing
-repetitive calls to get the current time and late checking to see if
-we want speculation on at all.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2758">HADOOP-2758</a>. Reduce buffer copies in DataNode when data is read from
-HDFS, without negatively affecting read throughput.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2399">HADOOP-2399</a>. Input key and value to combiner and reducer is reused.
-(Owen O'Malley via ddas).
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2423">HADOOP-2423</a>.  Code optimization in FSNamesystem.mkdirs.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2606">HADOOP-2606</a>. ReplicationMonitor selects data-nodes to replicate directly
-from needed replication blocks instead of looking up the blocks for
-each live data-node.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2148">HADOOP-2148</a>. Eliminate redundant data-node blockMap lookups.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2027">HADOOP-2027</a>. Return the number of bytes in each block in a file
-via a single rpc to the namenode to speed up job planning.<br />(Lohit Vijaya Renu via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2902">HADOOP-2902</a>.  Replace uses of "fs.default.name" with calls to the
-accessor methods added in <a href="http://issues.apache.org/jira/browse/HADOOP-1967">HADOOP-1967</a>.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2119">HADOOP-2119</a>.  Optimize scheduling of jobs with large numbers of
-tasks by replacing static arrays with lists of runnable tasks.<br />(Amar Kamat via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2919">HADOOP-2919</a>.  Reduce the number of memory copies done during the
-map output sorting. Also adds two config variables:
-io.sort.spill.percent - the percentages of io.sort.mb that should
-                        cause a spill (default 80%)
-io.sort.record.percent - the percent of io.sort.mb that should
-                         hold key/value indexes (default 5%)<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3140">HADOOP-3140</a>. Doesn't add a task in the commit queue if the task hadn't
-generated any output.<br />(Amar Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3168">HADOOP-3168</a>. Reduce the amount of logging in streaming to an
-exponentially increasing number of records (up to 10,000
-records/log).<br />(Zheng Shao via omalley)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.17.0_-_2008-05-18_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(102)
-    <ol id="release_0.17.0_-_2008-05-18_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2195">HADOOP-2195</a>. '-mkdir' behaviour is now closer to Linux shell in case of
-errors.<br />(Mahadev Konar via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2190">HADOOP-2190</a>. bring behaviour '-ls' and '-du' closer to Linux shell
-commands in case of errors.<br />(Mahadev Konar via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2193">HADOOP-2193</a>. 'fs -rm' and 'fs -rmr' show error message when the target
-file does not exist.<br />(Mahadev Konar via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2738">HADOOP-2738</a> Text is not subclassable because set(Text) and compareTo(Object)
-access the other instance's private members directly.<br />(jimk)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2779">HADOOP-2779</a>.  Remove the references to HBase in the build.xml.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2194">HADOOP-2194</a>. dfs cat on a non-existent file throws FileNotFoundException.<br />(Mahadev Konar via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2767">HADOOP-2767</a>. Fix for NetworkTopology erroneously skipping the last leaf
-node on a rack.<br />(Hairong Kuang and Mark Butler via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1593">HADOOP-1593</a>. FsShell works with paths in non-default FileSystem.<br />(Mahadev Konar via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2191">HADOOP-2191</a>. du and dus command on non-existent directory gives
-appropriate error message.<br />(Mahadev Konar via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2832">HADOOP-2832</a>. Remove tabs from code of DFSClient for better
-indentation.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2844">HADOOP-2844</a>. distcp closes file handles for sequence files.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2727">HADOOP-2727</a>. Fix links in Web UI of the hadoop daemons and some docs<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2871">HADOOP-2871</a>. Fixes a problem to do with file: URI in the JobHistory init.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2800">HADOOP-2800</a>.  Deprecate SetFile.Writer constructor not the whole class.<br />(Johan Oskarsson via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2891">HADOOP-2891</a>.  DFSClient.close() closes all open files.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2845">HADOOP-2845</a>.  Fix dfsadmin disk utilization report on Solaris.<br />(Martin Traverso via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2912">HADOOP-2912</a>. MiniDFSCluster restart should wait for namenode to exit
-safemode. This was causing TestFsck to fail.<br />(Mahadev Konar via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2820">HADOOP-2820</a>. The following classes in streaming are removed :
-StreamLineRecordReader StreamOutputFormat StreamSequenceRecordReader.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2819">HADOOP-2819</a>. The following methods in JobConf are removed:
-getInputKeyClass(), setInputKeyClass, getInputValueClass(),
-setInputValueClass(Class theClass), setSpeculativeExecution,
-getSpeculativeExecution()<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2817">HADOOP-2817</a>. Removes deprecated mapred.tasktracker.tasks.maximum and
-ClusterStatus.getMaxTasks().<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2821">HADOOP-2821</a>. Removes deprecated ShellUtil and ToolBase classes from
-the util package.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2934">HADOOP-2934</a>. The namenode was encountreing a NPE while loading
-leases from the fsimage. Fixed.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2938">HADOOP-2938</a>. Some fs commands did not glob paths.
-(Tsz Wo (Nicholas), SZE via rangadi)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2943">HADOOP-2943</a>. Compression of intermediate map output causes failures
-in the merge.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2870">HADOOP-2870</a>.  DataNode and NameNode closes all connections while
-shutting down.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2973">HADOOP-2973</a>. Fix TestLocalDFS for Windows platform.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2971">HADOOP-2971</a>. select multiple times if it returns early in
-SocketIOWithTimeout.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2955">HADOOP-2955</a>. Fix TestCrcCorruption test failures caused by <a href="http://issues.apache.org/jira/browse/HADOOP-2758">HADOOP-2758</a><br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2657">HADOOP-2657</a>. A flush call on the DFSOutputStream flushes the last
-partial CRC chunk too.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2974">HADOOP-2974</a>. IPC unit tests used "0.0.0.0" to connect to server, which
-is not always supported.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2996">HADOOP-2996</a>. Fixes uses of StringBuffer in StreamUtils class.<br />(Dave Brosius via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2995">HADOOP-2995</a>. Fixes StreamBaseRecordReader's getProgress to return a
-floating point number.<br />(Dave Brosius via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2972">HADOOP-2972</a>. Fix for a NPE in FSDataset.invalidate.<br />(Mahadev Konar via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2994">HADOOP-2994</a>. Code cleanup for DFSClient: remove redundant
-conversions from string to string.<br />(Dave Brosius via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3009">HADOOP-3009</a>. TestFileCreation sometimes fails because restarting
-minidfscluster sometimes creates datanodes with ports that are
-different from their original instance.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2992">HADOOP-2992</a>. Distributed Upgrade framework works correctly with
-more than one upgrade object.<br />(Konstantin Shvachko via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2679">HADOOP-2679</a>. Fix a typo in libhdfs.<br />(Jason via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2976">HADOOP-2976</a>. When a lease expires, the Namenode ensures that
-blocks of the file are adequately replicated.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2901">HADOOP-2901</a>. Fixes the creation of info servers in the JobClient
-and JobTracker. Removes the creation from JobClient and removes
-additional info server from the JobTracker. Also adds the command
-line utility to view the history files (<a href="http://issues.apache.org/jira/browse/HADOOP-2896">HADOOP-2896</a>), and fixes
-bugs in JSPs to do with analysis - <a href="http://issues.apache.org/jira/browse/HADOOP-2742">HADOOP-2742</a>, <a href="http://issues.apache.org/jira/browse/HADOOP-2792">HADOOP-2792</a>.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2890">HADOOP-2890</a>. If different datanodes report the same block but
-with different sizes to the namenode, the namenode picks the
-replica(s) with the largest size as the only valid replica(s).<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2825">HADOOP-2825</a>. Deprecated MapOutputLocation.getFile() is removed.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2806">HADOOP-2806</a>. Fixes a streaming document.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3008">HADOOP-3008</a>. SocketIOWithTimeout throws InterruptedIOException if the
-thread is interrupted while it is waiting.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3006">HADOOP-3006</a>. Fix wrong packet size reported by DataNode when a block
-is being replicated.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3029">HADOOP-3029</a>. Datanode prints log message "firstbadlink" only if
-it detects a bad connection to another datanode in the pipeline.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3030">HADOOP-3030</a>. Release reserved space for file in InMemoryFileSystem if
-checksum reservation fails.<br />(Devaraj Das via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3036">HADOOP-3036</a>. Fix findbugs warnings in UpgradeUtilities.<br />(Konstantin
-Shvachko via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3025">HADOOP-3025</a>. ChecksumFileSystem supports the delete method with
-the recursive flag.<br />(Mahadev Konar via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3012">HADOOP-3012</a>. dfs -mv file to user home directory throws exception if
-the user home directory does not exist.<br />(Mahadev Konar via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3066">HADOOP-3066</a>. Should not require superuser privilege to query if hdfs is in
-safe mode<br />(jimk)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3040">HADOOP-3040</a>. If the input line starts with the separator char, the key
-is set as empty.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3080">HADOOP-3080</a>. Removes flush calls from JobHistory.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3086">HADOOP-3086</a>. Adds the testcase missed during commit of hadoop-3040.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3046">HADOOP-3046</a>. Fix the raw comparators for Text and BytesWritables
-to use the provided length rather than recompute it.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3094">HADOOP-3094</a>. Fix BytesWritable.toString to avoid extending the sign bit<br />(Owen O'Malley via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3067">HADOOP-3067</a>. DFSInputStream's position read does not close the sockets.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3073">HADOOP-3073</a>. close() on SocketInputStream or SocketOutputStream should
-close the underlying channel.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3087">HADOOP-3087</a>. Fixes a problem to do with refreshing of loadHistory.jsp.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3065">HADOOP-3065</a>. Better logging message if the rack location of a datanode
-cannot be determined.<br />(Devaraj Das via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3064">HADOOP-3064</a>. Commas in a file path should not be treated as delimiters.<br />(Hairong Kuang via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2997">HADOOP-2997</a>. Adds test for non-writable serialier. Also fixes a problem
-introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-2399">HADOOP-2399</a>.<br />(Tom White via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3114">HADOOP-3114</a>. Fix TestDFSShell on Windows.<br />(Lohit Vijaya Renu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3118">HADOOP-3118</a>.  Fix Namenode NPE while loading fsimage after a cluster
-upgrade from older disk format.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3161">HADOOP-3161</a>. Fix FIleUtil.HardLink.getLinkCount on Mac OS.<br />(nigel
-via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2927">HADOOP-2927</a>. Fix TestDU to acurately calculate the expected file size.<br />(shv via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3123">HADOOP-3123</a>. Fix the native library build scripts to work on Solaris.<br />(tomwhite via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3089">HADOOP-3089</a>.  Streaming should accept stderr from task before
-first key arrives.<br />(Rick Cox via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3146">HADOOP-3146</a>. A DFSOutputStream.flush method is renamed as
-DFSOutputStream.fsync.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3165">HADOOP-3165</a>. -put/-copyFromLocal did not treat input file "-" as stdin.<br />(Lohit Vijayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3041">HADOOP-3041</a>. Deprecate JobConf.setOutputPath and JobConf.getOutputPath.
-Deprecate OutputFormatBase. Add FileOutputFormat. Existing output formats
-extending OutputFormatBase, now extend FileOutputFormat. Add the following
-APIs in FileOutputFormat: setOutputPath, getOutputPath, getWorkOutputPath.<br />(Amareshwari Sriramadasu via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3083">HADOOP-3083</a>. The fsimage does not store leases. This would have to be
-reworked in the next release to support appends.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3166">HADOOP-3166</a>. Fix an ArrayIndexOutOfBoundsException in the spill thread
-and make exception handling more promiscuous to catch this condition.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3050">HADOOP-3050</a>. DataNode sends one and only one block report after
-it registers with the namenode.<br />(Hairong Kuang)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3044">HADOOP-3044</a>. NNBench sets the right configuration for the mapper.<br />(Hairong Kuang)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3178">HADOOP-3178</a>. Fix GridMix scripts for small and medium jobs
-to handle input paths differently.<br />(Mukund Madhugiri via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1911">HADOOP-1911</a>. Fix an infinite loop in DFSClient when all replicas of a
-block are bad<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3157">HADOOP-3157</a>. Fix path handling in DistributedCache and TestMiniMRLocalFS.<br />(Doug Cutting via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3018">HADOOP-3018</a>. Fix the eclipse plug-in contrib wrt removed deprecated
-methods<br />(taton)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3183">HADOOP-3183</a>. Fix TestJobShell to use 'ls' instead of java.io.File::exists
-since cygwin symlinks are unsupported.<br />(Mahadev konar via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3175">HADOOP-3175</a>. Fix FsShell.CommandFormat to handle "-" in arguments.<br />(Edward J. Yoon via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3220">HADOOP-3220</a>. Safemode message corrected.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3208">HADOOP-3208</a>. Fix WritableDeserializer to set the Configuration on
-deserialized Writables.<br />(Enis Soztutar via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3224">HADOOP-3224</a>. 'dfs -du /dir' does not return correct size.<br />(Lohit Vjayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3223">HADOOP-3223</a>. Fix typo in help message for -chmod.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1373">HADOOP-1373</a>. checkPath() should ignore case when it compares authoriy.<br />(Edward J. Yoon via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3204">HADOOP-3204</a>. Fixes a problem to do with ReduceTask's LocalFSMerger not
-catching Throwable.<br />(Amar Ramesh Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3229">HADOOP-3229</a>. Report progress when collecting records from the mapper and
-the combiner.<br />(Doug Cutting via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3225">HADOOP-3225</a>. Unwrapping methods of RemoteException should initialize
-detailedMassage field.<br />(Mahadev Konar, shv, cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3247">HADOOP-3247</a>. Fix gridmix scripts to use the correct globbing syntax and
-change maxentToSameCluster to run the correct number of jobs.<br />(Runping Qi via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3242">HADOOP-3242</a>. Fix the RecordReader of SequenceFileAsBinaryInputFormat to
-correctly read from the start of the split and not the beginning of the
-file.<br />(cdouglas via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3256">HADOOP-3256</a>. Encodes the job name used in the filename for history files.<br />(Arun Murthy via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3162">HADOOP-3162</a>. Ensure that comma-separated input paths are treated correctly
-as multiple input paths.<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3263">HADOOP-3263</a>. Ensure that the job-history log file always follows the
-pattern of hostname_timestamp_jobid_username_jobname even if username
-and/or jobname are not specified. This helps to avoid wrong assumptions
-made about the job-history log filename in jobhistory.jsp.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3251">HADOOP-3251</a>. Fixes getFilesystemName in JobTracker and LocalJobRunner to
-use FileSystem.getUri instead of FileSystem.getName.<br />(Arun Murthy via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3237">HADOOP-3237</a>. Fixes TestDFSShell.testErrOutPut on Windows platform.<br />(Mahadev Konar via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3279">HADOOP-3279</a>. TaskTracker checks for SUCCEEDED task status in addition to
-COMMIT_PENDING status when it fails maps due to a lost map.<br />(Devaraj Das)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3286">HADOOP-3286</a>. Prevent collisions in gridmix output dirs by increasing the
-granularity of the timestamp.<br />(Runping Qi via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3285">HADOOP-3285</a>. Fix input split locality when the splits align to
-fs blocks.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3372">HADOOP-3372</a>. Fix heap management in streaming tests.<br />(Arun Murthy via
-cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3031">HADOOP-3031</a>. Fix javac warnings in test classes.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3382">HADOOP-3382</a>. Fix memory leak when files are not cleanly closed<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3322">HADOOP-3322</a>. Fix to push MetricsRecord for rpc metrics.<br />(Eric Yang via
-mukund)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.16.4_-_2008-05-05_')">Release 0.16.4 - 2008-05-05
-</a></h3>
-<ul id="release_0.16.4_-_2008-05-05_">
-  <li><a href="javascript:toggleList('release_0.16.4_-_2008-05-05_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(3)
-    <ol id="release_0.16.4_-_2008-05-05_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3138">HADOOP-3138</a>. DFS mkdirs() should not throw an exception if the directory
-already exists.<br />(rangadi via mukund)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3294">HADOOP-3294</a>. Fix distcp to check the destination length and retry the copy
-if it doesn't match the src length. (Tsz Wo (Nicholas), SZE via mukund)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3186">HADOOP-3186</a>. Fix incorrect permission checkding for mv and renameTo
-in HDFS. (Tsz Wo (Nicholas), SZE via mukund)
-</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.16.3_-_2008-04-16_')">Release 0.16.3 - 2008-04-16
-</a></h3>
-<ul id="release_0.16.3_-_2008-04-16_">
-  <li><a href="javascript:toggleList('release_0.16.3_-_2008-04-16_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(7)
-    <ol id="release_0.16.3_-_2008-04-16_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3010">HADOOP-3010</a>. Fix ConcurrentModificationException in ipc.Server.Responder.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3154">HADOOP-3154</a>. Catch all Throwables from the SpillThread in MapTask, rather
-than IOExceptions only.<br />(ddas via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3159">HADOOP-3159</a>. Avoid file system cache being overwritten whenever
-configuration is modified. (Tsz Wo (Nicholas), SZE via hairong)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3139">HADOOP-3139</a>. Remove the consistency check for the FileSystem cache in
-closeAll() that causes spurious warnings and a deadlock.
-(Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3195">HADOOP-3195</a>. Fix TestFileSystem to be deterministic.
-(Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3069">HADOOP-3069</a>. Primary name-node should not truncate image when transferring
-it from the secondary.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3182">HADOOP-3182</a>. Change permissions of the job-submission directory to 777
-from 733 to ensure sharing of HOD clusters works correctly. (Tsz Wo
-(Nicholas), Sze and Amareshwari Sri Ramadasu via acmurthy)
-</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.16.2_-_2008-04-02_')">Release 0.16.2 - 2008-04-02
-</a></h3>
-<ul id="release_0.16.2_-_2008-04-02_">
-  <li><a href="javascript:toggleList('release_0.16.2_-_2008-04-02_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(18)
-    <ol id="release_0.16.2_-_2008-04-02_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3011">HADOOP-3011</a>. Prohibit distcp from overwriting directories on the
-destination filesystem with files.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3033">HADOOP-3033</a>. The BlockReceiver thread in the datanode writes data to
-the block file, changes file position (if needed) and flushes all by
-itself. The PacketResponder thread does not flush block file.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2978">HADOOP-2978</a>. Fixes the JobHistory log format for counters.<br />(Runping Qi via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2985">HADOOP-2985</a>. Fixes LocalJobRunner to tolerate null job output path.
-Also makes the _temporary a constant in MRConstants.java.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3003">HADOOP-3003</a>. FileSystem cache key is updated after a
-FileSystem object is created. (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3042">HADOOP-3042</a>. Updates the Javadoc in JobConf.getOutputPath to reflect
-the actual temporary path.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3007">HADOOP-3007</a>. Tolerate mirror failures while DataNode is replicating
-blocks as it used to before.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2944">HADOOP-2944</a>. Fixes a "Run on Hadoop" wizard NPE when creating a
-Location from the wizard.<br />(taton)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3049">HADOOP-3049</a>. Fixes a problem in MultiThreadedMapRunner to do with
-catching RuntimeExceptions.<br />(Alejandro Abdelnur via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3039">HADOOP-3039</a>. Fixes a problem to do with exceptions in tasks not
-killing jobs.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3027">HADOOP-3027</a>. Fixes a problem to do with adding a shutdown hook in
-FileSystem.<br />(Amareshwari Sriramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3056">HADOOP-3056</a>. Fix distcp when the target is an empty directory by
-making sure the directory is created first.<br />(cdouglas and acmurthy
-via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3070">HADOOP-3070</a>. Protect the trash emptier thread from null pointer
-exceptions.<br />(Koji Noguchi via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3084">HADOOP-3084</a>. Fix HftpFileSystem to work for zero-lenghth files.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3107">HADOOP-3107</a>. Fix NPE when fsck invokes getListings.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3104">HADOOP-3104</a>. Limit MultithreadedMapRunner to have a fixed length queue
-between the RecordReader and the map threads.<br />(Alejandro Abdelnur via
-omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2833">HADOOP-2833</a>. Do not use "Dr. Who" as the default user in JobClient.
-A valid user name is required. (Tsz Wo (Nicholas), SZE via rangadi)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3128">HADOOP-3128</a>. Throw RemoteException in setPermissions and setOwner of
-DistributedFileSystem.<br />(shv via nigel)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.16.1_-_2008-03-13_')">Release 0.16.1 - 2008-03-13
-</a></h3>
-<ul id="release_0.16.1_-_2008-03-13_">
-  <li><a href="javascript:toggleList('release_0.16.1_-_2008-03-13_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(1)
-    <ol id="release_0.16.1_-_2008-03-13_._incompatible_changes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2869">HADOOP-2869</a>. Deprecate SequenceFile.setCompressionType in favor of
-SequenceFile.createWriter, SequenceFileOutputFormat.setCompressionType,
-and JobConf.setMapOutputCompressionType. (Arun C Murthy via cdouglas)
-Configuration changes to hadoop-default.xml:
-  deprecated io.seqfile.compression.type
-</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.16.1_-_2008-03-13_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(2)
-    <ol id="release_0.16.1_-_2008-03-13_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2371">HADOOP-2371</a>. User guide for file permissions in HDFS.<br />(Robert Chansler via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3098">HADOOP-3098</a>. Allow more characters in user and group names while
-using -chown and -chgrp commands.<br />(rangadi)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.16.1_-_2008-03-13_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(31)
-    <ol id="release_0.16.1_-_2008-03-13_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2789">HADOOP-2789</a>. Race condition in IPC Server Responder that could close
-connections early.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2785">HADOOP-2785</a>. minor. Fix a typo in Datanode block verification<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2788">HADOOP-2788</a>. minor. Fix help message for chgrp shell command (Raghu Angadi).
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1188">HADOOP-1188</a>. fstime file is updated when a storage directory containing
-namespace image becomes inaccessible.<br />(shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2787">HADOOP-2787</a>. An application can set a configuration variable named
-dfs.umask to set the umask that is used by DFS.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2780">HADOOP-2780</a>. The default socket buffer size for DataNodes is 128K.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2716">HADOOP-2716</a>. Superuser privileges for the Balancer.
-(Tsz Wo (Nicholas), SZE via shv)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2754">HADOOP-2754</a>. Filter out .crc files from local file system listing.<br />(Hairong Kuang via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2733">HADOOP-2733</a>. Fix compiler warnings in test code.
-(Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2725">HADOOP-2725</a>. Modify distcp to avoid leaving partially copied files at
-the destination after encountering an error. (Tsz Wo (Nicholas), SZE
-via cdouglas)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2391">HADOOP-2391</a>. Cleanup job output directory before declaring a job as
-SUCCESSFUL.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2808">HADOOP-2808</a>. Minor fix to FileUtil::copy to mind the overwrite
-formal.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2683">HADOOP-2683</a>. Moving UGI out of the RPC Server.
-(Tsz Wo (Nicholas), SZE via shv)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2814">HADOOP-2814</a>. Fix for NPE in datanode in unit test TestDataTransferProtocol.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2811">HADOOP-2811</a>. Dump of counters in job history does not add comma between
-groups.<br />(runping via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2735">HADOOP-2735</a>. Enables setting TMPDIR for tasks.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2843">HADOOP-2843</a>. Fix protections on map-side join classes to enable derivation.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2840">HADOOP-2840</a>. Fix gridmix scripts to correctly invoke the java sort through
-the proper jar.<br />(Mukund Madhugiri via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2769">HADOOP-2769</a>.  TestNNThroughputBnechmark should not use a fixed port for
-the namenode http port.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2852">HADOOP-2852</a>. Update gridmix benchmark to avoid an artifically long tail.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2894">HADOOP-2894</a>. Fix a problem to do with tasktrackers failing to connect to
-JobTracker upon reinitialization. (Owen O'Malley via ddas).
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2903">HADOOP-2903</a>.  Fix exception generated by Metrics while using pushMetric().<br />(girish vaitheeswaran via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2904">HADOOP-2904</a>.  Fix to RPC metrics to log the correct host name.<br />(girish vaitheeswaran via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2918">HADOOP-2918</a>.  Improve error logging so that dfs writes failure with
-"No lease on file" can be diagnosed.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2923">HADOOP-2923</a>.  Add SequenceFileAsBinaryInputFormat, which was
-missed in the commit for <a href="http://issues.apache.org/jira/browse/HADOOP-2603">HADOOP-2603</a>.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2931">HADOOP-2931</a>. IOException thrown by DFSOutputStream had wrong stack
-trace in some cases.<br />(Michael Bieniosek via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2883">HADOOP-2883</a>. Write failures and data corruptions on HDFS files.
-The write timeout is back to what it was in the 0.15 release. Also, the
-datanode flushes the block file's buffered output stream before
-sending a positive ack for the packet back to the client.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2756">HADOOP-2756</a>. NPE in DFSClient while closing DFSOutputStreams
-under load.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2958">HADOOP-2958</a>. Fixed FileBench which broke due to <a href="http://issues.apache.org/jira/browse/HADOOP-2391">HADOOP-2391</a> which performs
-a check for existence of the output directory and a trivial bug in
-GenericMRLoadGenerator where min/max word lenghts were identical since
-they were looking at the same config variables<br />(Chris Douglas via
-acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2915">HADOOP-2915</a>. Fixed FileSystem.CACHE so that a username is included
-in the cache key. (Tsz Wo (Nicholas), SZE via nigel)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2813">HADOOP-2813</a>. TestDU unit test uses its own directory to run its
-sequence of tests.<br />(Mahadev Konar via dhruba)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_')">Release 0.16.0 - 2008-02-07
-</a></h3>
-<ul id="release_0.16.0_-_2008-02-07_">
-  <li><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(14)
-    <ol id="release_0.16.0_-_2008-02-07_._incompatible_changes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1245">HADOOP-1245</a>.  Use the mapred.tasktracker.tasks.maximum value
-configured on each tasktracker when allocating tasks, instead of
-the value configured on the jobtracker. InterTrackerProtocol
-version changed from 5 to 6.<br />(Michael Bieniosek via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1843">HADOOP-1843</a>. Removed code from Configuration and JobConf deprecated by
-<a href="http://issues.apache.org/jira/browse/HADOOP-785">HADOOP-785</a> and a minor fix to Configuration.toString. Specifically the
-important change is that mapred-default.xml is no longer supported and
-Configuration no longer supports the notion of default/final resources.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1302">HADOOP-1302</a>.  Remove deprecated abacus code from the contrib directory.
-This also fixes a configuration bug in AggregateWordCount, so that the
-job now works.<br />(enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2288">HADOOP-2288</a>.  Enhance FileSystem API to support access control.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2184">HADOOP-2184</a>.  RPC Support for user permissions and authentication.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2185">HADOOP-2185</a>.  RPC Server uses any available port if the specified
-port is zero. Otherwise it uses the specified port. Also combines
-the configuration attributes for the servers' bind address and
-port from "x.x.x.x" and "y" to "x.x.x.x:y".
-Deprecated configuration variables:
-  dfs.info.bindAddress
-  dfs.info.port
-  dfs.datanode.bindAddress
-  dfs.datanode.port
-  dfs.datanode.info.bindAdress
-  dfs.datanode.info.port
-  dfs.secondary.info.bindAddress
-  dfs.secondary.info.port
-  mapred.job.tracker.info.bindAddress
-  mapred.job.tracker.info.port
-  mapred.task.tracker.report.bindAddress
-  tasktracker.http.bindAddress
-  tasktracker.http.port
-New configuration variables (post <a href="http://issues.apache.org/jira/browse/HADOOP-2404">HADOOP-2404</a>):
-  dfs.secondary.http.address
-  dfs.datanode.address
-  dfs.datanode.http.address
-  dfs.http.address
-  mapred.job.tracker.http.address
-  mapred.task.tracker.report.address
-  mapred.task.tracker.http.address<br />(Konstantin Shvachko via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2401">HADOOP-2401</a>.  Only the current leaseholder can abandon a block for
-a HDFS file.  ClientProtocol version changed from 20 to 21.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2381">HADOOP-2381</a>.  Support permission information in FileStatus. Client
-Protocol version changed from 21 to 22.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2110">HADOOP-2110</a>. Block report processing creates fewer transient objects.
-Datanode Protocol version changed from 10 to 11.<br />(Sanjay Radia via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2567">HADOOP-2567</a>.  Add FileSystem#getHomeDirectory(), which returns the
-user's home directory in a FileSystem as a fully-qualified path.
-FileSystem#getWorkingDirectory() is also changed to return a
-fully-qualified path, which can break applications that attempt
-to, e.g., pass LocalFileSystem#getWorkingDir().toString() directly
-to java.io methods that accept file names.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2514">HADOOP-2514</a>.  Change trash feature to maintain a per-user trash
-directory, named ".Trash" in the user's home directory.  The
-"fs.trash.root" parameter is no longer used.  Full source paths
-are also no longer reproduced within the trash.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2012">HADOOP-2012</a>. Periodic data verification on Datanodes.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1707">HADOOP-1707</a>. The DFSClient does not use a local disk file to cache
-writes to a HDFS file. Changed Data Transfer Version from 7 to 8.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2652">HADOOP-2652</a>. Fix permission issues for HftpFileSystem. This is an
-incompatible change since distcp may not be able to copy files
-from cluster A (compiled with this patch) to cluster B (compiled
-with previous versions). (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_._new_features_')">  NEW FEATURES
-</a>&nbsp;&nbsp;&nbsp;(13)
-    <ol id="release_0.16.0_-_2008-02-07_._new_features_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1857">HADOOP-1857</a>.  Ability to run a script when a task fails to capture stack
-traces.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2299">HADOOP-2299</a>.  Defination of a login interface.  A simple implementation for
-Unix users and groups.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1652">HADOOP-1652</a>.  A utility to balance data among datanodes in a HDFS cluster.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2085">HADOOP-2085</a>.  A library to support map-side joins of consistently
-partitioned and sorted data sets.<br />(Chris Douglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2336">HADOOP-2336</a>. Shell commands to modify file permissions.<br />(rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1298">HADOOP-1298</a>. Implement file permissions for HDFS.
-(Tsz Wo (Nicholas) &amp; taton via cutting)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2447">HADOOP-2447</a>. HDFS can be configured to limit the total number of
-objects (inodes and blocks) in the file system.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2487">HADOOP-2487</a>. Added an option to get statuses for all submitted/run jobs.
-This information can be used to develop tools for analysing jobs.<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1873">HADOOP-1873</a>. Implement user permissions for Map/Reduce framework.<br />(Hairong Kuang via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2532">HADOOP-2532</a>.  Add to MapFile a getClosest method that returns the key
-that comes just before if the key is not present.<br />(stack via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1883">HADOOP-1883</a>. Add versioning to Record I/O.<br />(Vivek Ratan via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2603">HADOOP-2603</a>.  Add SeqeunceFileAsBinaryInputFormat, which reads
-sequence files as BytesWritable/BytesWritable regardless of the
-key and value types used to write the file.<br />(cdouglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2367">HADOOP-2367</a>. Add ability to profile a subset of map/reduce tasks and fetch
-the result to the local filesystem of the submitting application. Also
-includes a general IntegerRanges extension to Configuration for setting
-positive, ranged parameters.<br />(Owen O'Malley via cdouglas)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(54)
-    <ol id="release_0.16.0_-_2008-02-07_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2045">HADOOP-2045</a>.  Change committer list on website to a table, so that
-folks can list their organization, timezone, etc.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2058">HADOOP-2058</a>.  Facilitate creating new datanodes dynamically in
-MiniDFSCluster.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1855">HADOOP-1855</a>.  fsck verifies block placement policies and reports
-violations.<br />(Konstantin Shvachko via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1604">HADOOP-1604</a>.  An system administrator can finalize namenode upgrades
-without running the cluster.<br />(Konstantin Shvachko via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1839">HADOOP-1839</a>.  Link-ify the Pending/Running/Complete/Killed grid in
-jobdetails.jsp to help quickly narrow down and see categorized TIPs'
-details via jobtasks.jsp.<br />(Amar Kamat via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1210">HADOOP-1210</a>.  Log counters in job history.<br />(Owen O'Malley via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1912">HADOOP-1912</a>. Datanode has two new commands COPY and REPLACE. These are
-needed for supporting data rebalance.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2086">HADOOP-2086</a>. This patch adds the ability to add dependencies to a job
-(run via JobControl) after construction.<br />(Adrian Woodhead via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1185">HADOOP-1185</a>. Support changing the logging level of a server without
-restarting the server.  (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2134">HADOOP-2134</a>.  Remove developer-centric requirements from overview.html and
-keep it end-user focussed, specifically sections related to subversion and
-building Hadoop.<br />(Jim Kellerman via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1989">HADOOP-1989</a>. Support simulated DataNodes. This helps creating large virtual
-clusters for testing purposes.<br />(Sanjay Radia via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1274">HADOOP-1274</a>. Support different number of mappers and reducers per
-TaskTracker to  allow administrators to better configure and utilize
-heterogenous clusters.
-Configuration changes to hadoop-default.xml:
-  add mapred.tasktracker.map.tasks.maximum (default value of 2)
-  add mapred.tasktracker.reduce.tasks.maximum (default value of 2)
-  remove mapred.tasktracker.tasks.maximum (deprecated for 0.16.0)<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2104">HADOOP-2104</a>. Adds a description to the ant targets. This makes the
-output of "ant -projecthelp" sensible.<br />(Chris Douglas via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2127">HADOOP-2127</a>. Added a pipes sort example to benchmark trivial pipes
-application versus trivial java application.<br />(omalley via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2113">HADOOP-2113</a>. A new shell command "dfs -text" to view the contents of
-a gziped or SequenceFile.<br />(Chris Douglas via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2207">HADOOP-2207</a>.  Add a "package" target for contrib modules that
-permits each to determine what files are copied into release
-builds.<br />(stack via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1984">HADOOP-1984</a>. Makes the backoff for failed fetches exponential.
-Earlier, it was a random backoff from an interval.<br />(Amar Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1327">HADOOP-1327</a>.  Include website documentation for streaming.<br />(Rob Weltman
-via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2000">HADOOP-2000</a>.  Rewrite NNBench to measure namenode performance accurately.
-It now uses the map-reduce framework for load generation.<br />(Mukund Madhugiri via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2248">HADOOP-2248</a>. Speeds up the framework w.r.t Counters. Also has API
-updates to the Counters part.<br />(Owen O'Malley via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2326">HADOOP-2326</a>. The initial block report at Datanode startup time has
-a random backoff period.<br />(Sanjay Radia via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2432">HADOOP-2432</a>. HDFS includes the name of the file while throwing
-"File does not exist"  exception.<br />(Jim Kellerman via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2457">HADOOP-2457</a>. Added a 'forrest.home' property to the 'docs' target in
-build.xml.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2149">HADOOP-2149</a>.  A new benchmark for three name-node operation: file create,
-open, and block report, to evaluate the name-node performance
-for optimizations or new features.<br />(Konstantin Shvachko via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2466">HADOOP-2466</a>. Change FileInputFormat.computeSplitSize to a protected
-non-static method to allow sub-classes to provide alternate
-implementations.<br />(Alejandro Abdelnur via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2425">HADOOP-2425</a>. Change TextOutputFormat to handle Text specifically for better
-performance. Make NullWritable implement Comparable. Make TextOutputFormat
-treat NullWritable like null.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1719">HADOOP-1719</a>. Improves the utilization of shuffle copier threads.<br />(Amar Kamat via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2390">HADOOP-2390</a>. Added documentation for user-controls for intermediate
-map-outputs &amp; final job-outputs and native-hadoop libraries.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1660">HADOOP-1660</a>. Add the cwd of the map/reduce task to the java.library.path
-of the child-jvm to support loading of native libraries distributed via
-the DistributedCache.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2285">HADOOP-2285</a>. Speeds up TextInputFormat. Also includes updates to the
-Text API.<br />(Owen O'Malley via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2233">HADOOP-2233</a>. Adds a generic load generator for modeling MR jobs.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2369">HADOOP-2369</a>. Adds a set of scripts for simulating a mix of user map/reduce
-workloads.<br />(Runping Qi via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2547">HADOOP-2547</a>. Removes use of a 'magic number' in build.xml.<br />(Hrishikesh via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2268">HADOOP-2268</a>. Fix org.apache.hadoop.mapred.jobcontrol classes to use the
-List/Map interfaces rather than concrete ArrayList/HashMap classes
-internally.<br />(Adrian Woodhead via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2406">HADOOP-2406</a>. Add a benchmark for measuring read/write performance through
-the InputFormat interface, particularly with compression.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2131">HADOOP-2131</a>. Allow finer-grained control over speculative-execution. Now
-users can set it for maps and reduces independently.
-Configuration changes to hadoop-default.xml:
-  deprecated mapred.speculative.execution
-  add mapred.map.tasks.speculative.execution
-  add mapred.reduce.tasks.speculative.execution<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1965">HADOOP-1965</a>. Interleave sort/spill in teh map-task along with calls to the
-Mapper.map method. This is done by splitting the 'io.sort.mb' buffer into
-two and using one half for collecting map-outputs and the other half for
-sort/spill.<br />(Amar Kamat via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2464">HADOOP-2464</a>. Unit tests for chmod, chown, and chgrp using DFS.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1876">HADOOP-1876</a>. Persist statuses of completed jobs in HDFS so that the
-JobClient can query and get information about decommissioned jobs and also
-across JobTracker restarts.
-Configuration changes to hadoop-default.xml:
-  add mapred.job.tracker.persist.jobstatus.active (default value of false)
-  add mapred.job.tracker.persist.jobstatus.hours (default value of 0)
-  add mapred.job.tracker.persist.jobstatus.dir (default value of
-                                                /jobtracker/jobsInfo)<br />(Alejandro Abdelnur via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2077">HADOOP-2077</a>. Added version and build information to STARTUP_MSG for all
-hadoop daemons to aid error-reporting, debugging etc.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2398">HADOOP-2398</a>. Additional instrumentation for NameNode and RPC server.
-Add support for accessing instrumentation statistics via JMX.<br />(Sanjay Radia via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2449">HADOOP-2449</a>. A return of the non-MR version of NNBench.<br />(Sanjay Radia via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1989">HADOOP-1989</a>. Remove 'datanodecluster' command from bin/hadoop.<br />(Sanjay Radia via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1742">HADOOP-1742</a>. Improve JavaDoc documentation for ClientProtocol, DFSClient,
-and FSNamesystem.<br />(Konstantin Shvachko)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2298">HADOOP-2298</a>. Add Ant target for a binary-only distribution.<br />(Hrishikesh via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2509">HADOOP-2509</a>. Add Ant target for Rat report (Apache license header
-reports).<br />(Hrishikesh via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2469">HADOOP-2469</a>.  WritableUtils.clone should take a Configuration
-instead of a JobConf.<br />(stack via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2659">HADOOP-2659</a>. Introduce superuser permissions for admin operations.
-(Tsz Wo (Nicholas), SZE via shv)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2596">HADOOP-2596</a>. Added a SequenceFile.createWriter api which allows the user
-to specify the blocksize, replication factor and the buffersize to be
-used for the underlying HDFS file.<br />(Alejandro Abdelnur via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2431">HADOOP-2431</a>. Test HDFS File Permissions.<br />(Hairong Kuang via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2232">HADOOP-2232</a>. Add an option to disable Nagle's algorithm in the IPC stack.<br />(Clint Morgan via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2342">HADOOP-2342</a>. Created a micro-benchmark for measuring
-local-file versus hdfs reads.<br />(Owen O'Malley via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2529">HADOOP-2529</a>. First version of HDFS User Guide.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2690">HADOOP-2690</a>. Add jar-test target to build.xml, separating compilation
-and packaging of the test classes.<br />(Enis Soztutar via cdouglas)</li>
-    </ol>
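-<p>A minimal, illustrative sketch of how a job might use the new per-TaskTracker slot
-properties from <a href="http://issues.apache.org/jira/browse/HADOOP-1274">HADOOP-1274</a> and the
-per-type speculative-execution switches from
-<a href="http://issues.apache.org/jira/browse/HADOOP-2131">HADOOP-2131</a>; the class name and the
-property values below are hypothetical examples, not recommended settings.</p>
-<pre>
-import org.apache.hadoop.mapred.JobConf;
-
-// Hypothetical sketch; slot limits are normally set cluster-wide in hadoop-site.xml,
-// not per job, so this only illustrates the new property names.
-public class SpeculationConfigSketch {
-  public static void main(String[] args) {
-    JobConf conf = new JobConf();
-
-    // Per-TaskTracker task slots (HADOOP-1274): replace mapred.tasktracker.tasks.maximum.
-    conf.setInt("mapred.tasktracker.map.tasks.maximum", 4);
-    conf.setInt("mapred.tasktracker.reduce.tasks.maximum", 2);
-
-    // Speculative execution (HADOOP-2131): maps and reduces toggled independently.
-    conf.setBoolean("mapred.map.tasks.speculative.execution", true);
-    conf.setBoolean("mapred.reduce.tasks.speculative.execution", false);
-  }
-}
-</pre>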
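-<p>A minimal sketch of the kind of subclass that
-<a href="http://issues.apache.org/jira/browse/HADOOP-2466">HADOOP-2466</a> enables, assuming the
-method keeps its (goalSize, minSize, blockSize) arguments; the class name and the 64&nbsp;MB cap
-are made-up examples.</p>
-<pre>
-import org.apache.hadoop.mapred.TextInputFormat;
-
-// Hypothetical input format that caps every split at 64 MB.
-public class CappedSplitTextInputFormat extends TextInputFormat {
-  private static final long MAX_SPLIT_BYTES = 64L * 1024 * 1024;
-
-  @Override
-  protected long computeSplitSize(long goalSize, long minSize, long blockSize) {
-    // Delegate to the default computation, then apply the cap.
-    long size = super.computeSplitSize(goalSize, minSize, blockSize);
-    return Math.min(size, MAX_SPLIT_BYTES);
-  }
-}
-</pre>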
-  </li>
-  <li><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_._optimizations_')">  OPTIMIZATIONS
-</a>&nbsp;&nbsp;&nbsp;(4)
-    <ol id="release_0.16.0_-_2008-02-07_._optimizations_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1898">HADOOP-1898</a>.  Release the lock protecting the last time of the last stack
-dump while the dump is happening.<br />(Amareshwari Sri Ramadasu via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1900">HADOOP-1900</a>. Makes the heartbeat and task event queries interval
-dependent on the cluster size.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2208">HADOOP-2208</a>. Counter update frequency (from TaskTracker to JobTracker) is
-capped at 1 minute.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2284">HADOOP-2284</a>. Reduce the number of progress updates during the sorting in
-the map task.<br />(Amar Kamat via ddas)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.16.0_-_2008-02-07_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(90)
-    <ol id="release_0.16.0_-_2008-02-07_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2583">HADOOP-2583</a>.  Fixes a bug in the Eclipse plug-in UI to edit locations.
-Plug-in version is now synchronized with Hadoop version.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2100">HADOOP-2100</a>.  Remove faulty check for existence of $HADOOP_PID_DIR and let
-'mkdir -p' check &amp; create it.<br />(Michael Bieniosek via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1642">HADOOP-1642</a>.  Ensure jobids generated by LocalJobRunner are unique to
-avoid collisions and hence job-failures.<br />(Doug Cutting via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2096">HADOOP-2096</a>.  Close open file-descriptors held by streams while localizing
-job.xml in the JobTracker and while displaying it on the webui in
-jobconf.jsp.<br />(Amar Kamat via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2098">HADOOP-2098</a>.  Log start &amp; completion of empty jobs to JobHistory, which
-also ensures that we close the file-descriptor of the job's history log
-opened during job-submission.<br />(Amar Kamat via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2112">HADOOP-2112</a>.  Adding back changes to build.xml lost while reverting
-<a href="http://issues.apache.org/jira/browse/HADOOP-1622">HADOOP-1622</a> i.e. http://svn.apache.org/viewvc?view=rev&amp;revision=588771.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2089">HADOOP-2089</a>.  Fixes the command line argument handling to handle multiple
--cacheArchive in Hadoop streaming.<br />(Lohit Vijayarenu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2071">HADOOP-2071</a>.  Fix StreamXmlRecordReader to use a BufferedInputStream
-wrapped over the DFSInputStream since mark/reset aren't supported by
-DFSInputStream anymore.<br />(Lohit Vijayarenu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1348">HADOOP-1348</a>.  Allow XML comments inside configuration files.<br />(Rajagopal Natarajan and Enis Soztutar via enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1952">HADOOP-1952</a>.  Improve handling of invalid, user-specified classes while
-configuring streaming jobs such as combiner, input/output formats etc.
-Now invalid options are caught, logged and jobs are failed early.<br />(Lohit
-Vijayarenu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2151">HADOOP-2151</a>. FileSystem.globPaths validates the list of Paths that
-it returns.<br />(Lohit Vijayarenu via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2121">HADOOP-2121</a>. Cleanup DFSOutputStream when the stream encountered errors
-when Datanodes became full.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1130">HADOOP-1130</a>. The FileSystem.closeAll() method closes all existing
-DFSClients.<br />(Chris Douglas via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2204">HADOOP-2204</a>. DFSTestUtil.waitReplication was not waiting for all replicas
-to get created, thus causing unit test failure.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2078">HADOOP-2078</a>. An zero size file may have no blocks associated with it.<br />(Konstantin Shvachko via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2212">HADOOP-2212</a>. ChecksumFileSystem.getSumBufferSize might throw
-java.lang.ArithmeticException. The fix is to initialize bytesPerChecksum
-to 0.<br />(Michael Bieniosek via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2216">HADOOP-2216</a>.  Fix jobtasks.jsp to ensure that it first collects the
-taskids which satisfy the filtering criteria and then use that list to
-print out only the required task-reports, previously it was oblivious to
-the filtering and hence used the wrong index into the array of task-reports.<br />(Amar Kamat via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2272">HADOOP-2272</a>.  Fix findbugs target to reflect changes made to the location
-of the streaming jar file by <a href="http://issues.apache.org/jira/browse/HADOOP-2207">HADOOP-2207</a>.<br />(Adrian Woodhead via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2244">HADOOP-2244</a>.  Fixes the MapWritable.readFields to clear the instance
-field variable every time readFields is called. (Michael Stack via ddas).
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2245">HADOOP-2245</a>.  Fixes LocalJobRunner to include a jobId in the mapId. Also,
-adds a testcase for JobControl. (Adrian Woodhead via ddas).
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2275">HADOOP-2275</a>. Fix erroneous detection of corrupted file when namenode
-fails to allocate any datanodes for newly allocated block.<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2256">HADOOP-2256</a>. Fix a buf in the namenode that could cause it to encounter
-an infinite loop while deleting excess replicas that were created by
-block rebalancing.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2209">HADOOP-2209</a>. SecondaryNamenode process exits if it encounters exceptions
-that it cannot handle.<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2314">HADOOP-2314</a>. Prevent TestBlockReplacement from occasionally getting
-into an infinite loop.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2300">HADOOP-2300</a>. This fixes a bug where mapred.tasktracker.tasks.maximum
-would be ignored even if it was set in hadoop-site.xml.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2349">HADOOP-2349</a>.  Improve code layout in file system transaction logging code.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2368">HADOOP-2368</a>.  Fix unit tests on Windows.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2363">HADOOP-2363</a>.  This fix allows running multiple instances of the unit test
-in parallel. The bug was introduced in <a href="http://issues.apache.org/jira/browse/HADOOP-2185">HADOOP-2185</a> that changed
-port-rolling behaviour.<br />(Konstantin Shvachko via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2271">HADOOP-2271</a>.  Fix chmod task to be non-parallel.<br />(Adrian Woodhead via
-omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2313">HADOOP-2313</a>.  Fail the build if building libhdfs fails.<br />(nigel via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2359">HADOOP-2359</a>.  Remove warning for interruptted exception when closing down
-minidfs.<br />(dhruba via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1841">HADOOP-1841</a>. Prevent slow clients from consuming threads in the NameNode.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2323">HADOOP-2323</a>. JobTracker.close() should not print stack traces for
-normal exit.<br />(jimk via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2376">HADOOP-2376</a>. Prevents sort example from overriding the number of maps.<br />(Owen O'Malley via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2434">HADOOP-2434</a>. FSDatasetInterface read interface causes HDFS reads to occur
-in 1 byte chunks, causing performance degradation.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2459">HADOOP-2459</a>. Fix package target so that src/docs/build files are not
-included in the release.<br />(nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2215">HADOOP-2215</a>.  Fix documentation in cluster_setup.html &amp;
-mapred_tutorial.html reflect that mapred.tasktracker.tasks.maximum has
-been superceeded by mapred.tasktracker.{map|reduce}.tasks.maximum.<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2459">HADOOP-2459</a>. Fix package target so that src/docs/build files are not
-included in the release.<br />(nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2352">HADOOP-2352</a>. Remove AC_CHECK_LIB for libz and liblzo to ensure that
-libhadoop.so doesn't have a dependency on them.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2453">HADOOP-2453</a>. Fix the configuration for wordcount-simple example in Hadoop
-Pipes which currently produces an XML parsing error.<br />(Amareshwari Sri
-Ramadasu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2476">HADOOP-2476</a>. Unit test failure while reading permission bits of local
-file system (on Windows) fixed.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2247">HADOOP-2247</a>.  Fine-tune the strategies for killing mappers and reducers
-due to failures while fetching map-outputs. Now the map-completion times
-and number of currently running reduces are taken into account by the
-JobTracker before killing the mappers, while the progress made by the
-reducer and the number of fetch-failures vis-a-vis total number of
-fetch-attempts are taken into account before the reducer kills itself.<br />(Amar Kamat via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2452">HADOOP-2452</a>. Fix eclipse plug-in build.xml to refers to the right
-location where hadoop-*-core.jar is generated.<br />(taton)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2492">HADOOP-2492</a>. Additional debugging in the rpc server to better
-diagnose ConcurrentModificationException.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2344">HADOOP-2344</a>. Enhance the utility for executing shell commands to read the
-stdout/stderr streams while waiting for the command to finish (to free up
-the buffers). Also, this patch throws away stderr of the DF utility.
-@deprecated
-  org.apache.hadoop.fs.ShellCommand for org.apache.hadoop.util.Shell
-  org.apache.hadoop.util.ShellUtil for
-    org.apache.hadoop.util.Shell.ShellCommandExecutor<br />(Amar Kamat via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2511">HADOOP-2511</a>. Fix a javadoc warning in org.apache.hadoop.util.Shell
-introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-2344">HADOOP-2344</a>.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2442">HADOOP-2442</a>. Fix TestLocalFileSystemPermission.testLocalFSsetOwner
-to work on more platforms.<br />(Raghu Angadi via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2488">HADOOP-2488</a>. Fix a regression in random read performance.<br />(Michael Stack via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2523">HADOOP-2523</a>. Fix TestDFSShell.testFilePermissions on Windows.<br />(Raghu Angadi via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2535">HADOOP-2535</a>. Removed support for deprecated mapred.child.heap.size and
-fixed some indentation issues in TaskRunner. (acmurthy)
-Configuration changes to hadoop-default.xml:
-  remove mapred.child.heap.size
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2512">HADOOP-2512</a>. Fix error stream handling in Shell. Use exit code to
-detect shell command errors in RawLocalFileSystem.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2446">HADOOP-2446</a>. Fixes TestHDFSServerPorts and TestMRServerPorts so they
-do not rely on statically configured ports and cleanup better.<br />(nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2537">HADOOP-2537</a>. Make build process compatible with Ant 1.7.0.<br />(Hrishikesh via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1281">HADOOP-1281</a>. Ensure running tasks of completed map TIPs (e.g. speculative
-tasks) are killed as soon as the TIP completes.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2571">HADOOP-2571</a>. Suppress a suprious warning in test code.<br />(cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2481">HADOOP-2481</a>. NNBench report its progress periodically.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2601">HADOOP-2601</a>. Start name-node on a free port for TestNNThroughputBenchmark.<br />(Konstantin Shvachko)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2494">HADOOP-2494</a>.  Set +x on contrib/*/bin/* in packaged tar bundle.<br />(stack via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2605">HADOOP-2605</a>. Remove bogus leading slash in task-tracker report bindAddress.<br />(Konstantin Shvachko)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2620">HADOOP-2620</a>. Trivial. 'bin/hadoop fs -help' did not list chmod, chown, and
-chgrp.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2614">HADOOP-2614</a>. The DFS WebUI accesses are configured to be from the user
-specified by dfs.web.ugi.  (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2543">HADOOP-2543</a>. Implement a "no-permission-checking" mode for smooth
-upgrade from a pre-0.16 install of HDFS.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-290">HADOOP-290</a>. A DataNode log message now prints the target of a replication
-request correctly.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2538">HADOOP-2538</a>. Redirect to a warning, if plaintext parameter is true but
-the filter parameter is not given in TaskLogServlet.<br />(Michael Bieniosek via enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2582">HADOOP-2582</a>. Prevent 'bin/hadoop fs -copyToLocal' from creating
-zero-length files when the src does not exist.<br />(Lohit Vijayarenu via cdouglas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2189">HADOOP-2189</a>. Incrementing user counters should count as progress.<br />(ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2649">HADOOP-2649</a>. The NameNode periodically computes replication work for
-the datanodes. The periodicity of this computation is now configurable.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2549">HADOOP-2549</a>. Correct disk size computation so that data-nodes could switch
-to other local drives if current is full.<br />(Hairong Kuang via shv)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2633">HADOOP-2633</a>. Fsck should call name-node methods directly rather than
-through rpc. (Tsz Wo (Nicholas), SZE via shv)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2687">HADOOP-2687</a>. Modify a few log message generated by dfs client to be
-logged only at INFO level.<br />(stack via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2402">HADOOP-2402</a>. Fix BlockCompressorStream to ensure it buffers data before
-sending it down to the compressor so that each write call doesn't
-compress.<br />(Chris Douglas via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2645">HADOOP-2645</a>. The Metrics initialization code does not throw
-exceptions when servers are restarted by MiniDFSCluster.<br />(Sanjay Radia via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2691">HADOOP-2691</a>. Fix a race condition that was causing the DFSClient
-to erroneously remove a good datanode from a pipeline that actually
-had another datanode that was bad.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1195">HADOOP-1195</a>. All code in FSNamesystem checks the return value
-of getDataNode for null before using it.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2640">HADOOP-2640</a>. Fix a bug in MultiFileSplitInputFormat that was always
-returning 1 split in some circumstances.<br />(Enis Soztutar via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2626">HADOOP-2626</a>. Fix paths with special characters to work correctly
-with the local filesystem.<br />(Thomas Friol via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2646">HADOOP-2646</a>. Fix SortValidator to work with fully-qualified
-working directories.<br />(Arun C Murthy via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2092">HADOOP-2092</a>. Added a ping mechanism to the pipes' task to periodically
-check if the parent Java task is running, and exit if the parent isn't
-alive and responding.<br />(Amareshwari Sri Ramadasu via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2714">HADOOP-2714</a>. TestDecommission failed on windows because the replication
-request was timing out.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2576">HADOOP-2576</a>. Namenode performance degradation over time triggered by
-large heartbeat interval.<br />(Raghu Angadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2713">HADOOP-2713</a>. TestDatanodeDeath failed on windows because the replication
-request was timing out.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2639">HADOOP-2639</a>. Fixes a problem to do with incorrect maintenance of values
-for runningMapTasks/runningReduceTasks.<br />(Amar Kamat and Arun Murthy
-via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2723">HADOOP-2723</a>. Fixed the check for checking whether to do user task
-profiling.<br />(Amareshwari Sri Ramadasu via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2734">HADOOP-2734</a>. Link forrest docs to new http://hadoop.apache.org<br />(Doug Cutting via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2641">HADOOP-2641</a>. Added Apache license headers to 95 files.<br />(nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2732">HADOOP-2732</a>. Fix bug in path globbing.<br />(Hairong Kuang via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2404">HADOOP-2404</a>. Fix backwards compatability with hadoop-0.15 configuration
-files that was broken by <a href="http://issues.apache.org/jira/browse/HADOOP-2185">HADOOP-2185</a>.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2755">HADOOP-2755</a>. Fix fsck performance degradation because of permissions
-issue.  (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2768">HADOOP-2768</a>. Fix performance regression caused by <a href="http://issues.apache.org/jira/browse/HADOOP-1707">HADOOP-1707</a>.<br />(dhruba borthakur via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3108">HADOOP-3108</a>. Fix NPE in setPermission and setOwner.<br />(shv)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.15.3_-_2008-01-18_')">Release 0.15.3 - 2008-01-18
-</a></h3>
-<ul id="release_0.15.3_-_2008-01-18_">
-  <li><a href="javascript:toggleList('release_0.15.3_-_2008-01-18_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(4)
-    <ol id="release_0.15.3_-_2008-01-18_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2562">HADOOP-2562</a>. globPaths supports {ab,cd}.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2540">HADOOP-2540</a>. fsck reports missing blocks incorrectly.<br />(dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2570">HADOOP-2570</a>. "work" directory created unconditionally, and symlinks
-created from the task cwds.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2574">HADOOP-2574</a>. Fixed mapred_tutorial.xml to correct minor errors with the
-WordCount examples.<br />(acmurthy)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.15.2_-_2008-01-02_')">Release 0.15.2 - 2008-01-02
-</a></h3>
-<ul id="release_0.15.2_-_2008-01-02_">
-  <li><a href="javascript:toggleList('release_0.15.2_-_2008-01-02_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(11)
-    <ol id="release_0.15.2_-_2008-01-02_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2246">HADOOP-2246</a>.  Moved the changelog for <a href="http://issues.apache.org/jira/browse/HADOOP-1851">HADOOP-1851</a> from the NEW FEATURES
-section to the INCOMPATIBLE CHANGES section.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2238">HADOOP-2238</a>.  Fix TaskGraphServlet so that it sets the content type of
-the response appropriately.<br />(Paul Saab via enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2129">HADOOP-2129</a>.  Fix so that distcp works correctly when source is
-HDFS but not the default filesystem.  HDFS paths returned by the
-listStatus() method are now fully-qualified.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2378">HADOOP-2378</a>.  Fixes a problem where the last task completion event would
-get created after the job completes.<br />(Alejandro Abdelnur via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2228">HADOOP-2228</a>.  Checks whether a job with a certain jobId is already running
-and then tries to create the JobInProgress object.<br />(Johan Oskarsson via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2422">HADOOP-2422</a>.  dfs -cat multiple files fail with 'Unable to write to
-output stream'.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2460">HADOOP-2460</a>.  When the namenode encounters ioerrors on writing a
-transaction log, it stops writing new transactions to that one.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2227">HADOOP-2227</a>.  Use the LocalDirAllocator uniformly for handling all of the
-temporary storage required for a given task. It also implies that
-mapred.local.dir.minspacestart is handled by checking if there is enough
-free-space on any one of the available disks.<br />(Amareshwari Sri Ramadasu
-via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2437">HADOOP-2437</a>.  Fix the LocalDirAllocator to choose the seed for the
-round-robin disk selections randomly. This helps in spreading data across
-multiple partitions much better.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2486">HADOOP-2486</a>. When the list of files from the InMemoryFileSystem is obtained
-for merging, this patch will ensure that only those files whose checksums
-have also got created (renamed) are returned.<br />(ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2456">HADOOP-2456</a>. Hardcode English locale to prevent NumberFormatException
-from occurring when starting the NameNode with certain locales.<br />(Matthias Friedrich via nigel)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.15.2_-_2008-01-02_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(4)
-    <ol id="release_0.15.2_-_2008-01-02_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2160">HADOOP-2160</a>.  Remove project-level, non-user documentation from
-releases, since it's now maintained in a separate tree.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1327">HADOOP-1327</a>.  Add user documentation for streaming.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2382">HADOOP-2382</a>.  Add hadoop-default.html to subversion.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2158">HADOOP-2158</a>. hdfsListDirectory calls FileSystem.listStatus instead
-of FileSystem.listPaths. This reduces the number of RPC calls on the
-namenode, thereby improving scalability.<br />(Christian Kunz via dhruba)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.15.1_-_2007-11-27_')">Release 0.15.1 - 2007-11-27
-</a></h3>
-<ul id="release_0.15.1_-_2007-11-27_">
-  <li><a href="javascript:toggleList('release_0.15.1_-_2007-11-27_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(1)
-    <ol id="release_0.15.1_-_2007-11-27_._incompatible_changes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-713">HADOOP-713</a>.  Reduce CPU usage on namenode while listing directories.
-FileSystem.listPaths does not return the size of the entire subtree.
-Introduced a new API ClientProtocol.getContentLength that returns the
-size of the subtree.<br />(Dhruba Borthakur via dhruba)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.15.1_-_2007-11-27_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(1)
-    <ol id="release_0.15.1_-_2007-11-27_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1917">HADOOP-1917</a>.  Addition of guides/tutorial for better overall
-documentation for Hadoop. Specifically:
-* quickstart.html is targeted towards first-time users and helps them
-  set up a single-node cluster and play with Hadoop.
-* cluster_setup.html helps admins configure and set up non-trivial
-  hadoop clusters.
-* mapred_tutorial.html is a comprehensive Map-Reduce tutorial.<br />(acmurthy)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.15.1_-_2007-11-27_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(3)
-    <ol id="release_0.15.1_-_2007-11-27_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2174">HADOOP-2174</a>.  Removed the unnecessary Reporter.setStatus call from
-FSCopyFilesMapper.close which led to an NPE since the reporter isn't valid
-in the close method.<br />(Chris Douglas via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2172">HADOOP-2172</a>.  Restore performance of random access to local files
-by caching positions of local input streams, avoiding a system
-call.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2205">HADOOP-2205</a>.  Regenerate the Hadoop website since some of the changes made
-by <a href="http://issues.apache.org/jira/browse/HADOOP-1917">HADOOP-1917</a> weren't correctly copied over to the trunk/docs directory.
-Also fixed a couple of minor typos and broken links.<br />(acmurthy)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.15.0_-_2007-11-2_')">Release 0.15.0 - 2007-11-2
-</a></h3>
-<ul id="release_0.15.0_-_2007-11-2_">
-  <li><a href="javascript:toggleList('release_0.15.0_-_2007-11-2_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(10)
-    <ol id="release_0.15.0_-_2007-11-2_._incompatible_changes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1708">HADOOP-1708</a>.  Make files appear in namespace as soon as they are
-created.<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-999">HADOOP-999</a>.  A HDFS Client immediately informs the NameNode of a new
-file creation.  ClientProtocol version changed from 14 to 15.
-(Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-932">HADOOP-932</a>.  File locking interfaces and implementations (that were
-earlier deprecated) are removed.  Client Protocol version changed
-from 15 to 16.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1621">HADOOP-1621</a>.  FileStatus is now a concrete class and FileSystem.listPaths
-is deprecated and replaced with listStatus.<br />(Chris Douglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1656">HADOOP-1656</a>.  The blockSize of a file is stored persistently in the file
-inode.<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1838">HADOOP-1838</a>.  The blocksize of files created with an earlier release is
-set to the default block size.<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-785">HADOOP-785</a>.  Add support for 'final' Configuration parameters,
-removing support for 'mapred-default.xml', and changing
-'hadoop-site.xml' to not override other files.  Now folks should
-generally use 'hadoop-site.xml' for all configurations.  Values
-with a 'final' tag may not be overridden by subsequently loaded
-configuration files, e.g., by jobs.<br />(Arun C. Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1846">HADOOP-1846</a>. DatanodeReport in ClientProtocol can report live
-datanodes, dead datanodes or all datanodes. Client Protocol version
-changed from 17 to 18.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1851">HADOOP-1851</a>.  Permit specification of map output compression type
-and codec, independent of the final output's compression
-parameters.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1819">HADOOP-1819</a>.  Jobtracker cleanups, including binding ports before
-clearing state directories, so that inadvertently starting a
-second jobtracker doesn't trash one that's already running. Removed
-method JobTracker.getTracker() because the static variable that
-stored its value caused initialization problems.<br />(omalley via cutting)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.15.0_-_2007-11-2_._new_features_')">  NEW FEATURES
-</a>&nbsp;&nbsp;&nbsp;(14)
-    <ol id="release_0.15.0_-_2007-11-2_._new_features_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-89">HADOOP-89</a>.  A client can access file data even before the creator
-has closed the file. Introduce a new command "tail" from dfs shell.<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1636">HADOOP-1636</a>.  Allow configuration of the number of jobs kept in
-memory by the JobTracker.<br />(Michael Bieniosek via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1667">HADOOP-1667</a>.  Reorganize CHANGES.txt into sections to make it
-easier to read.  Also remove numbering, to make merging easier.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1610">HADOOP-1610</a>.  Add metrics for failed tasks.<br />(Devaraj Das via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1767">HADOOP-1767</a>.  Add "bin/hadoop job -list" sub-command.<br />(taton via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1351">HADOOP-1351</a>.  Add "bin/hadoop job [-fail-task|-kill-task]" sub-commands
-to terminate a particular task-attempt.<br />(Enis Soztutar via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1880">HADOOP-1880</a>. SleepJob : An example job that sleeps at each map and
-reduce task.<br />(enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1809">HADOOP-1809</a>. Add a link in web site to #hadoop IRC channel.<br />(enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1894">HADOOP-1894</a>. Add percentage graphs and mapred task completion graphs
-to Web User Interface. Users not using Firefox may install a plugin to
-their browsers to see svg graphics.<br />(enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1914">HADOOP-1914</a>. Introduce a new NamenodeProtocol to allow secondary
-namenodes and rebalancing processes to communicate with a primary
-namenode.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1963">HADOOP-1963</a>.  Add a FileSystem implementation for the Kosmos
-Filesystem (KFS).<br />(Sriram Rao via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1822">HADOOP-1822</a>.  Allow the specialization and configuration of socket
-factories. Provide a StandardSocketFactory, and a SocksSocketFactory to
-allow the use of SOCKS proxies. (taton).
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1968">HADOOP-1968</a>. FileSystem supports wildcard input syntax "{ }".<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2566">HADOOP-2566</a>. Add globStatus method to the FileSystem interface
-and deprecate globPath and listPath.<br />(Hairong Kuang via hairong)</li>
-    </ol>
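-<p>A minimal usage sketch of the wildcard syntax from
-<a href="http://issues.apache.org/jira/browse/HADOOP-1968">HADOOP-1968</a> together with the
-globStatus call added by <a href="http://issues.apache.org/jira/browse/HADOOP-2566">HADOOP-2566</a>;
-the class name and the path pattern are made-up examples.</p>
-<pre>
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-public class GlobStatusSketch {
-  public static void main(String[] args) throws Exception {
-    FileSystem fs = FileSystem.get(new Configuration());
-    // {nov,dec} expands to either alternative; * matches within a path component.
-    FileStatus[] matches = fs.globStatus(new Path("/logs/2007-{nov,dec}-*"));
-    if (matches != null) {
-      for (FileStatus status : matches) {
-        System.out.println(status.getPath());
-      }
-    }
-  }
-}
-</pre>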
-  </li>
-  <li><a href="javascript:toggleList('release_0.15.0_-_2007-11-2_._optimizations_')">  OPTIMIZATIONS
-</a>&nbsp;&nbsp;&nbsp;(8)
-    <ol id="release_0.15.0_-_2007-11-2_._optimizations_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1910">HADOOP-1910</a>.  Reduce the number of RPCs that DistributedFileSystem.create()
-makes to the namenode.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1565">HADOOP-1565</a>.  Reduce memory usage of NameNode by replacing
-TreeMap in HDFS Namespace with ArrayList.<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1743">HADOOP-1743</a>.  Change DFS INode from a nested class to standalone
-class, with specialized subclasses for directories and files, to
-save memory on the namenode.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1759">HADOOP-1759</a>.  Change file name in INode from String to byte[],
-saving memory on the namenode.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1766">HADOOP-1766</a>.  Save memory in namenode by having BlockInfo extend
-Block, and replace many uses of Block with BlockInfo.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1687">HADOOP-1687</a>.  Save memory in namenode by optimizing BlockMap
-representation.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1774">HADOOP-1774</a>. Remove use of INode.parent in Block CRC upgrade.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1788">HADOOP-1788</a>.  Increase the buffer size on the Pipes command socket.<br />(Amareshwari Sri Ramadasu and Christian Kunz via omalley)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.15.0_-_2007-11-2_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(64)
-    <ol id="release_0.15.0_-_2007-11-2_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1946">HADOOP-1946</a>.  The Datanode code does not need to invoke du on
-every heartbeat.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1935">HADOOP-1935</a>. Fix a NullPointerException in internalReleaseCreate.<br />(Dhruba Borthakur)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1933">HADOOP-1933</a>. The nodes listed in include and exclude files
-are always listed in the datanode report.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1953">HADOOP-1953</a>. The job tracker should wait beteween calls to try and delete
-the system directory<br />(Owen O'Malley via devaraj)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1932">HADOOP-1932</a>. TestFileCreation fails with message saying filestatus.dat
-is of incorrect size.<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1573">HADOOP-1573</a>. Support for 0 reducers in PIPES.<br />(Owen O'Malley via devaraj)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1500">HADOOP-1500</a>. Fix typographical errors in the DFS WebUI.<br />(Nigel Daley via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1076">HADOOP-1076</a>. Periodic checkpoint can continue even if an earlier
-checkpoint encountered an error.<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1887">HADOOP-1887</a>. The Namenode encounters an ArrayIndexOutOfBoundsException
-while listing a directory that had a file that was
-being actively written to.<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1904">HADOOP-1904</a>. The Namenode encounters an exception because the
-list of blocks per datanode-descriptor was corrupted.<br />(Konstantin Shvachko via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1762">HADOOP-1762</a>. The Namenode fsimage does not contain a list of
-Datanodes.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1890">HADOOP-1890</a>. Removed debugging prints introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-1774">HADOOP-1774</a>.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1763">HADOOP-1763</a>. Too many lost task trackers on large clusters due to
-insufficient number of RPC handler threads on the JobTracker.<br />(Devaraj Das)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1463">HADOOP-1463</a>.  HDFS report correct usage statistics for disk space
-used by HDFS.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1692">HADOOP-1692</a>.  In DFS ant task, don't cache the Configuration.<br />(Chris Douglas via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1726">HADOOP-1726</a>.  Remove lib/jetty-ext/ant.jar.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1772">HADOOP-1772</a>.  Fix hadoop-daemon.sh script to get correct hostname
-under Cygwin.  (Tsz Wo (Nicholas), SZE via cutting)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1749">HADOOP-1749</a>.  Change TestDFSUpgrade to sort files, fixing sporadic
-test failures.<br />(Enis Soztutar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1748">HADOOP-1748</a>.  Fix tasktracker to be able to launch tasks when log
-directory is relative.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1775">HADOOP-1775</a>.  Fix a NullPointerException and an
-IllegalArgumentException in MapWritable.<br />(Jim Kellerman via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1795">HADOOP-1795</a>.  Fix so that jobs can generate output file names with
-special characters.<br />(Frédéric Bertin via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1810">HADOOP-1810</a>.  Fix incorrect value type in MRBench (SmallJobs)<br />(Devaraj Das via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1806">HADOOP-1806</a>.  Fix ant task to compile again, also fix default
-builds to compile ant tasks.<br />(Chris Douglas via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1758">HADOOP-1758</a>.  Fix escape processing in librecordio to not be
-quadratic.<br />(Vivek Ratan via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1817">HADOOP-1817</a>.  Fix MultiFileSplit to read and write the split
-length, so that it is not always zero in map tasks.<br />(Thomas Friol via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1853">HADOOP-1853</a>.  Fix contrib/streaming to accept multiple -cacheFile
-options.<br />(Prachi Gupta via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1818">HADOOP-1818</a>. Fix MultiFileInputFormat so that it does not return
-empty splits when numPaths &lt; numSplits.<br />(Thomas Friol via enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1840">HADOOP-1840</a>. Fix race condition which leads to task's diagnostic
-messages getting lost.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1885">HADOOP-1885</a>. Fix race condition in MiniDFSCluster shutdown.<br />(Chris Douglas via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1889">HADOOP-1889</a>.  Fix path in EC2 scripts for building your own AMI.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1892">HADOOP-1892</a>.  Fix a NullPointerException in the JobTracker when
-trying to fetch a task's diagnostic messages from the JobClient.<br />(Amar Kamat via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1897">HADOOP-1897</a>.  Completely remove about.html page from the web site.<br />(enis)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1907">HADOOP-1907</a>.  Fix null pointer exception when getting task diagnostics
-in JobClient.<br />(Christian Kunz via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1882">HADOOP-1882</a>.  Remove spurious asterisks from decimal number displays.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1783">HADOOP-1783</a>.  Make S3 FileSystem return Paths fully-qualified with
-scheme and host.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1925">HADOOP-1925</a>.  Make pipes' autoconf script look for libsocket and libnsl, so
-that it can compile under Solaris.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1940">HADOOP-1940</a>.  TestDFSUpgradeFromImage must shut down its MiniDFSCluster.<br />(Chris Douglas via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1930">HADOOP-1930</a>.  Fix the blame for failed fetchs on the right host.<br />(Arun C.
-Murthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1934">HADOOP-1934</a>.  Fix the platform name on Mac to use underscores rather than
-spaces.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1959">HADOOP-1959</a>.  Use "/" instead of File.separator in the StatusHttpServer.<br />(jimk via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1626">HADOOP-1626</a>.  Improve dfsadmin help messages.<br />(Lohit Vijayarenu via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1695">HADOOP-1695</a>.  The SecondaryNamenode waits for the Primary NameNode to
-start up.<br />(Dhruba Borthakur)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1983">HADOOP-1983</a>.  Have Pipes flush the command socket when progress is sent
-to prevent timeouts during long computations.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1875">HADOOP-1875</a>.  Non-existant directories or read-only directories are
-filtered from dfs.client.buffer.dir.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1992">HADOOP-1992</a>.  Fix the performance degradation in the sort validator.<br />(acmurthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1874">HADOOP-1874</a>.  Move task-outputs' promotion/discard to a separate thread
-distinct from the main heartbeat-processing thread. The main upside being
-that we do not lock-up the JobTracker during HDFS operations, which
-otherwise may lead to lost tasktrackers if the NameNode is unresponsive.<br />(Devaraj Das via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2026">HADOOP-2026</a>. Namenode prints out one log line for "Number of transactions"
-at most once every minute.<br />(Dhruba Borthakur)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2022">HADOOP-2022</a>.  Ensure that status information for successful tasks is correctly
-recorded at the JobTracker, so that, for example, one may view correct
-information via taskdetails.jsp. This bug was introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-1874">HADOOP-1874</a>.<br />(Amar Kamat via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2031">HADOOP-2031</a>.  Correctly maintain the taskid which takes the TIP to
-completion, failing which the case of lost tasktrackers isn't handled
-properly i.e. the map TIP is incorrectly left marked as 'complete' and it
-is never rescheduled elsewhere, leading to hung reduces.<br />(Devaraj Das via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2018">HADOOP-2018</a>. The source datanode of a data transfer waits for
-a response from the target datanode before closing the data stream.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2023">HADOOP-2023</a>. Disable TestLocalDirAllocator on Windows.<br />(Hairong Kuang via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2016">HADOOP-2016</a>.  Ignore status-updates from FAILED/KILLED tasks at the
-TaskTracker. This fixes a race-condition which caused the tasks to wrongly
-remain in the RUNNING state even after being killed by the JobTracker,
-thus handicapping the cleanup of the task's output sub-directory.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1771">HADOOP-1771</a>. Fix a NullPointerException in streaming caused by an
-IOException in MROutputThread.<br />(lohit vijayarenu via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2028">HADOOP-2028</a>. Fix distcp so that the log dir does not need to be
-specified and the destination does not need to exist.<br />(Chris Douglas via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2044">HADOOP-2044</a>. The namenode protects all lease manipulations using a
-sortedLease lock.<br />(Dhruba Borthakur)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2051">HADOOP-2051</a>. The TaskCommit thread should not die for exceptions other
-than the InterruptedException. This matches the behavior of the other
-long-running threads in the JobTracker.<br />(Arun C Murthy via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1973">HADOOP-1973</a>. The FileSystem object would be accessed on the JobTracker
-through an RPC in the InterTrackerProtocol. The check for the object being
-null was missing and hence an NPE would sometimes be thrown. This issue fixes
-that problem.<br />(Amareshwari Sri Ramadasu via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2033">HADOOP-2033</a>.  The SequenceFile.Writer.sync method was a no-op, which caused
-very uneven splits for applications like distcp that count on them.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2070">HADOOP-2070</a>.  Added a flush method to pipes' DownwardProtocol and call
-that before waiting for the application to finish to ensure all buffered
-data is flushed.<br />(Owen O'Malley via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2080">HADOOP-2080</a>.  Fixed calculation of the checksum file size when the values
-are large.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2048">HADOOP-2048</a>.  Change error handling in distcp so that each map copies
-as much as possible before reporting the error. Also report progress on
-every copy.<br />(Chris Douglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2073">HADOOP-2073</a>.  Change size of VERSION file after writing contents to it.<br />(Konstantin Shvachko via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2102">HADOOP-2102</a>.  Fix the deprecated ToolBase to pass its Configuration object
-to the superseding ToolRunner to ensure it picks up the appropriate
-configuration resources.<br />(Dennis Kubes and Enis Soztutar via acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2103">HADOOP-2103</a>.  Fix minor javadoc bugs introduce by <a href="http://issues.apache.org/jira/browse/HADOOP-2046">HADOOP-2046</a>.<br />(Nigel
-Daley via acmurthy)</li>
-    </ol>
-  </li>
-  <li><a href="javascript:toggleList('release_0.15.0_-_2007-11-2_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(37)
-    <ol id="release_0.15.0_-_2007-11-2_._improvements_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1908">HADOOP-1908</a>. Restructure data node code so that block sending and
-receiving are separated from data transfer header handling.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1921">HADOOP-1921</a>. Save the configuration of completed/failed jobs and make them
-available via the web-ui.<br />(Amar Kamat via devaraj)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1266">HADOOP-1266</a>. Remove dependency of package org.apache.hadoop.net on
-org.apache.hadoop.dfs.<br />(Hairong Kuang via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1779">HADOOP-1779</a>. Replace INodeDirectory.getINode() by a getExistingPathINodes()
-to allow the retrieval of all existing INodes along a given path in a
-single lookup. This facilitates removal of the 'parent' field in the
-inode.<br />(Christophe Taton via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1756">HADOOP-1756</a>. Add toString() to some Writable-s.<br />(ab)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1727">HADOOP-1727</a>.  New classes: MapWritable and SortedMapWritable.<br />(Jim Kellerman via ab)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1651">HADOOP-1651</a>.  Improve progress reporting.<br />(Devaraj Das via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1595">HADOOP-1595</a>.  dfsshell can wait for a file to achieve its intended
-replication target. (Tsz Wo (Nicholas), SZE via dhruba)
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1693">HADOOP-1693</a>.  Remove un-needed log fields in DFS replication classes,
-since the log may be accessed statically.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1231">HADOOP-1231</a>.  Add generics to Mapper and Reducer interfaces.<br />(tomwhite via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1436">HADOOP-1436</a>.  Improved command-line APIs, so that all tools need
-not subclass ToolBase, and generic parameter parser is public.<br />(Enis Soztutar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1703">HADOOP-1703</a>.  DFS-internal code cleanups, removing several uses of
-the obsolete UTF8.<br />(Christophe Taton via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1731">HADOOP-1731</a>.  Add Hadoop's version to contrib jar file names.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1689">HADOOP-1689</a>.  Make shell scripts more portable.  All shell scripts
-now explicitly depend on bash, but do not require that bash be
-installed in a particular location, as long as it is on $PATH.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1744">HADOOP-1744</a>.  Remove many uses of the deprecated UTF8 class from
-the HDFS namenode.<br />(Christophe Taton via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1654">HADOOP-1654</a>.  Add IOUtils class, containing generic io-related
-utility methods.<br />(Enis Soztutar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1158">HADOOP-1158</a>.  Change JobTracker to record map-output transmission
-errors and use them to trigger speculative re-execution of tasks.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1601">HADOOP-1601</a>.  Change GenericWritable to use ReflectionUtils for
-instance creation, avoiding classloader issues, and to implement
-Configurable.<br />(Enis Soztutar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1750">HADOOP-1750</a>.  Log standard output and standard error when forking
-task processes.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1803">HADOOP-1803</a>.  Generalize build.xml to make files in all
-src/contrib/*/bin directories executable.<br />(stack via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1739">HADOOP-1739</a>.  Let OS always choose the tasktracker's umbilical
-port.  Also switch default address for umbilical connections to
-loopback.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1812">HADOOP-1812</a>. Let OS choose ports for IPC and RPC unit tests.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1825">HADOOP-1825</a>.  Create $HADOOP_PID_DIR when it does not exist.<br />(Michael Bieniosek via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1425">HADOOP-1425</a>.  Replace uses of ToolBase with the Tool interface.<br />(Enis Soztutar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1569">HADOOP-1569</a>.  Reimplement DistCP to use the standard FileSystem/URI
-code in Hadoop so that you can copy from and to all of the supported file
-systems.<br />(Chris Douglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1018">HADOOP-1018</a>.  Improve documentation w.r.t handling of lost hearbeats between
-TaskTrackers and JobTracker.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1718">HADOOP-1718</a>.  Add ant targets for measuring code coverage with clover.<br />(simonwillnauer via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1592">HADOOP-1592</a>.  Log error messages to the client console when tasks
-fail.<br />(Amar Kamat via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1879">HADOOP-1879</a>.  Remove some unneeded casts.<br />(Nilay Vaish via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1878">HADOOP-1878</a>.  Add space between priority links on job details
-page.<br />(Thomas Friol via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-120">HADOOP-120</a>.  In ArrayWritable, prevent creation with null value
-class, and improve documentation.<br />(Cameron Pope via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1926">HADOOP-1926</a>. Add a random text writer example/benchmark so that we can
-benchmark compression codecs on random data.<br />(acmurthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1906">HADOOP-1906</a>. Warn the user if they have an obsolete madred-default.xml
-file in their configuration directory.<br />(acmurthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1971">HADOOP-1971</a>.  Warn when job does not specify a jar.<br />(enis via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1942">HADOOP-1942</a>. Increase the concurrency of transaction logging to
-edits log. Reduce the number of syncs by double-buffering the changes
-to the transaction log.<br />(Dhruba Borthakur)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2046">HADOOP-2046</a>.  Improve mapred javadoc.<br />(Arun C. Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2105">HADOOP-2105</a>.  Improve overview.html to clarify supported platforms,
-software pre-requisites for hadoop, how to install them on various
-platforms, and a better general description of hadoop and its utility.<br />(Jim Kellerman via acmurthy)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.14.4_-_2007-11-26_')">Release 0.14.4 - 2007-11-26
-</a></h3>
-<ul id="release_0.14.4_-_2007-11-26_">
-  <li><a href="javascript:toggleList('release_0.14.4_-_2007-11-26_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(3)
-    <ol id="release_0.14.4_-_2007-11-26_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2140">HADOOP-2140</a>.  Add missing Apache Licensing text at the front of several
-C and C++ files.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2169">HADOOP-2169</a>.  Fix the DT_SONAME field of libhdfs.so to set it to the
-correct value of 'libhdfs.so'; currently it is set to the absolute path of
-libhdfs.so.<br />(acmurthy)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2001">HADOOP-2001</a>.  Make the job priority updates and job kills synchronized on
-the JobTracker. Deadlock was seen in the JobTracker because of the lack of
-this synchronization.<br />(Arun C Murthy via ddas)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.14.3_-_2007-10-19_')">Release 0.14.3 - 2007-10-19
-</a></h3>
-<ul id="release_0.14.3_-_2007-10-19_">
-  <li><a href="javascript:toggleList('release_0.14.3_-_2007-10-19_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(3)
-    <ol id="release_0.14.3_-_2007-10-19_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2053">HADOOP-2053</a>. Fixed a dangling reference to a memory buffer in the map
-output sorter.<br />(acmurthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2036">HADOOP-2036</a>. Fix a NullPointerException in JvmMetrics class.<br />(nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2043">HADOOP-2043</a>. Release 0.14.2 was compiled with Java 1.6 rather than
-Java 1.5.<br />(cutting)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.14.2_-_2007-10-09_')">Release 0.14.2 - 2007-10-09
-</a></h3>
-<ul id="release_0.14.2_-_2007-10-09_">
-  <li><a href="javascript:toggleList('release_0.14.2_-_2007-10-09_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(9)
-    <ol id="release_0.14.2_-_2007-10-09_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1948">HADOOP-1948</a>. Removed spurious error message during block crc upgrade.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1862">HADOOP-1862</a>.  reduces are getting stuck trying to find map outputs.<br />(Arun C. Murthy via ddas)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1977">HADOOP-1977</a>. Fixed handling of ToolBase cli options in JobClient.<br />(enis via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1972">HADOOP-1972</a>.  Fix LzoCompressor to ensure the user has actually asked
-to finish compression.<br />(arun via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1970">HADOOP-1970</a>.  Fix deadlock in progress reporting in the task.<br />(Vivek
-Ratan via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1978">HADOOP-1978</a>.  Name-node removes edits.new after a successful startup.<br />(Konstantin Shvachko via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1955">HADOOP-1955</a>.  The Namenode tries to not pick the same source Datanode for
-a replication request if the earlier replication request for the same
-block and that source Datanode had failed.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1961">HADOOP-1961</a>.  The -get option to dfs-shell works when a single filename
-is specified.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1997">HADOOP-1997</a>.  TestCheckpoint closes the edits file after writing to it,
-otherwise the rename of this file on Windows fails.<br />(Konstantin Shvachko via dhruba)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.14.1_-_2007-09-04_')">Release 0.14.1 - 2007-09-04
-</a></h3>
-<ul id="release_0.14.1_-_2007-09-04_">
-  <li><a href="javascript:toggleList('release_0.14.1_-_2007-09-04_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(3)
-    <ol id="release_0.14.1_-_2007-09-04_._bug_fixes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1740">HADOOP-1740</a>.  Fix null pointer exception in sorting map outputs.<br />(Devaraj
-Das via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1790">HADOOP-1790</a>.  Fix tasktracker to work correctly on multi-homed
-boxes.<br />(Torsten Curdt via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1798">HADOOP-1798</a>.  Fix jobtracker to correctly account for failed
-tasks.<br />(omalley via cutting)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.14.0_-_2007-08-17_')">Release 0.14.0 - 2007-08-17
-</a></h3>
-<ul id="release_0.14.0_-_2007-08-17_">
-  <li><a href="javascript:toggleList('release_0.14.0_-_2007-08-17_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(160)
-    <ol id="release_0.14.0_-_2007-08-17_._incompatible_changes_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1134">HADOOP-1134</a>.
-CONFIG/API - dfs.block.size must now be a multiple of
-  io.bytes.per.checksum, otherwise new files cannot be written.
-LAYOUT - DFS layout version changed from -6 to -7, which will require an
-  upgrade from previous versions.
-PROTOCOL - Datanode RPC protocol version changed from 7 to 8.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1283">HADOOP-1283</a>
-API - deprecated file locking API.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-894">HADOOP-894</a>
-PROTOCOL - changed ClientProtocol to fetch parts of block locations.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1336">HADOOP-1336</a>
-CONFIG - Enable speculative execution by default.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1197">HADOOP-1197</a>
-API - deprecated method for Configuration.getObject, because
-  Configurations should only contain strings.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1343">HADOOP-1343</a>
-API - deprecate Configuration.set(String,Object) so that only strings are
-  put in Configurations.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1207">HADOOP-1207</a>
-CLI - Fix FsShell 'rm' command to continue when a non-existent file is
-  encountered.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1473">HADOOP-1473</a>
-CLI/API - Job, TIP, and Task id formats have changed and are now unique
-  across job tracker restarts.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1400">HADOOP-1400</a>
-API - JobClient constructor now takes a JobConf object instead of a
-  Configuration object.
-<p/>
-  NEW FEATURES and BUG FIXES
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1197">HADOOP-1197</a>.  In Configuration, deprecate getObject() and add
-getRaw(), which skips variable expansion.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1343">HADOOP-1343</a>.  In Configuration, deprecate set(String,Object) and
-implement Iterable.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1344">HADOOP-1344</a>.  Add RunningJob#getJobName().<br />(Michael Bieniosek via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1342">HADOOP-1342</a>.  In aggregators, permit one to limit the number of
-unique values per key.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1340">HADOOP-1340</a>.  Set the replication factor of the MD5 file in the filecache
-to be the same as the replication factor of the original file.<br />(Dhruba Borthakur via tomwhite.)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1355">HADOOP-1355</a>.  Fix null pointer dereference in
-TaskLogAppender.append(LoggingEvent).<br />(Arun C Murthy via tomwhite.)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1357">HADOOP-1357</a>.  Fix CopyFiles to correctly avoid removing "/".<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-234">HADOOP-234</a>.  Add pipes facility, which permits writing MapReduce
-programs in C++.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1359">HADOOP-1359</a>.  Fix a potential NullPointerException in HDFS.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1364">HADOOP-1364</a>.  Fix inconsistent synchronization in SequenceFile.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1379">HADOOP-1379</a>.  Add findbugs target to build.xml.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1364">HADOOP-1364</a>.  Fix various inconsistent synchronization issues.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1393">HADOOP-1393</a>.  Remove a potential unexpected negative number from
-uses of random number generator.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1387">HADOOP-1387</a>.  A number of "performance" code-cleanups suggested
-by findbugs.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1401">HADOOP-1401</a>.  Add contrib/hbase javadoc to tree.<br />(stack via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-894">HADOOP-894</a>.  Change HDFS so that the client only retrieves a limited
-number of block locations per request from the namenode.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1406">HADOOP-1406</a>.  Plug a leak in MapReduce's use of metrics.<br />(David Bowen via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1394">HADOOP-1394</a>.  Implement "performance" code-cleanups in HDFS
-suggested by findbugs.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1413">HADOOP-1413</a>.  Add example program that uses Knuth's dancing links
-algorithm to solve pentomino problems.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1226">HADOOP-1226</a>.  Change HDFS so that paths it returns are always
-fully qualified.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-800">HADOOP-800</a>.  Improvements to HDFS web-based file browser.<br />(Enis Soztutar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1408">HADOOP-1408</a>.  Fix a compiler warning by adding a class to replace
-a generic.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1376">HADOOP-1376</a>.  Modify RandomWriter example so that it can generate
-data for the Terasort benchmark.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1429">HADOOP-1429</a>.  Stop logging exceptions during normal IPC server
-shutdown.<br />(stack via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1461">HADOOP-1461</a>.  Fix the synchronization of the task tracker to
-avoid lockups in job cleanup.<br />(Arun C Murthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1446">HADOOP-1446</a>.  Update the TaskTracker metrics while the task is
-running.<br />(Devaraj via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1414">HADOOP-1414</a>.  Fix a number of issues identified by FindBugs as
-"Bad Practice".<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1392">HADOOP-1392</a>.  Fix "correctness" bugs identified by FindBugs in
-fs and dfs packages.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1412">HADOOP-1412</a>.  Fix "dodgy" bugs identified by FindBugs in fs and
-io packages.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1261">HADOOP-1261</a>.  Remove redundant events from HDFS namenode's edit
-log when a datanode restarts.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1336">HADOOP-1336</a>.  Re-enable speculative execution by
-default.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1311">HADOOP-1311</a>.  Fix a bug in BytesWritable#set() where start offset
-was ignored.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1450">HADOOP-1450</a>.  Move checksumming closer to user code, so that
-checksums are created before data is stored in large buffers and
-verified after data is read from large buffers, to better catch
-memory errors.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1447">HADOOP-1447</a>.  Add support in contrib/data_join for text inputs.<br />(Senthil Subramanian via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1456">HADOOP-1456</a>.  Fix TestDecommission assertion failure by setting
-the namenode to ignore the load on datanodes while allocating
-replicas.<br />(Dhruba Borthakur via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1396">HADOOP-1396</a>.  Fix FileNotFoundException on DFS block.<br />(Dhruba Borthakur via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1467">HADOOP-1467</a>.  Remove redundant counters from WordCount example.<br />(Owen O'Malley via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1139">HADOOP-1139</a>.  Log HDFS block transitions at INFO level, to better
-enable diagnosis of problems.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1269">HADOOP-1269</a>.  Finer grained locking in HDFS namenode.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1438">HADOOP-1438</a>.  Improve HDFS documentation, correcting typos and
-making images appear in PDF.  Also update copyright date for all
-docs.<br />(Luke Nezda via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1457">HADOOP-1457</a>.  Add counters for monitoring task assignments.<br />(Arun C Murthy via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1472">HADOOP-1472</a>.  Fix so that timed-out tasks are counted as failures
-rather than as killed.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1234">HADOOP-1234</a>.  Fix a race condition in file cache that caused
-tasktracker to not be able to find cached files.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1482">HADOOP-1482</a>.  Fix secondary namenode to roll info port.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1300">HADOOP-1300</a>.  Improve removal of excess block replicas to be
-rack-aware.  Attempts are now made to keep replicas on more
-racks.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1417">HADOOP-1417</a>.  Disable a few FindBugs checks that generate a lot
-of spurious warnings.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1320">HADOOP-1320</a>.  Rewrite RandomWriter example to bypass reduce.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1449">HADOOP-1449</a>.  Add some examples to contrib/data_join.<br />(Senthil Subramanian via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1459">HADOOP-1459</a>.  Fix so that, in HDFS, getFileCacheHints() returns
-hostnames instead of IP addresses.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1493">HADOOP-1493</a>.  Permit specification of "java.library.path" system
-property in "mapred.child.java.opts" configuration property.<br />(Enis Soztutar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1372">HADOOP-1372</a>.  Use LocalDirAllocator for HDFS temporary block
-files, so that disk space, writability, etc. is considered.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1193">HADOOP-1193</a>.  Pool allocation of compression codecs.  This
-eliminates a memory leak that could cause OutOfMemoryException,
-and also substantially improves performance.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1492">HADOOP-1492</a>.  Fix a NullPointerException handling version
-mismatch during datanode registration.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1442">HADOOP-1442</a>.  Fix handling of zero-length input splits.<br />(Senthil Subramanian via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1444">HADOOP-1444</a>.  Fix HDFS block id generation to check pending
-blocks for duplicates.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1207">HADOOP-1207</a>.  Fix FsShell's 'rm' command to not stop when one of
-the named files does not exist.<br />(Tsz Wo Sze via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1475">HADOOP-1475</a>.  Clear tasktracker's file cache before it
-re-initializes, to avoid confusion.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1505">HADOOP-1505</a>.  Remove spurious stacktrace in ZlibFactory
-introduced in <a href="http://issues.apache.org/jira/browse/HADOOP-1093">HADOOP-1093</a>.<br />(Michael Stack via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1484">HADOOP-1484</a>.  Permit one to kill jobs from the web ui.  Note that
-this is disabled by default.  One must set
-"webinterface.private.actions" to enable this.<br />(Enis Soztutar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1003">HADOOP-1003</a>.  Remove flushing of namenode edit log from primary
-namenode lock, increasing namenode throughput.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1023">HADOOP-1023</a>.  Add links to searchable mail archives.<br />(tomwhite via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1504">HADOOP-1504</a>.  Fix terminate-hadoop-cluster script in contrib/ec2
-to only terminate Hadoop instances, and not other instances
-started by the same user.<br />(tomwhite via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1462">HADOOP-1462</a>.  Improve task progress reporting.  Progress reports
-are no longer blocking since i/o is performed in a separate
-thread.  Reporting during sorting and more is also more
-consistent.<br />(Vivek Ratan via cutting)</li>
-      <li>[ intentionally blank ]
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1453">HADOOP-1453</a>.  Remove some unneeded calls to FileSystem#exists()
-when opening files, reducing the namenode load somewhat.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1489">HADOOP-1489</a>.  Fix text input truncation bug due to mark/reset.
-Add a unit test.<br />(Bwolen Yang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1455">HADOOP-1455</a>.  Permit specification of arbitrary job options on
-pipes command line.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1501">HADOOP-1501</a>.  Better randomize sending of block reports to
-namenode, to reduce load spikes.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1147">HADOOP-1147</a>.  Remove @author tags from Java source files.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1283">HADOOP-1283</a>.  Convert most uses of UTF8 in the namenode to be
-String.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1511">HADOOP-1511</a>.  Speedup hbase unit tests.<br />(stack via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1517">HADOOP-1517</a>.  Remove some synchronization in namenode to permit
-finer grained locking previously added.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1512">HADOOP-1512</a>.  Fix failing TestTextInputFormat on Windows.<br />(Senthil Subramanian via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1518">HADOOP-1518</a>.  Add a session id to job metrics, for use by HOD.<br />(David Bowen via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1292">HADOOP-1292</a>.  Change 'bin/hadoop fs -get' to first copy files to
-a temporary name, then rename them to their final name, so that
-failures don't leave partial files.<br />(Tsz Wo Sze via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1377">HADOOP-1377</a>.  Add support for modification time to FileSystem and
-implement in HDFS and local implementations.  Also, alter access
-to file properties to be through a new FileStatus interface.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1515">HADOOP-1515</a>.  Add MultiFileInputFormat, which can pack multiple,
-typically small, input files into each split.<br />(Enis Soztutar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1514">HADOOP-1514</a>.  Make reducers report progress while waiting for map
-outputs, so they're not killed.<br />(Vivek Ratan via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1508">HADOOP-1508</a>.  Add an Ant task for FsShell operations.  Also add
-new FsShell commands "touchz", "test" and "stat".<br />(Chris Douglas via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1028">HADOOP-1028</a>.  Add log messages for server startup and shutdown.<br />(Tsz Wo Sze via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1485">HADOOP-1485</a>.  Add metrics for monitoring shuffle.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1536">HADOOP-1536</a>.  Remove file locks from libhdfs tests.<br />(Dhruba Borthakur via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1520">HADOOP-1520</a>.  Add appropriate synchronization to FSEditsLog.<br />(Dhruba Borthakur via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1513">HADOOP-1513</a>.  Fix a race condition in directory creation.<br />(Devaraj via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1546">HADOOP-1546</a>.  Remove spurious column from HDFS web UI.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1556">HADOOP-1556</a>.  Make LocalJobRunner delete working files at end of
-job run.<br />(Devaraj Das via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1571">HADOOP-1571</a>.  Add contrib lib directories to root build.xml
-javadoc classpath.<br />(Michael Stack via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1554">HADOOP-1554</a>.  Log killed tasks to the job history and display them on the
-web UI.<br />(Devaraj Das via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1533">HADOOP-1533</a>.  Add persistent error logging for distcp. The logs are stored
-    into a specified hdfs directory.<br />(Senthil Subramanian via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1286">HADOOP-1286</a>.  Add support to HDFS for distributed upgrades, which
-permits coordinated upgrade of datanode data.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1580">HADOOP-1580</a>.  Improve contrib/streaming so that subprocess exit
-status is displayed for errors.<br />(John Heidemann via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1448">HADOOP-1448</a>.  In HDFS, randomize lists of non-local block
-locations returned to client, so that load is better balanced.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1578">HADOOP-1578</a>.  Fix datanode to send its storage id to namenode
-during registration.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1584">HADOOP-1584</a>.  Fix a bug in GenericWritable which limited it to
-128 types instead of 256.<br />(Espen Amble Kolstad via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1473">HADOOP-1473</a>.  Make job ids unique across jobtracker restarts.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1582">HADOOP-1582</a>.  Fix hdfslib to return 0 instead of -1 at
-end-of-file, per C conventions.<br />(Christian Kunz via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-911">HADOOP-911</a>.  Fix a multithreading bug in libhdfs.<br />(Christian Kunz)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1486">HADOOP-1486</a>.  Fix so that fatal exceptions in namenode cause it
-to exit.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1470">HADOOP-1470</a>.  Factor checksum generation and validation out of
-ChecksumFileSystem so that it can be reused by FileSystems with
-built-in checksumming.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1590">HADOOP-1590</a>.  Use relative urls in jobtracker jsp pages, so that
-webapp can be used in non-root contexts.<br />(Thomas Friol via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1596">HADOOP-1596</a>.  Fix the parsing of taskids by streaming and improve the
-error reporting.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1535">HADOOP-1535</a>.  Fix the user-controlled grouping to the reduce function.<br />(Vivek Ratan via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1585">HADOOP-1585</a>.  Modify GenericWritable to declare the classes as subtypes
-of Writable.<br />(Espen Amble Kolstad via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1576">HADOOP-1576</a>.  Fix errors in count of completed tasks when
-speculative execution is enabled.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1598">HADOOP-1598</a>.  Fix license headers: adding missing; updating old.<br />(Enis Soztutar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1547">HADOOP-1547</a>.  Provide examples for aggregate library.<br />(Runping Qi via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1570">HADOOP-1570</a>.  Permit jobs to enable and disable the use of
-hadoop's native library.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1433">HADOOP-1433</a>.  Add job priority.<br />(Johan Oskarsson via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1597">HADOOP-1597</a>.  Add status reports and post-upgrade options to HDFS
-distributed upgrade.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1524">HADOOP-1524</a>.  Permit user task logs to appear as they're
-created.<br />(Michael Bieniosek via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1599">HADOOP-1599</a>.  Fix distcp bug on Windows.<br />(Senthil Subramanian via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1562">HADOOP-1562</a>.  Add JVM metrics, including GC and logging stats.<br />(David Bowen via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1613">HADOOP-1613</a>.  Fix "DFS Health" page to display correct time of
-last contact.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1134">HADOOP-1134</a>.  Add optimized checksum support to HDFS.  Checksums
-are now stored with each block, rather than as parallel files.
-This reduces the namenode's memory requirements and increases
-data integrity.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1400">HADOOP-1400</a>.  Make JobClient retry requests, so that clients can
-survive jobtracker problems.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1564">HADOOP-1564</a>.  Add unit tests for HDFS block-level checksums.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1620">HADOOP-1620</a>.  Reduce the number of abstract FileSystem methods,
-simplifying implementations.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1625">HADOOP-1625</a>.  Fix a "could not move files" exception in datanode.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1624">HADOOP-1624</a>.  Fix an infinite loop in datanode.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1084">HADOOP-1084</a>.  Switch mapred file cache to use file modification
-time instead of checksum to detect file changes, as checksums are
-no longer easily accessed.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1623">HADOOP-1623</a>.  Fix an infinite loop when copying directories.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1603">HADOOP-1603</a>.  Fix a bug in namenode initialization where
-default replication is sometimes reset to one on restart.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1635">HADOOP-1635</a>.  Remove hardcoded keypair name and fix launch-hadoop-cluster
-to support later versions of ec2-api-tools.<br />(Stu Hood via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1638">HADOOP-1638</a>.  Fix contrib EC2 scripts to support NAT addressing.<br />(Stu Hood via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1632">HADOOP-1632</a>.  Fix an IllegalArgumentException in fsck.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1619">HADOOP-1619</a>.  Fix FSInputChecker to not attempt to read past EOF.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1640">HADOOP-1640</a>.  Fix TestDecommission on Windows.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1587">HADOOP-1587</a>.  Fix TestSymLink to get required system properties.<br />(Devaraj Das via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1628">HADOOP-1628</a>.  Add block CRC protocol unit tests.<br />(Raghu Angadi via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1653">HADOOP-1653</a>.  FSDirectory code-cleanups. FSDirectory.INode
-becomes a static class.<br />(Christophe Taton via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1066">HADOOP-1066</a>.  Restructure documentation to make more user
-friendly.<br />(Connie Kleinjans and Jeff Hammerbacher via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1551">HADOOP-1551</a>.  libhdfs supports setting replication factor and
-retrieving modification time of files.<br />(Sameer Paranjpye via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1647">HADOOP-1647</a>.  FileSystem.getFileStatus returns valid values for "/".<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1657">HADOOP-1657</a>.  Fix NNBench to ensure that the block size is a
-multiple of bytes.per.checksum.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1553">HADOOP-1553</a>.  Replace user task output and log capture code to use shell
-redirection instead of copier threads in the TaskTracker. Capping the
-size of the output is now done via tail in memory and thus should not be
-large. The output of the tasklog servlet is not forced into UTF8 and is
-not buffered entirely in memory. (omalley)
-Configuration changes to hadoop-default.xml:
-  remove mapred.userlog.num.splits
-  remove mapred.userlog.purge.splits
-  change default mapred.userlog.limit.kb to 0 (no limit)
-  change default mapred.userlog.retain.hours to 24
-Configuration changes to log4j.properties:
-  remove log4j.appender.TLA.noKeepSplits
-  remove log4j.appender.TLA.purgeLogSplits
-  remove log4j.appender.TLA.logsRetainHours
-URL changes:
-  http://&lt;tasktracker&gt;/tasklog.jsp -&gt; http://&lt;tasktracker&gt;/tasklog with
-    parameters limited to start and end, which may be positive (from
-    start) or negative (from end).
-Environment:
-  require bash (v2 or later) and tail
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1659">HADOOP-1659</a>.  Fix a job id/job name mixup.<br />(Arun C. Murthy via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1665">HADOOP-1665</a>.  With HDFS Trash enabled and the same file was created
-and deleted more than once, the suceeding deletions creates Trash item
-names suffixed with a integer.<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1666">HADOOP-1666</a>.  FsShell object can be used for multiple fs commands.<br />(Dhruba Borthakur via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1654">HADOOP-1654</a>.  Remove performance regression introduced by Block CRC.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1680">HADOOP-1680</a>.  Improvements to Block CRC upgrade messages.<br />(Raghu Angadi via dhruba)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-71">HADOOP-71</a>.  Allow Text and SequenceFile Map/Reduce inputs from non-default
-filesystems.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1568">HADOOP-1568</a>.  Expose HDFS as xml/http filesystem to provide cross-version
-compatibility.<br />(Chris Douglas via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1668">HADOOP-1668</a>.  Added an INCOMPATIBILITY section to CHANGES.txt.<br />(nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1629">HADOOP-1629</a>.  Added a upgrade test for <a href="http://issues.apache.org/jira/browse/HADOOP-1134">HADOOP-1134</a>.<br />(Raghu Angadi via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1698">HADOOP-1698</a>.  Fix performance problems on map output sorting for jobs
-with large numbers of reduces.<br />(Devaraj Das via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1716">HADOOP-1716</a>.  Fix a Pipes wordcount example to remove the 'file:'
-schema from its output path.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1714">HADOOP-1714</a>.  Fix TestDFSUpgradeFromImage to work on Windows.<br />(Raghu Angadi via nigel)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1663">HADOOP-1663</a>.  Return a non-zero exit code if streaming fails.<br />(Lohit Renu
-via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1712">HADOOP-1712</a>.  Fix an unhandled exception on datanode during block
-CRC upgrade.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1717">HADOOP-1717</a>.  Fix TestDFSUpgradeFromImage to work on Solaris.<br />(nigel via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1437">HADOOP-1437</a>.  Add Eclipse plugin in contrib.<br />(Eugene Hung and Christophe Taton via cutting)</li>
-    </ol>
-  </li>
-</ul>
-<h3><a href="javascript:toggleList('release_0.13.0_-_2007-06-08_')">Release 0.13.0 - 2007-06-08
-</a></h3>
-    <ol id="release_0.13.0_-_2007-06-08_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1047">HADOOP-1047</a>.  Fix TestReplication to succeed more reliably.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1063">HADOOP-1063</a>.  Fix a race condition in MiniDFSCluster test code.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1101">HADOOP-1101</a>.  In web ui, split shuffle statistics from reduce
-statistics, and add some task averages.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1071">HADOOP-1071</a>.  Improve handling of protocol version mismatch in
-JobTracker.<br />(Tahir Hashmi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1116">HADOOP-1116</a>.  Increase heap size used for contrib unit tests.<br />(Philippe Gassmann via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1120">HADOOP-1120</a>.  Add contrib/data_join, tools to simplify joining
-data from multiple sources using MapReduce.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1064">HADOOP-1064</a>.  Reduce log level of some DFSClient messages.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1137">HADOOP-1137</a>.  Fix StatusHttpServer to work correctly when
-resources are in a jar file.<br />(Benjamin Reed via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1094">HADOOP-1094</a>.  Optimize generated Writable implementations for
-records to not allocate a new BinaryOutputArchive or
-BinaryInputArchive per call.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1068">HADOOP-1068</a>.  Improve error message for clusters with 0 datanodes.<br />(Dhruba Borthakur via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1122">HADOOP-1122</a>.  Fix divide-by-zero exception in FSNamesystem
-chooseTarget method.<br />(Dhruba Borthakur via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1131">HADOOP-1131</a>.  Add a closeAll() static method to FileSystem.<br />(Philippe Gassmann via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1085">HADOOP-1085</a>.  Improve port selection in HDFS and MapReduce test
-code.  Ports are now selected by the OS during testing rather than
-by probing for free ports, improving test reliability.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1153">HADOOP-1153</a>.  Fix HDFS daemons to correctly stop their threads.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1146">HADOOP-1146</a>.  Add a counter for reduce input keys and rename the
-"reduce input records" counter to be "reduce input groups".<br />(David Bowen via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1165">HADOOP-1165</a>.  In records, replace idential generated toString
-methods with a method on the base class.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1164">HADOOP-1164</a>.  Fix TestReplicationPolicy to specify port zero, so
-that a free port is automatically selected.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1166">HADOOP-1166</a>.  Add a NullOutputFormat and use it in the
-RandomWriter example.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1169">HADOOP-1169</a>.  Fix a cut/paste error in CopyFiles utility so that
-S3-based source files are correctly copied.<br />(Michael Stack via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1167">HADOOP-1167</a>.  Remove extra synchronization in InMemoryFileSystem.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1110">HADOOP-1110</a>.  Fix an off-by-one error counting map inputs.<br />(David Bowen via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1178">HADOOP-1178</a>.  Fix a NullPointerException during namenode startup.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1011">HADOOP-1011</a>.  Fix a ConcurrentModificationException when viewing
-job history.<br />(Tahir Hashmi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-672">HADOOP-672</a>.  Improve help for fs shell commands.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1170">HADOOP-1170</a>.  Improve datanode performance by removing device
-checks from common operations.<br />(Igor Bolotin via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1090">HADOOP-1090</a>.  Fix SortValidator's detection of whether the input
-file belongs to the sort-input or sort-output directory.<br />(Arun C Murthy via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1081">HADOOP-1081</a>.  Fix bin/hadoop on Darwin.<br />(Michael Bieniosek via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1045">HADOOP-1045</a>.  Add contrib/hbase, a BigTable-like online database.<br />(Jim Kellerman via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1156">HADOOP-1156</a>.  Fix a NullPointerException in MiniDFSCluster.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-702">HADOOP-702</a>.  Add tools to help automate HDFS upgrades.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1163">HADOOP-1163</a>.  Fix ganglia metrics to aggregate metrics from different
-hosts properly.<br />(Michael Bieniosek via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1194">HADOOP-1194</a>.  Make compression style record level for map output
-compression.<br />(Arun C Murthy via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1187">HADOOP-1187</a>.  Improve DFS Scalability: avoid scanning entire list of
-datanodes in getAdditionalBlocks.<br />(Dhruba Borthakur via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1133">HADOOP-1133</a>.  Add tool to analyze and debug namenode on a production
-cluster.<br />(Dhruba Borthakur via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1151">HADOOP-1151</a>.  Remove spurious printing to stderr in streaming
-PipeMapRed.<br />(Koji Noguchi via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-988">HADOOP-988</a>.  Change namenode to use a single map of blocks to metadata.<br />(Raghu Angadi via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1203">HADOOP-1203</a>.  Change UpgradeUtilities used by DFS tests to use
-MiniDFSCluster to start and stop NameNode/DataNodes.<br />(Nigel Daley via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1217">HADOOP-1217</a>.  Add test.timeout property to build.xml, so that
-long-running unit tests may be automatically terminated.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1149">HADOOP-1149</a>.  Improve DFS Scalability: make
-processOverReplicatedBlock() a no-op if blocks are not
-over-replicated.<br />(Raghu Angadi via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1149">HADOOP-1149</a>.  Improve DFS Scalability: optimize getDistance(),
-contains(), and isOnSameRack() in NetworkTopology.<br />(Hairong Kuang via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1218">HADOOP-1218</a>.  Make synchronization on TaskTracker's RunningJob
-object consistent.<br />(Devaraj Das via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1219">HADOOP-1219</a>.  Ignore progress report once a task has reported as
-'done'.<br />(Devaraj Das via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1114">HADOOP-1114</a>.  Permit user to specify additional CLASSPATH elements
-with a HADOOP_CLASSPATH environment variable.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1198">HADOOP-1198</a>.  Remove ipc.client.timeout parameter override from
-unit test configuration.  Using the default is more robust and
-has almost the same run time.<br />(Arun C Murthy via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1211">HADOOP-1211</a>.  Remove deprecated constructor and unused static
-members in DataNode class.<br />(Konstantin Shvachko via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1136">HADOOP-1136</a>.  Fix ArrayIndexOutOfBoundsException in
-FSNamesystem$UnderReplicatedBlocks add() method.<br />(Hairong Kuang via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-978">HADOOP-978</a>.  Add the client name and the address of the node that
-previously started to create the file to the description of
-AlreadyBeingCreatedException.<br />(Konstantin Shvachko via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1001">HADOOP-1001</a>.  Check the type of keys and values generated by the
-mapper against the types specified in JobConf.<br />(Tahir Hashmi via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-971">HADOOP-971</a>.  Improve DFS Scalability: Improve name node performance
-by adding a hostname to datanodes map.<br />(Hairong Kuang via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1189">HADOOP-1189</a>.  Fix 'No space left on device' exceptions on datanodes.<br />(Raghu Angadi via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-819">HADOOP-819</a>.  Change LineRecordWriter to not insert a tab between
-key and value when either is null, and to print nothing when both
-are null.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1204">HADOOP-1204</a>.  Rename InputFormatBase to be FileInputFormat, and
-deprecate InputFormatBase.  Also make LineRecordReader easier to
-extend.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1213">HADOOP-1213</a>.  Improve logging of errors by IPC server, to
-consistently include the service name and the call.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1238">HADOOP-1238</a>.  Fix metrics reporting by TaskTracker to correctly
-track maps_running and reduces_running.<br />(Michael Bieniosek via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1093">HADOOP-1093</a>.  Fix a race condition in HDFS where blocks were
-sometimes erased before they were reported written.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1239">HADOOP-1239</a>.  Add a package name to some testjar test classes.<br />(Jim Kellerman via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1241">HADOOP-1241</a>.  Fix NullPointerException in processReport when
-namenode is restarted.<br />(Dhruba Borthakur via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1244">HADOOP-1244</a>.  Fix stop-dfs.sh to no longer incorrectly specify
-slaves file for stopping datanode.<br />(Michael Bieniosek via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1253">HADOOP-1253</a>.  Fix ConcurrentModificationException and
-NullPointerException in JobControl.<br />(Johan Oskarson via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1256">HADOOP-1256</a>.  Fix NameNode so that multiple DataNodeDescriptors
-can no longer be created on startup.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1214">HADOOP-1214</a>.  Replace streaming classes with new counterparts
-from Hadoop core.<br />(Runping Qi via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1250">HADOOP-1250</a>.  Move a chmod utility from streaming to FileUtil.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1258">HADOOP-1258</a>.  Fix TestCheckpoint test case to wait for
-MiniDFSCluster to be active.<br />(Nigel Daley via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1148">HADOOP-1148</a>.  Re-indent all Java source code to consistently use
-two spaces per indent level.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1251">HADOOP-1251</a>.  Add a method to Reporter to get the map InputSplit.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1224">HADOOP-1224</a>.  Fix "Browse the filesystem" link to no longer point
-to dead datanodes.<br />(Enis Soztutar via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1154">HADOOP-1154</a>.  Fail a streaming task if the threads reading from or
-writing to the streaming process fail.<br />(Koji Noguchi via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-968">HADOOP-968</a>.  Move shuffle and sort to run in reduce's child JVM,
-rather than in TaskTracker.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1111">HADOOP-1111</a>.  Add support for client notification of job
-completion. If the job configuration has a job.end.notification.url
-property it will make a HTTP GET request to the specified URL.
-The number of retries and the interval between retries is also
-configurable.  (A brief configuration sketch follows this list.)<br />(Alejandro Abdelnur via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1275">HADOOP-1275</a>.  Fix misspelled job notification property in
-hadoop-default.xml.<br />(Alejandro Abdelnur via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1152">HADOOP-1152</a>.  Fix race condition in MapOutputCopier.copyOutput file
-rename causing possible reduce task hang.<br />(Tahir Hashmi via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1050">HADOOP-1050</a>.  Distinguish between failed and killed tasks so as to
-not count a lost tasktracker against the job.<br />(Arun C Murthy via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1271">HADOOP-1271</a>.  Fix StreamBaseRecordReader to be able to log record
-data that's not UTF-8.<br />(Arun C Murthy via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1190">HADOOP-1190</a>.  Fix unchecked warnings in main Hadoop code.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1127">HADOOP-1127</a>.  Fix AlreadyBeingCreatedException in namenode for
-jobs run with speculative execution.<br />(Arun C Murthy via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1282">HADOOP-1282</a>.  Omnibus HBase patch.  Improved tests &amp; configuration.<br />(Jim Kellerman via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1262">HADOOP-1262</a>.  Make dfs client try to read from a different replica
-of the checksum file when a checksum error is detected.<br />(Hairong Kuang via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1279">HADOOP-1279</a>.  Fix JobTracker to maintain list of recently
-completed jobs by order of completion, not submission.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1284">HADOOP-1284</a>.  In contrib/streaming, permit flexible specification
-of field delimiter and fields for partitioning and sorting.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1176">HADOOP-1176</a>.  Fix a bug where reduce would hang when a map had
-more than 2GB of output for it.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1293">HADOOP-1293</a>.  Fix contrib/streaming to print more than the first
-twenty lines of standard error.<br />(Koji Noguchi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1297">HADOOP-1297</a>.  Fix datanode so that requests to remove blocks that
-do not exist no longer cause block reports to be re-sent every
-second.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1216">HADOOP-1216</a>.  Change MapReduce so that, when numReduceTasks is
-zero, map outputs are written directly as final output, skipping
-shuffle, sort and reduce.  Use this to implement reduce=NONE
-option in contrib/streaming.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1294">HADOOP-1294</a>.  Fix unchecked warnings in main Hadoop code under
-Java 6.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1299">HADOOP-1299</a>.  Fix so that RPC will restart after RPC.stopClient()
-has been called.<br />(Michael Stack via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1278">HADOOP-1278</a>.  Improve blacklisting of TaskTrackers by JobTracker,
-to reduce false positives.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1290">HADOOP-1290</a>.  Move contrib/abacus into mapred/lib/aggregate.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1272">HADOOP-1272</a>.  Extract inner classes from FSNamesystem into separate
-classes.<br />(Dhruba Borthakur via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1247">HADOOP-1247</a>.  Add support to contrib/streaming for aggregate
-package, formerly called Abacus.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1061">HADOOP-1061</a>.  Fix bug in listing files in the S3 filesystem.
-NOTE: this change is not backwards compatible!  You should use the
-MigrationTool supplied to migrate existing S3 filesystem data to
-the new format.  Please back up your data before upgrading
-(using 'hadoop distcp' for example).<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1304">HADOOP-1304</a>.  Make configurable the maximum number of task
-attempts before a job fails.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1308">HADOOP-1308</a>.  Use generics to restrict types when classes are
-passed as parameters to JobConf methods.<br />(Michael Bieniosek via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1312">HADOOP-1312</a>.  Fix a ConcurrentModificationException in NameNode
-that killed the heartbeat monitoring thread.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1315">HADOOP-1315</a>.  Clean up contrib/streaming, switching it to use core
-classes more and removing unused code.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-485">HADOOP-485</a>.  Allow a different comparator for grouping keys in
-calls to reduce.<br />(Tahir Hashmi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1322">HADOOP-1322</a>.  Fix TaskTracker blacklisting to work correctly in
-one- and two-node clusters.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1144">HADOOP-1144</a>.  Permit one to specify a maximum percentage of tasks
-that can fail before a job is aborted.  The default is zero.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1184">HADOOP-1184</a>.  Fix HDFS decomissioning to complete when the only
-copy of a block is on a decommissioned node.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1263">HADOOP-1263</a>.  Change DFSClient to retry certain namenode calls
-with a random, exponentially increasing backoff time, to avoid
-overloading the namenode on, e.g., job start.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1325">HADOOP-1325</a>.  First complete, functioning version of HBase.<br />(Jim Kellerman via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1276">HADOOP-1276</a>.  Make tasktracker expiry interval configurable.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1326">HADOOP-1326</a>.  Change JobClient#RunJob() to return the job.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1270">HADOOP-1270</a>.  Randomize the fetch of map outputs, speeding the
-shuffle.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1200">HADOOP-1200</a>.  Restore disk checking lost in <a href="http://issues.apache.org/jira/browse/HADOOP-1170">HADOOP-1170</a>.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1252">HADOOP-1252</a>.  Changed MapReduce's allocation of local files to
-use round-robin among available devices, rather than a hashcode.
-More care is also taken to not allocate files on full or offline
-drives.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1324">HADOOP-1324</a>.  Change so that an FSError kills only the task that
-generates it rather than the entire task tracker.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1310">HADOOP-1310</a>.  Fix unchecked warnings in aggregate code.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1255">HADOOP-1255</a>.  Fix a bug where the namenode falls into an infinite
-loop trying to remove a dead node.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1160">HADOOP-1160</a>.  Fix DistributedFileSystem.close() to close the
-underlying FileSystem, correctly aborting files being written.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1341">HADOOP-1341</a>.  Fix intermittent failures in HBase unit tests
-caused by deadlock.<br />(Jim Kellerman via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1350">HADOOP-1350</a>.  Fix shuffle performance problem caused by forcing
-chunked encoding of map outputs.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1345">HADOOP-1345</a>.  Fix HDFS to correctly retry another replica when a
-checksum error is encountered.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1205">HADOOP-1205</a>.  Improve synchronization around HDFS block map.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1353">HADOOP-1353</a>.  Fix a potential NullPointerException in namenode.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1354">HADOOP-1354</a>.  Fix a potential NullPointerException in FsShell.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1358">HADOOP-1358</a>.  Fix a potential bug when DFSClient calls skipBytes.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1356">HADOOP-1356</a>.  Fix a bug in ValueHistogram.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1363">HADOOP-1363</a>.  Fix locking bug in JobClient#waitForCompletion().<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1368">HADOOP-1368</a>.  Fix inconsistent synchronization in JobInProgress.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1369">HADOOP-1369</a>.  Fix inconsistent synchronization in TaskTracker.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1361">HADOOP-1361</a>.  Fix various calls to skipBytes() to check return
-value.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1388">HADOOP-1388</a>.  Fix a potential NullPointerException in web ui.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1385">HADOOP-1385</a>.  Fix MD5Hash#hashCode() to generally hash to more
-than 256 values.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1386">HADOOP-1386</a>.  Fix Path to not permit the empty string as a
-path, as this has led to accidental file deletion.  Instead
-force applications to use "." to name the default directory.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1407">HADOOP-1407</a>.  Fix integer division bug in JobInProgress which
-meant failed tasks didn't cause the job to fail.<br />(Arun C Murthy via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1427">HADOOP-1427</a>.  Fix a typo that caused GzipCodec to incorrectly use
-a very small input buffer.<br />(Espen Amble Kolstad via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1435">HADOOP-1435</a>.  Fix globbing code to no longer use the empty string
-to indicate the default directory, per <a href="http://issues.apache.org/jira/browse/HADOOP-1386">HADOOP-1386</a>.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1411">HADOOP-1411</a>.  Make task retry framework handle
-AlreadyBeingCreatedException when wrapped as a RemoteException.<br />(Hairong Kuang via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1242">HADOOP-1242</a>.  Improve handling of DFS upgrades.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1332">HADOOP-1332</a>.  Fix so that TaskTracker exits reliably during unit
-tests on Windows.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1431">HADOOP-1431</a>.  Fix so that sort progress reporting during map runs
-only while sorting, so that stuck maps are correctly terminated.<br />(Devaraj Das and Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1452">HADOOP-1452</a>.  Change TaskTracker.MapOutputServlet.doGet.totalRead
-to a long, permitting map outputs to exceed 2^31 bytes.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1443">HADOOP-1443</a>.  Fix a bug opening zero-length files in HDFS.<br />(Konstantin Shvachko via cutting)</li>
-    </ol>
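A minimal sketch of the job-end notification added in HADOOP-1111 above, assuming the JobConf/JobClient API of this era.  Only the property name job.end.notification.url comes from the entry; the driver class name, the endpoint URL, and the omitted input/output setup are illustrative, and the retry count and interval mentioned in the entry use separate properties whose names are not given above, so they are not shown.
<pre>
// Sketch only: wires up the HADOOP-1111 job-end notification URL.
// Mapper/reducer classes, input/output formats and paths are omitted.
import java.io.IOException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class NotifyingJob {                          // hypothetical driver class
  public static void main(String[] args) throws IOException {
    JobConf conf = new JobConf(NotifyingJob.class);
    conf.setJobName("job-end-notification-example");
    // On completion the framework issues an HTTP GET to this URL.
    conf.set("job.end.notification.url", "http://example.com/hadoop/notify");
    JobClient.runJob(conf);                          // blocks until the job finishes
  }
}
</pre>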
-<h3><a href="javascript:toggleList('release_0.12.3_-_2007-04-06_')">Release 0.12.3 - 2007-04-06
-</a></h3>
-    <ol id="release_0.12.3_-_2007-04-06_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1162">HADOOP-1162</a>.  Fix bug in record CSV and XML serialization of
-binary values.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1123">HADOOP-1123</a>.  Fix NullPointerException in LocalFileSystem when
-trying to recover from a checksum error.<br />(Hairong Kuang &amp; Nigel Daley via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1177">HADOOP-1177</a>.  Fix bug where IOException in MapOutputLocation.getFile
-was not being logged.<br />(Devaraj Das via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1175">HADOOP-1175</a>.  Fix bugs in JSP for displaying a task's log messages.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1191">HADOOP-1191</a>.  Fix map tasks to wait until sort progress thread has
-stopped before reporting the task done.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1192">HADOOP-1192</a>.  Fix an integer overflow bug in FSShell's 'dus'
-command and a performance problem in HDFS's implementation of it.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1105">HADOOP-1105</a>. Fix reducers to make "progress" while iterating
-through values.<br />(Devaraj Das &amp; Owen O'Malley via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1179">HADOOP-1179</a>. Make Task Tracker close index file as soon as the read
-is done when serving get-map-output requests.<br />(Devaraj Das via tomwhite)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.12.2_-_2007-23-17_')">Release 0.12.2 - 2007-23-17
-</a></h3>
-    <ol id="release_0.12.2_-_2007-23-17_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1135">HADOOP-1135</a>.  Fix bug in block report processing which may cause
-the namenode to delete blocks.<br />(Dhruba Borthakur via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1145">HADOOP-1145</a>.  Make XML serializer and deserializer classes public
-in record package.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1140">HADOOP-1140</a>.  Fix a deadlock in metrics.<br />(David Bowen via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1150">HADOOP-1150</a>.  Fix streaming -reducer and -mapper to give them
-defaults.<br />(Owen O'Malley via tomwhite)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.12.1_-_2007-03-17_')">Release 0.12.1 - 2007-03-17
-</a></h3>
-    <ol id="release_0.12.1_-_2007-03-17_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1035">HADOOP-1035</a>.  Fix a StackOverflowError in FSDataSet.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1053">HADOOP-1053</a>.  Fix VInt representation of negative values.  Also
-remove references in generated record code to methods outside of
-the record package and improve some record documentation.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1067">HADOOP-1067</a>.  Compile fails if Checkstyle jar is present in lib
-directory. Also remove dependency on a particular Checkstyle
-version number.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1060">HADOOP-1060</a>.  Fix an IndexOutOfBoundsException in the JobTracker
-that could cause jobs to hang.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1077">HADOOP-1077</a>.  Fix a race condition fetching map outputs that could
-hang reduces.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1083">HADOOP-1083</a>.  Fix so that when a cluster restarts with a missing
-datanode, its blocks are replicated.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1082">HADOOP-1082</a>.  Fix a NullPointerException in ChecksumFileSystem.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1088">HADOOP-1088</a>.  Fix record serialization of negative values.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1080">HADOOP-1080</a>.  Fix bug in bin/hadoop on Windows when native
-libraries are present.<br />(ab via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1091">HADOOP-1091</a>.  Fix a NullPointerException in MetricsRecord.<br />(David Bowen via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1092">HADOOP-1092</a>.  Fix a NullPointerException in HeartbeatMonitor
-thread.<br />(Hairong Kuang via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1112">HADOOP-1112</a>.  Fix a race condition in Hadoop metrics.<br />(David Bowen via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1108">HADOOP-1108</a>.  Checksummed file system should retry reading if a
-different replica is found when handling ChecksumException.<br />(Hairong Kuang via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1070">HADOOP-1070</a>.  Fix a problem with number of racks and datanodes
-temporarily doubling.<br />(Konstantin Shvachko via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1099">HADOOP-1099</a>.  Fix NullPointerException in JobInProgress.<br />(Gautam Kowshik via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1115">HADOOP-1115</a>.  Fix bug where FsShell copyToLocal doesn't
-copy directories.<br />(Hairong Kuang via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1109">HADOOP-1109</a>.  Fix NullPointerException in StreamInputFormat.<br />(Koji Noguchi via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1117">HADOOP-1117</a>.  Fix DFS scalability: when the namenode is
-restarted it consumes 80% CPU.<br />(Dhruba Borthakur via
-tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1089">HADOOP-1089</a>.  Make the C++ version of write and read v-int
-agree with the Java versions.<br />(Milind Bhandarkar via
-tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1096">HADOOP-1096</a>.  Rename InputArchive and OutputArchive and
-make them public.<br />(Milind Bhandarkar via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1128">HADOOP-1128</a>.  Fix missing progress information in map tasks.<br />(Espen Amble Kolstad, Andrzej Bialecki, and Owen O'Malley
-via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1129">HADOOP-1129</a>.  Fix DFSClient to not hide IOExceptions in
-flush method.<br />(Hairong Kuang via tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1126">HADOOP-1126</a>.  Optimize CPU usage for under replicated blocks
-when cluster restarts.<br />(Hairong Kuang via tomwhite)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.12.0_-_2007-03-02_')">Release 0.12.0 - 2007-03-02
-</a></h3>
-    <ol id="release_0.12.0_-_2007-03-02_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-975">HADOOP-975</a>.  Separate stdout and stderr from tasks.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-982">HADOOP-982</a>.  Add some setters and a toString() method to
-BytesWritable.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-858">HADOOP-858</a>.  Move contrib/smallJobsBenchmark to src/test, removing
-obsolete bits.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-992">HADOOP-992</a>.  Fix MiniMR unit tests to use MiniDFS when specified,
-rather than the local FS.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-954">HADOOP-954</a>.  Change use of metrics to use callback mechanism.
-Also rename utility class Metrics to MetricsUtil.<br />(David Bowen &amp; Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-893">HADOOP-893</a>.  Improve HDFS client's handling of dead datanodes.
-The set is no longer reset with each block, but rather is now
-maintained for the life of an open file.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-882">HADOOP-882</a>.  Upgrade to jets3t version 0.5, used by the S3
-FileSystem.  This version supports retries.<br />(Michael Stack via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-977">HADOOP-977</a>.  Send task's stdout and stderr to JobClient's stdout
-and stderr respectively, with each line tagged by the task's name.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-761">HADOOP-761</a>.  Change unit tests to not use /tmp.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1007">HADOOP-1007</a>. Make names of metrics used in Hadoop unique.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-491">HADOOP-491</a>.  Change mapred.task.timeout to be per-job, and make a
-value of zero mean no timeout.  Also change contrib/streaming to
-disable task timeouts.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1010">HADOOP-1010</a>.  Add Reporter.NULL, a Reporter implementation that
-does nothing.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-923">HADOOP-923</a>.  In HDFS NameNode, move replication computation to a
-separate thread, to improve heartbeat processing time.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-476">HADOOP-476</a>.  Rewrite contrib/streaming command-line processing,
-improving parameter validation.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-973">HADOOP-973</a>.  Improve error messages in Namenode.  This should help
-to track down a problem that was appearing as a
-NullPointerException.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-649">HADOOP-649</a>.  Fix so that jobs with no tasks are not lost.<br />(Thomas Friol via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-803">HADOOP-803</a>.  Reduce memory use by HDFS namenode, phase I.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1021">HADOOP-1021</a>.  Fix MRCaching-based unit tests on Windows.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-889">HADOOP-889</a>.  Remove duplicate code from HDFS unit tests.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-943">HADOOP-943</a>.  Improve HDFS's fsck command to display the filename
-for under-replicated blocks.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-333">HADOOP-333</a>.  Add validator for sort benchmark output.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-947">HADOOP-947</a>.  Improve performance of datanode decomissioning.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-442">HADOOP-442</a>.  Permit one to specify hosts allowed to connect to
-namenode and jobtracker with include and exclude files.<br />(Wendy
-Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1017">HADOOP-1017</a>.  Cache constructors, for improved performance.<br />(Ron Bodkin via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-867">HADOOP-867</a>.  Move split creation out of JobTracker to client.
-Splits are now saved in a separate file, read by task processes
-directly, so that user code is no longer required in the
-JobTracker.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1006">HADOOP-1006</a>.  Remove obsolete '-local' option from test code.<br />(Gautam Kowshik via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-952">HADOOP-952</a>. Create a public (shared) Hadoop EC2 AMI.
-The EC2 scripts now support launch of public AMIs.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1025">HADOOP-1025</a>. Remove some obsolete code in ipc.Server.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-997">HADOOP-997</a>. Implement S3 retry mechanism for failed block
-transfers. This includes a generic retry mechanism for use
-elsewhere in Hadoop.<br />(tomwhite)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-990">HADOOP-990</a>.  Improve HDFS support for full datanode volumes.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-564">HADOOP-564</a>.  Replace uses of "dfs://" URIs with the more standard
-"hdfs://".<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1030">HADOOP-1030</a>.  In unit tests, unify setting of ipc.client.timeout.
-Also increase the value used from one to two seconds, in hopes of
-making tests complete more reliably.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-654">HADOOP-654</a>.  Stop assigning tasks to a tasktracker if it has
-failed more than a specified number of tasks in the job.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-985">HADOOP-985</a>.  Change HDFS to identify nodes by IP address rather
-than by DNS hostname.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-248">HADOOP-248</a>.  Optimize location of map outputs to not use random
-probes.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1029">HADOOP-1029</a>.  Fix streaming's input format to correctly seek to
-the start of splits.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-492">HADOOP-492</a>.  Add per-job and per-task counters.  These are
-incremented via the Reporter interface and available through the
-web ui and the JobClient API.  The mapreduce framework maintains a
-few basic counters, and applications may add their own.  Counters
-are also passed to the metrics system.  (A short example follows this list.)<br />(David Bowen via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1034">HADOOP-1034</a>.  Fix datanode to better log exceptions.<br />(Philippe Gassmann via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-878">HADOOP-878</a>.  In contrib/streaming, fix reducer=NONE to work with
-multiple maps.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1039">HADOOP-1039</a>.  In HDFS's TestCheckpoint, avoid restarting
-MiniDFSCluster so often, speeding this test.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1040">HADOOP-1040</a>.  Update RandomWriter example to use counters and
-user-defined input and output formats.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1027">HADOOP-1027</a>.  Fix problems with in-memory merging during shuffle
-and re-enable this optimization.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1036">HADOOP-1036</a>.  Fix exception handling in TaskTracker to keep tasks
-from being lost.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1042">HADOOP-1042</a>.  Improve the handling of failed map output fetches.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-928">HADOOP-928</a>.  Make checksums optional per FileSystem.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1044">HADOOP-1044</a>.  Fix HDFS's TestDecommission to not spuriously fail.<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-972">HADOOP-972</a>.  Optimize HDFS's rack-aware block placement algorithm.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1043">HADOOP-1043</a>.  Optimize shuffle, increasing parallelism.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-940">HADOOP-940</a>.  Improve HDFS's replication scheduling.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1020">HADOOP-1020</a>.  Fix a bug in Path resolution, and a with unit tests
-on Windows.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-941">HADOOP-941</a>.  Enhance record facility.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1000">HADOOP-1000</a>.  Fix so that log messages in task subprocesses are
-not written to a task's standard error.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1037">HADOOP-1037</a>.  Fix bin/slaves.sh, which currently only works with
-/bin/bash, to specify /bin/bash rather than /bin/sh.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1046">HADOOP-1046</a>. Clean up tmp from partially received stale block files.<br />(ab)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1041">HADOOP-1041</a>.  Optimize mapred counter implementation.  Also group
-counters by their declaring Enum.<br />(David Bowen via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1032">HADOOP-1032</a>.  Permit one to specify jars that will be cached
-across multiple jobs.<br />(Gautam Kowshik via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1051">HADOOP-1051</a>.  Add optional checkstyle task to build.xml.  To use
-this developers must download the (LGPL'd) checkstyle jar
-themselves.<br />(tomwhite via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1049">HADOOP-1049</a>.  Fix a race condition in IPC client.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1056">HADOOP-1056</a>.  Check HDFS include/exclude node lists with both IP
-address and hostname.<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-994">HADOOP-994</a>.  In HDFS, limit the number of blocks invalidated at
-once.  Large lists were causing datanodes to time out.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-432">HADOOP-432</a>.  Add a trash feature, disabled by default.  When
-enabled, the FSShell 'rm' command will move things to a trash
-directory in the filesystem.  In HDFS, a thread periodically
-checkpoints the trash and removes old checkpoints.<br />(cutting)</li>
-    </ol>
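The per-job and per-task counters introduced by HADOOP-492, and grouped by their declaring Enum per HADOOP-1041, are incremented through the Reporter passed to each map and reduce call, as noted in the entry above.  A minimal sketch against the shape the org.apache.hadoop.mapred API later settled on; the class, enum, and counter names are illustrative, and exact interface signatures varied across these early releases.
<pre>
// Sketch only: an application-defined counter incremented via Reporter.
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class CountingMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, LongWritable> {

  // Counters are grouped in the web ui and JobClient output by this declaring Enum.
  enum Records { TOTAL, EMPTY }

  public void map(LongWritable key, Text value,
                  OutputCollector<Text, LongWritable> output,
                  Reporter reporter) throws IOException {
    reporter.incrCounter(Records.TOTAL, 1);          // per-task, summed per job
    if (value.toString().trim().length() == 0) {
      reporter.incrCounter(Records.EMPTY, 1);        // counted but not emitted
    } else {
      output.collect(value, new LongWritable(1));
    }
  }
}
</pre>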
-<h3><a href="javascript:toggleList('release_0.11.2_-_2007-02-16_')">Release 0.11.2 - 2007-02-16
-</a></h3>
-    <ol id="release_0.11.2_-_2007-02-16_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1009">HADOOP-1009</a>.  Fix an infinite loop in the HDFS namenode.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-1014">HADOOP-1014</a>.  Disable in-memory merging during shuffle, as this is
-causing data corruption.<br />(Devaraj Das via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.11.1_-_2007-02-09_')">Release 0.11.1 - 2007-02-09
-</a></h3>
-    <ol id="release_0.11.1_-_2007-02-09_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-976">HADOOP-976</a>.  Make SequenceFile.Metadata public.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-917">HADOOP-917</a>.  Fix a NullPointerException in SequenceFile's merger
-with large map outputs.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-984">HADOOP-984</a>.  Fix a bug in shuffle error handling introduced by
-<a href="http://issues.apache.org/jira/browse/HADOOP-331">HADOOP-331</a>.  If a map output is unavailable, the job tracker is
-once more informed.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-987">HADOOP-987</a>.  Fix a problem in HDFS where blocks were not removed
-from neededReplications after a replication target was selected.<br />(Hairong Kuang via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.11.0_-_2007-02-02_')">Release 0.11.0 - 2007-02-02
-</a></h3>
-    <ol id="release_0.11.0_-_2007-02-02_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-781">HADOOP-781</a>.  Remove methods deprecated in 0.10 that are no longer
-widely used.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-842">HADOOP-842</a>.  Change HDFS protocol so that the open() method is
-passed the client hostname, to permit the namenode to order block
-locations on the basis of network topology.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-852">HADOOP-852</a>.  Add an ant task to compile record definitions, and
-use it to compile record unit tests.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-757">HADOOP-757</a>.  Fix "Bad File Descriptor" exception in HDFS client
-when an output file is closed twice.<br />(Raghu Angadi via cutting)</li>
-      <li>[ intentionally blank ]
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-890">HADOOP-890</a>.  Replace dashes in metric names with underscores,
-for better compatibility with some monitoring systems.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-801">HADOOP-801</a>.  Add to jobtracker a log of task completion events.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-855">HADOOP-855</a>.  In HDFS, try to repair files with checksum errors.
-An exception is still thrown, but corrupt blocks are now removed
-when they have replicas.<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-886">HADOOP-886</a>.  Reduce number of timer threads created by metrics API
-by pooling contexts.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-897">HADOOP-897</a>.  Add a "javac.args" property to build.xml that permits
-one to pass arbitrary options to javac.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-899">HADOOP-899</a>.  Update libhdfs for changes in <a href="http://issues.apache.org/jira/browse/HADOOP-871">HADOOP-871</a>.<br />(Sameer Paranjpye via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-905">HADOOP-905</a>.  Remove some dead code from JobClient.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-902">HADOOP-902</a>.  Fix a NullPointerException in HDFS client when
-closing output streams.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-735">HADOOP-735</a>.  Switch generated record code to use BytesWritable to
-represent fields of type 'buffer'.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-830">HADOOP-830</a>.  Improve mapreduce merge performance by buffering and
-merging multiple map outputs as they arrive at reduce nodes before
-they're written to disk.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-908">HADOOP-908</a>.  Add a new contrib package, Abacus, that simplifies
-counting and aggregation, built on MapReduce.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-901">HADOOP-901</a>.  Add support for recursive renaming to the S3 filesystem.<br />(Tom White via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-912">HADOOP-912</a>.  Fix a bug in TaskTracker.isIdle() that was
-sporadically causing unit test failures.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-909">HADOOP-909</a>.  Fix the 'du' command to correctly compute the size of
-FileSystem directory trees.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-731">HADOOP-731</a>.  When a checksum error is encountered on a file stored
-in HDFS, try another replica of the data, if any.<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-732">HADOOP-732</a>.  Add support to SequenceFile for arbitrary metadata,
-as a set of attribute value pairs.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-929">HADOOP-929</a>.  Fix PhasedFileSystem to pass configuration to
-underlying FileSystem.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-935">HADOOP-935</a>.  Fix contrib/abacus to not delete pre-existing output
-files, but rather to fail in this case.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-936">HADOOP-936</a>.  More metric renamings, as in <a href="http://issues.apache.org/jira/browse/HADOOP-890">HADOOP-890</a>.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-856">HADOOP-856</a>.  Fix HDFS's fsck command to not report that
-non-existent filesystems are healthy.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-602">HADOOP-602</a>.  Remove the dependency on Lucene's PriorityQueue
-utility, by copying it into Hadoop.  This facilitates using Hadoop
-with different versions of Lucene without worrying about CLASSPATH
-order.<br />(Milind Bhandarkar via cutting)</li>
-      <li>[ intentionally blank ]
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-227">HADOOP-227</a>.  Add support for backup namenodes, which periodically
-get snapshots of the namenode state.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-884">HADOOP-884</a>.  Add scripts in contrib/ec2 to facilitate running
-Hadoop on an Amazon's EC2 cluster.<br />(Tom White via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-937">HADOOP-937</a>.  Change the namenode to request re-registration of
-datanodes in more circumstances.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-922">HADOOP-922</a>.  Optimize small forward seeks in HDFS.  If data is has
-likely already in flight, skip ahead rather than re-opening the
-block.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-961">HADOOP-961</a>.  Add a 'job -events' sub-command that prints job
-events, including task completions and failures.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-959">HADOOP-959</a>.  Fix namenode snapshot code added in <a href="http://issues.apache.org/jira/browse/HADOOP-227">HADOOP-227</a> to
-work on Windows.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-934">HADOOP-934</a>.  Fix TaskTracker to catch metrics exceptions that were
-causing heartbeats to fail.<br />(Arun Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-881">HADOOP-881</a>.  Fix JobTracker web interface to display the correct
-number of task failures.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-788">HADOOP-788</a>.  Change contrib/streaming to subclass TextInputFormat,
-permitting it to take advantage of native compression facilities.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-962">HADOOP-962</a>.  In contrib/ec2: make scripts executable in tar file;
-add a README; make the environment file use a template.<br />(Tom White via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-549">HADOOP-549</a>.  Fix a NullPointerException in TaskReport's
-serialization.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-963">HADOOP-963</a>.  Fix remote exceptions to have the stack trace of the
-caller thread, not the IPC listener thread.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-967">HADOOP-967</a>.  Change RPC clients to start sending a version header.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-964">HADOOP-964</a>.  Fix a bug introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-830">HADOOP-830</a> where jobs failed
-whose comparators and/or i/o types were in the job's jar.<br />(Dennis Kubes via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-969">HADOOP-969</a>.  Fix a deadlock in JobTracker.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-862">HADOOP-862</a>.  Add support for the S3 FileSystem to the CopyFiles
-tool.<br />(Michael Stack via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-965">HADOOP-965</a>.  Fix IsolationRunner so that job's jar can be found.<br />(Dennis Kubes via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-309">HADOOP-309</a>.  Fix two NullPointerExceptions in StatusHttpServer.<br />(navychen via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-692">HADOOP-692</a>.  Add rack awareness to HDFS's placement of blocks.<br />(Hairong Kuang via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.10.1_-_2007-01-10_')">Release 0.10.1 - 2007-01-10
-</a></h3>
-    <ol id="release_0.10.1_-_2007-01-10_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-857">HADOOP-857</a>.  Fix S3 FileSystem implementation to permit its use
-for MapReduce input and output.<br />(Tom White via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-863">HADOOP-863</a>.  Reduce logging verbosity introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-813">HADOOP-813</a>.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-815">HADOOP-815</a>.  Fix memory leaks in JobTracker.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-600">HADOOP-600</a>.  Fix a race condition in JobTracker.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-864">HADOOP-864</a>.  Fix 'bin/hadoop -jar' to operate correctly when
-hadoop.tmp.dir does not yet exist.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-866">HADOOP-866</a>.  Fix 'dfs -get' command to remove existing crc files,
-if any.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-871">HADOOP-871</a>.  Fix a bug in bin/hadoop setting JAVA_LIBRARY_PATH.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-868">HADOOP-868</a>.  Decrease the number of open files during map,
-respecting io.sort.factor.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-865">HADOOP-865</a>.  Fix S3 FileSystem so that partially created files can
-be deleted.<br />(Tom White via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-873">HADOOP-873</a>.	 Pass java.library.path correctly to child processes.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-851">HADOOP-851</a>.  Add support for the LZO codec.  This is much faster
-than the default, zlib-based compression, but it is only available
-when the native library is built.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-880">HADOOP-880</a>.  Fix S3 FileSystem to remove directories.<br />(Tom White via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-879">HADOOP-879</a>.  Fix InputFormatBase to handle output generated by
-MapFileOutputFormat.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-659">HADOOP-659</a>.  In HDFS, prioritize replication of blocks based on
-current replication level.  Blocks which are severely
-under-replicated should be further replicated before blocks which
-are less under-replicated.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-726">HADOOP-726</a>.  Deprecate FileSystem locking methods.  They are not
-currently usable.  Locking should eventually be provided as an
-independent service.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-758">HADOOP-758</a>.  Fix exception handling during reduce so that root
-exceptions are not masked by exceptions in cleanups.<br />(Raghu Angadi via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.10.0_-_2007-01-05_')">Release 0.10.0 - 2007-01-05
-</a></h3>
-    <ol id="release_0.10.0_-_2007-01-05_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-763">HADOOP-763</a>. Change DFS namenode benchmark to not use MapReduce.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-777">HADOOP-777</a>. Use fully-qualified hostnames for tasktrackers and
-datanodes.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-621">HADOOP-621</a>. Change 'dfs -cat' to exit sooner when output has been
-closed.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-752">HADOOP-752</a>. Rationalize some synchronization in DFS namenode.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-629">HADOOP-629</a>. Fix RPC services to better check the protocol name and
-version.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-774">HADOOP-774</a>. Limit the number of invalid blocks returned with
-heartbeats by the namenode to datanodes.  Transmitting and
-processing very large invalid block lists can tie up both the
-namenode and datanode for too long.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-738">HADOOP-738</a>. Change 'dfs -get' command to not create CRC files by
-default, adding a -crc option to force their creation.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-676">HADOOP-676</a>. Improved exceptions and error messages for common job
-input specification errors.<br />(Sanjay Dahiya via cutting)</li>
-      <li>[Included in 0.9.2 release]
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-756">HADOOP-756</a>. Add new dfsadmin option to wait for filesystem to be
-operational.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-770">HADOOP-770</a>. Fix jobtracker web interface to display, on restart,
-jobs that were running when it was last stopped.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-331">HADOOP-331</a>. Write all map outputs to a single file with an index,
-rather than to a separate file per reduce task.  This should both
-speed the shuffle and make things more scalable.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-818">HADOOP-818</a>. Fix contrib unit tests to not depend on core unit
-tests.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-786">HADOOP-786</a>. Log common exception at debug level.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-796">HADOOP-796</a>. Provide more convenient access to failed task
-information in the web interface.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-764">HADOOP-764</a>. Reduce memory allocations in namenode some.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-802">HADOOP-802</a>. Update description of mapred.speculative.execution to
-mention reduces.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-806">HADOOP-806</a>. Include link to datanodes on front page of namenode
-web interface.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-618">HADOOP-618</a>.  Make JobSubmissionProtocol public.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-782">HADOOP-782</a>.  Fully remove killed tasks.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-792">HADOOP-792</a>.  Fix 'dfs -mv' to return correct status.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-673">HADOOP-673</a>.  Give each task its own working directory again.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-571">HADOOP-571</a>.  Extend the syntax of Path to be a URI; to be
-optionally qualified with a scheme and authority.  The scheme
-determines the FileSystem implementation, while the authority
-determines the FileSystem instance.  New FileSystem
-implementations may be provided by defining an fs.&lt;scheme&gt;.impl
-property, naming the FileSystem implementation class.  This
-permits easy integration of new FileSystem implementations.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-720">HADOOP-720</a>.  Add an HDFS white paper to website.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-794">HADOOP-794</a>.  Fix a divide-by-zero exception when a job specifies
-zero map tasks.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-454">HADOOP-454</a>.  Add a 'dfs -dus' command that provides summary disk
-usage.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-574">HADOOP-574</a>.  Add an Amazon S3 implementation of FileSystem.  To
-use this, one need only specify paths of the form
-s3://id:secret@bucket/.  Alternately, the AWS access key id and
-secret can be specified in your config, with the properties
-fs.s3.awsAccessKeyId and fs.s3.awsSecretAccessKey.<br />(Tom White via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-824">HADOOP-824</a>.  Rename DFSShell to be FsShell, since it applies
-generically to all FileSystem implementations.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-813">HADOOP-813</a>.  Fix map output sorting to report progress, so that
-sorts which take longer than the task timeout do not fail.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-825">HADOOP-825</a>.  Fix HDFS daemons when configured with new URI syntax.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-596">HADOOP-596</a>.  Fix a bug in phase reporting during reduce.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-811">HADOOP-811</a>.  Add a utility, MultithreadedMapRunner.<br />(Alejandro Abdelnur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-829">HADOOP-829</a>.  Within HDFS, clearly separate three different
-representations for datanodes: one for RPCs, one for
-namenode-internal use, and one for namespace persistence.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-823">HADOOP-823</a>.  Fix problem starting datanode when not all configured
-data directories exist.<br />(Bryan Pendleton via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-451">HADOOP-451</a>.  Add a Split interface.  CAUTION: This incompatibly
-changes the InputFormat and RecordReader interfaces.  Not only is
-FileSplit replaced with Split, but a FileSystem parameter is no
-longer passed in several methods, input validation has changed,
-etc.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-814">HADOOP-814</a>.  Optimize locking in namenode.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-738">HADOOP-738</a>.  Change 'fs -put' and 'fs -get' commands to accept
-standard input and output, respectively.  Standard i/o is
-specified by a file named '-'.<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-835">HADOOP-835</a>.  Fix a NullPointerException reading record-compressed
-SequenceFiles.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-836">HADOOP-836</a>.  Fix a MapReduce bug on Windows, where the wrong
-FileSystem was used.  Also add a static FileSystem.getLocal()
-method and better Path checking in HDFS, to help avoid such issues
-in the future.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-837">HADOOP-837</a>.  Improve RunJar utility to unpack jar file
-hadoop.tmp.dir, rather than the system temporary directory.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-841">HADOOP-841</a>.  Fix native library to build 32-bit version even when
-on a 64-bit host, if a 32-bit JVM is used.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-838">HADOOP-838</a>.  Fix tasktracker to pass java.library.path to
-sub-processes, so that libhadoop.a is found.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-844">HADOOP-844</a>.  Send metrics messages on a fixed-delay schedule
-instead of a fixed-rate schedule.<br />(David Bowen via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-849">HADOOP-849</a>.  Fix OutOfMemory exceptions in TaskTracker due to a
-file handle leak in SequenceFile.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-745">HADOOP-745</a>.  Fix a synchronization bug in the HDFS namenode.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-850">HADOOP-850</a>.  Add Writable implementations for variable-length
-integers.<br />(ab via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-525">HADOOP-525</a>.  Add raw comparators to record types.  This greatly
-improves record sort performance.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-628">HADOOP-628</a>.  Fix a problem with 'fs -cat' command, where some
-characters were replaced with question marks.<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-804">HADOOP-804</a>.  Reduce verbosity of MapReduce logging.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-853">HADOOP-853</a>.  Rename 'site' to 'docs', in preparation for inclusion
-in releases.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-371">HADOOP-371</a>.  Include contrib jars and site documentation in
-distributions.  Also add contrib and example documentation to
-distributed javadoc, in separate sections.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-846">HADOOP-846</a>.  Report progress during entire map, as sorting of
-intermediate outputs may happen at any time, potentially causing
-task timeouts.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-840">HADOOP-840</a>.  In task tracker, queue task cleanups and perform them
-in a separate thread.<br />(omalley &amp; Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-681">HADOOP-681</a>.  Add to HDFS the ability to decommission nodes.  This
-causes their blocks to be re-replicated on other nodes, so that
-they may be removed from a cluster.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-470">HADOOP-470</a>.  In HDFS web ui, list the datanodes containing each
-copy of a block.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-700">HADOOP-700</a>.  Change bin/hadoop to only include core jar file on
-classpath, not example, test, etc.  Also rename core jar to
-hadoop-${version}-core.jar so that it can be more easily
-identified.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-619">HADOOP-619</a>.  Extend InputFormatBase to accept individual files and
-glob patterns as MapReduce inputs, not just directories.  Also
-change contrib/streaming to use this.<br />(Sanjay Dahiya via cutting)</li>
-    </ol>
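As a rough illustration of two FileSystem-related entries in the list above (HADOOP-571's URI-qualified Paths and HADOOP-574's S3 credential properties), the sketch below shows how client code could resolve a FileSystem from a path. The fs.s3.awsAccessKeyId, fs.s3.awsSecretAccessKey and fs.<scheme>.impl property names come from the entries themselves; the Path.getFileSystem(Configuration) call is assumed from the public Hadoop FileSystem API, and the bucket name and credentials are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class UriPathSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // S3 credentials may live in the configuration instead of the path
        // (property names taken from the HADOOP-574 entry); values are placeholders.
        conf.set("fs.s3.awsAccessKeyId", "YOUR_ACCESS_KEY_ID");
        conf.set("fs.s3.awsSecretAccessKey", "YOUR_SECRET_KEY");

        // The scheme selects the FileSystem implementation (looked up through
        // fs.<scheme>.impl), and the authority selects the instance, per HADOOP-571.
        Path p = new Path("s3://example-bucket/logs/part-00000");
        FileSystem fs = p.getFileSystem(conf);
        System.out.println(p + " is served by " + fs.getClass().getName());
      }
    }

An hdfs:// or file: path would resolve to its FileSystem the same way, through the corresponding fs.<scheme>.impl mapping.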
-<h3><a href="javascript:toggleList('release_0.9.2_-_2006-12-15_')">Release 0.9.2 - 2006-12-15
-</a></h3>
-    <ol id="release_0.9.2_-_2006-12-15_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-639">HADOOP-639</a>. Restructure InterTrackerProtocol to make task
-accounting more reliable.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-827">HADOOP-827</a>. Turn off speculative execution by default, since it's
-currently broken.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-791">HADOOP-791</a>. Fix a deadlock in the task tracker.<br />(Mahadev Konar via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.9.1_-_2006-12-06_')">Release 0.9.1 - 2006-12-06
-</a></h3>
-    <ol id="release_0.9.1_-_2006-12-06_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-780">HADOOP-780</a>. Use ReflectionUtils to instantiate key and value
-objects.<br />(ab)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-779">HADOOP-779</a>. Fix contrib/streaming to work correctly with gzipped
-input files.<br />(Hairong Kuang via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.9.0_-_2006-12-01_')">Release 0.9.0 - 2006-12-01
-</a></h3>
-    <ol id="release_0.9.0_-_2006-12-01_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-655">HADOOP-655</a>.  Remove most deprecated code.  A few deprecated things
-remain, notably UTF8 and some methods that are still required.
-Also cleaned up constructors for SequenceFile, MapFile, SetFile,
-and ArrayFile a bit.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-565">HADOOP-565</a>.  Upgrade to Jetty version 6.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-682">HADOOP-682</a>.  Fix DFS format command to work correctly when
-configured with a non-existent directory.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-645">HADOOP-645</a>.  Fix a bug in contrib/streaming when -reducer is NONE.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-687">HADOOP-687</a>.  Fix a classpath bug in bin/hadoop that blocked the
-servers from starting.<br />(Sameer Paranjpye via omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-683">HADOOP-683</a>.  Remove a script dependency on bash, so it works with
-dash, the new default for /bin/sh on Ubuntu.<br />(James Todd via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-382">HADOOP-382</a>.  Extend unit tests to run multiple datanodes.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-604">HADOOP-604</a>.  Fix some synchronization issues and a
-NullPointerException in DFS datanode.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-459">HADOOP-459</a>.  Fix memory leaks and a host of other issues with
-libhdfs.<br />(Sameer Paranjpye via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-694">HADOOP-694</a>.  Fix a NullPointerException in jobtracker.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-637">HADOOP-637</a>.  Fix a memory leak in the IPC server.  Direct buffers
-are not collected like normal buffers, and provided little
-advantage.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-696">HADOOP-696</a>.  Fix TestTextInputFormat unit test to not rely on the
-order of directory listings.<br />(Sameer Paranjpye via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-611">HADOOP-611</a>.  Add support for iterator-based merging to
-SequenceFile.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-688">HADOOP-688</a>.  Move DFS administrative commands to a separate
-command named 'dfsadmin'.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-708">HADOOP-708</a>.  Fix test-libhdfs to return the correct status, so
-that failures will break the build.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-646">HADOOP-646</a>.  Fix namenode to handle edits files larger than 2GB.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-705">HADOOP-705</a>.  Fix a bug in the JobTracker when failed jobs were
-not completely cleaned up.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-613">HADOOP-613</a>.  Perform final merge while reducing.  This removes one
-sort pass over the data and should consequently significantly
-decrease overall processing time.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-661">HADOOP-661</a>.  Make each job's configuration visible through the web
-ui.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-489">HADOOP-489</a>.  In MapReduce, separate user logs from system logs.
-Each task's log output is now available through the web ui.<br />(Arun
-C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-712">HADOOP-712</a>.  Fix record io's xml serialization to correctly handle
-control-characters.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-668">HADOOP-668</a>.  Improvements to the web-based DFS browser.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-715">HADOOP-715</a>.  Fix build.xml so that test logs are written in build
-directory, rather than in CWD.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-538">HADOOP-538</a>.  Add support for building an optional native library,
-libhadoop.so, that improves the performance of zlib-based
-compression.  To build this, specify -Dcompile.native to Ant.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-610">HADOOP-610</a>.  Fix an problem when the DFS block size is configured
-to be smaller than the buffer size, typically only when debugging.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-695">HADOOP-695</a>.  Fix a NullPointerException in contrib/streaming.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-652">HADOOP-652</a>.  In DFS, when a file is deleted, the block count is
-now decremented.<br />(Vladimir Krokhmalyov via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-725">HADOOP-725</a>.  In DFS, optimize block placement algorithm,
-previously a performance bottleneck.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-723">HADOOP-723</a>.  In MapReduce, fix a race condition during the
-shuffle, which resulted in FileNotFoundExceptions.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-447">HADOOP-447</a>.  In DFS, fix getBlockSize(Path) to work with relative
-paths.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-733">HADOOP-733</a>.  Make exit codes in DFShell consistent and add a unit
-test.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-709">HADOOP-709</a>.  Fix contrib/streaming to work with commands that
-contain control characters.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-677">HADOOP-677</a>.  In IPC, permit a version header to be transmitted
-when connections are established.  This will permit us to change
-the format of IPC requests back-compatibly in subsequent releases.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-699">HADOOP-699</a>.  Fix DFS web interface so that filesystem browsing
-works correctly, using the right port number.  Also add support
-for sorting datanode list by various columns.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-76">HADOOP-76</a>.  Implement speculative reduce.  Now when a job is
-configured for speculative execution, both maps and reduces will
-execute speculatively.  Reduce outputs are written to temporary
-location and moved to the final location when reduce is complete.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-736">HADOOP-736</a>.  Roll back to Jetty 5.1.4, due to performance problems
-with Jetty 6.0.1.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-739">HADOOP-739</a>.  Fix TestIPC to use different port number, making it
-more reliable.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-749">HADOOP-749</a>.  Fix a NullPointerException in jobfailures.jsp.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-747">HADOOP-747</a>.  Fix record serialization to work correctly when
-records are embedded in Maps.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-698">HADOOP-698</a>.  Fix HDFS client not to retry the same datanode on
-read failures.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-689">HADOOP-689</a>. Add GenericWritable, to facilitate polymorphism in
-MapReduce, SequenceFile, etc. (see the sketch after this list).<br />(Feng Jiang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-430">HADOOP-430</a>.  Stop datanode's HTTP server when registration with
-namenode fails.<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-750">HADOOP-750</a>.  Fix a potential race condition during mapreduce
-shuffle.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-728">HADOOP-728</a>.  Fix contrib/streaming-related issues, including
-'-reducer NONE'.<br />(Sanjay Dahiya via cutting)</li>
-    </ol>
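The GenericWritable utility added by HADOOP-689 in the list above is easiest to picture as a small subclass. The sketch below is illustrative only: it assumes the getTypes() contract of org.apache.hadoop.io.GenericWritable, and the wrapped types (Text, IntWritable) are arbitrary examples.

    import org.apache.hadoop.io.GenericWritable;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.Writable;

    // A value wrapper that can carry either a Text or an IntWritable, so a
    // single declared value class can hold both concrete types.
    public class TextOrIntWritable extends GenericWritable {

      // The order of this array is what gets serialized with each instance,
      // so it must stay stable once data has been written.
      @SuppressWarnings("unchecked")
      private static final Class<? extends Writable>[] TYPES =
          new Class[] { Text.class, IntWritable.class };

      @Override
      protected Class<? extends Writable>[] getTypes() {
        return TYPES;
      }
    }

A caller would wrap a concrete value with set(...) before writing and recover it with get() after reading, which is what makes one value class usable for several underlying types.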
-<h3><a href="javascript:toggleList('release_0.8.0_-_2006-11-03_')">Release 0.8.0 - 2006-11-03
-</a></h3>
-    <ol id="release_0.8.0_-_2006-11-03_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-477">HADOOP-477</a>.  Extend contrib/streaming to scan the PATH environment
-variables when resolving executable program names.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-583">HADOOP-583</a>.  In DFSClient, reduce the log level of re-connect
-attempts from 'info' to 'debug', so they are not normally shown.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-498">HADOOP-498</a>.  Re-implement DFS integrity checker to run server-side,
-for much improved performance.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-586">HADOOP-586</a>.  Use the jar name for otherwise un-named jobs.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-514">HADOOP-514</a>.  Make DFS heartbeat interval configurable.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-588">HADOOP-588</a>.  Fix logging and accounting of failed tasks.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-462">HADOOP-462</a>.  Improve command line parsing in DFSShell, so that
-incorrect numbers of arguments result in informative errors rather
-than ArrayOutOfBoundsException.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-561">HADOOP-561</a>.  Fix DFS so that one replica of each block is written
-locally, if possible.  This was the intent, but there was a bug.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-610">HADOOP-610</a>.  Fix TaskTracker to survive more exceptions, keeping
-tasks from becoming lost.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-625">HADOOP-625</a>.  Add a servlet to all http daemons that displays a
-stack dump, useful for debugging.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-554">HADOOP-554</a>.  Fix DFSShell to return -1 for errors.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-626">HADOOP-626</a>.  Correct the documentation in the NNBench example
-code, and also remove a mistaken call there.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-634">HADOOP-634</a>.  Add missing license to many files.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-627">HADOOP-627</a>.  Fix some synchronization problems in MiniMRCluster
-that sometimes caused unit tests to fail.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-563">HADOOP-563</a>.  Improve the NameNode's lease policy so that leases
-are held for one hour without renewal (instead of one minute).
-However another attempt to create the same file will still succeed
-if the lease has not been renewed within a minute.  This prevents
-communication or scheduling problems from causing a write to fail
-for up to an hour, barring some other process trying to create the
-same file.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-635">HADOOP-635</a>.  In DFSShell, permit specification of multiple files
-as the source for file copy and move commands.<br />(Dhruba Borthakur via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-641">HADOOP-641</a>.  Change NameNode to request a fresh block report from
-a re-discovered DataNode, so that no-longer-needed replications
-are stopped promptly.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-642">HADOOP-642</a>.  Change IPC client to specify an explicit connect
-timeout.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-638">HADOOP-638</a>.  Fix an unsynchronized access to TaskTracker's
-internal state.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-624">HADOOP-624</a>.  Fix servlet path to stop a Jetty warning on startup.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-578">HADOOP-578</a>.  Failed tasks are no longer placed at the end of the
-task queue.  This was originally done to work around other
-problems that have now been fixed.  Re-executing failed tasks
-sooner causes buggy jobs to fail faster.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-658">HADOOP-658</a>.  Update source file headers per Apache policy.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-636">HADOOP-636</a>.  Add MapFile &amp; ArrayFile constructors which accept a
-Progressable, and pass it down to SequenceFile.  This permits
-reduce tasks which use MapFile to still report progress while
-writing blocks to the filesystem.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-576">HADOOP-576</a>.  Enable contrib/streaming to use the file cache.  Also
-extend the cache to permit symbolic links to cached items, rather
-than local file copies.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-482">HADOOP-482</a>.  Fix unit tests to work when a cluster is running on
-the same machine, removing port conflicts.<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-90">HADOOP-90</a>.  Permit dfs.name.dir to list multiple directories,
-where namenode data is to be replicated.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-651">HADOOP-651</a>.  Fix DFSCk to correctly pass parameters to the servlet
-on the namenode.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-553">HADOOP-553</a>.  Change main() routines of DataNode and NameNode to
-log exceptions rather than letting the JVM print them to standard
-error.  Also, change the hadoop-daemon.sh script to rotate
-standard i/o log files.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-399">HADOOP-399</a>.  Fix javadoc warnings.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-599">HADOOP-599</a>.  Fix web ui and command line to correctly report DFS
-filesystem size statistics.  Also improve web layout.<br />(Raghu Angadi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-660">HADOOP-660</a>.  Permit specification of junit test output format.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-663">HADOOP-663</a>.  Fix a few unit test issues.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-664">HADOOP-664</a>.  Cause entire build to fail if libhdfs tests fail.<br />(Nigel Daley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-633">HADOOP-633</a>.  Keep jobtracker from dying when job initialization
-throws exceptions.  Also improve exception handling in a few other
-places and add more informative thread names.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-669">HADOOP-669</a>.  Fix a problem introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-90">HADOOP-90</a> that can cause
-DFS to lose files.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-373">HADOOP-373</a>.  Consistently check the value returned by
-FileSystem.mkdirs().<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-670">HADOOP-670</a>.  Code cleanups in some DFS internals: use generic
-types, replace Vector with ArrayList, etc.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-647">HADOOP-647</a>.  Permit map outputs to use a different compression
-type than the job output.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-671">HADOOP-671</a>.  Fix file cache to check for pre-existence before
-creating.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-665">HADOOP-665</a>.  Extend many DFSShell commands to accept multiple
-arguments.  Now commands like "ls", "rm", etc. will operate on
-multiple files.<br />(Dhruba Borthakur via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.7.2_-_2006-10-18_')">Release 0.7.2 - 2006-10-18
-</a></h3>
-    <ol id="release_0.7.2_-_2006-10-18_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-607">HADOOP-607</a>.  Fix a bug where classes included in job jars were not
-found by tasks.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-609">HADOOP-609</a>.  Add a unit test that checks that classes in job jars
-can be found by tasks.  Also modify unit tests to specify multiple
-local directories.<br />(Mahadev Konar via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.7.1_-_2006-10-11_')">Release 0.7.1 - 2006-10-11
-</a></h3>
-    <ol id="release_0.7.1_-_2006-10-11_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-593">HADOOP-593</a>.  Fix a NullPointerException in the JobTracker.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-592">HADOOP-592</a>.  Fix a NullPointerException in the IPC Server.  Also
-consistently log when stale calls are discarded.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-594">HADOOP-594</a>.  Increase the DFS safe-mode threshold from .95 to
-.999, so that nearly all blocks must be reported before filesystem
-modifications are permitted.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-598">HADOOP-598</a>.  Fix tasks to retry when reporting completion, so that
-a single RPC timeout won't fail a task.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-597">HADOOP-597</a>.  Fix TaskTracker to not discard map outputs for errors
-in transmitting them to reduce nodes.<br />(omalley via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.7.0_-_2006-10-06_')">Release 0.7.0 - 2006-10-06
-</a></h3>
-    <ol id="release_0.7.0_-_2006-10-06_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-243">HADOOP-243</a>.  Fix rounding in the display of task and job progress
-so that things are not shown to be 100% complete until they are in
-fact finished.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-438">HADOOP-438</a>.  Limit the length of absolute paths in DFS, since the
-file format used to store pathnames has some limitations.<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-530">HADOOP-530</a>.  Improve error messages in SequenceFile when keys or
-values are of the wrong type.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-288">HADOOP-288</a>.  Add a file caching system and use it in MapReduce to
-cache job jar files on slave nodes.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-533">HADOOP-533</a>.  Fix unit test to not modify conf directory.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-527">HADOOP-527</a>.  Permit specification of the local address that various
-Hadoop daemons should bind to.<br />(Philippe Gassmann via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-542">HADOOP-542</a>.  Updates to contrib/streaming: reformatted source code,
-on-the-fly merge sort, a fix for <a href="http://issues.apache.org/jira/browse/HADOOP-540">HADOOP-540</a>, etc.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-545">HADOOP-545</a>.  Remove an unused config file parameter.<br />(Philippe Gassmann via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-548">HADOOP-548</a>.  Add an Ant property "test.output" to build.xml that
-causes test output to be logged to the console.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-261">HADOOP-261</a>.  Record an error message when map output is lost.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-293">HADOOP-293</a>.  Report the full list of task error messages in the
-web ui, not just the most recent.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-551">HADOOP-551</a>.  Restore JobClient's console printouts to only include
-a maximum of one update per one percent of progress.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-306">HADOOP-306</a>.  Add a "safe" mode to DFS.  The name node enters this
-when less than a specified percentage of file data is complete.
-Currently safe mode is only used on startup, but eventually it
-will also be entered when datanodes disconnect and file data
-becomes incomplete.  While in safe mode no filesystem
-modifications are permitted and block replication is inhibited.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-431">HADOOP-431</a>.  Change 'dfs -rm' to not operate recursively and add a
-new command, 'dfs -rmr' which operates recursively.<br />(Sameer Paranjpye via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-263">HADOOP-263</a>.  Include timestamps for job transitions.  The web
-interface now displays the start and end times of tasks and the
-start times of sorting and reducing for reduce tasks.  Also,
-extend ObjectWritable to handle enums, so that they can be passed
-as RPC parameters.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-556">HADOOP-556</a>.  Contrib/streaming: send keep-alive reports to task
-tracker every 10 seconds rather than every 100 records, to avoid
-task timeouts.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-547">HADOOP-547</a>.  Fix reduce tasks to ping tasktracker while copying
-data, rather than only between copies, avoiding task timeouts.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-537">HADOOP-537</a>.  Fix src/c++/libhdfs build process to create files in
-build/, no longer modifying the source tree.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-487">HADOOP-487</a>.  Throw a more informative exception for unknown RPC
-hosts.<br />(Sameer Paranjpye via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-559">HADOOP-559</a>.  Add file name globbing (pattern matching) support to
-the FileSystem API, and use it in DFSShell ('bin/hadoop dfs')
-commands.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-508">HADOOP-508</a>.  Fix a bug in FSDataInputStream.  Incorrect data was
-returned after seeking to a random location.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-560">HADOOP-560</a>.  Add a "killed" task state.  This can be used to
-distinguish kills from other failures.  Task state has also been
-converted to use an enum type instead of an int, uncovering a bug
-elsewhere.  The web interface is also updated to display killed
-tasks.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-423">HADOOP-423</a>.  Normalize Paths containing directories named "." and
-"..", using the standard, unix interpretation.  Also add checks in
-DFS, prohibiting the use of "." or ".." as directory or file
-names.<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-513">HADOOP-513</a>.  Replace map output handling with a servlet, rather
-than a JSP page.  This fixes an issue where
-IllegalStateExceptions were logged, sets content-length
-correctly, and better handles some errors.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-552">HADOOP-552</a>.  Improved error checking when copying map output files
-to reduce nodes.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-566">HADOOP-566</a>.  Fix scripts to work correctly when accessed through
-relative symbolic links.<br />(Lee Faris via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-519">HADOOP-519</a>.  Add positioned read methods to FSInputStream.  These
-permit one to read from a stream without moving its position, and
-can hence be performed by multiple threads at once on a single
-stream (see the sketch after this list).  Implement an optimized version for DFS and local FS.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-522">HADOOP-522</a>. Permit block compression with MapFile and SetFile.
-Since these formats are always sorted, block compression can
-provide a big advantage.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-567">HADOOP-567</a>. Record version and revision information in builds.  A
-package manifest is added to the generated jar file containing
-version information, and a VersionInfo utility is added that
-includes further information, including the build date and user,
-and the subversion revision and repository.  A 'bin/hadoop
-version' command is added to show this information, and it is also
-added to various web interfaces.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-568">HADOOP-568</a>.  Fix so that errors while initializing tasks on a
-tasktracker correctly report the task as failed to the jobtracker,
-so that it will be rescheduled.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-550">HADOOP-550</a>.  Disable automatic UTF-8 validation in Text.  This
-permits, e.g., TextInputFormat to again operate on non-UTF-8 data.<br />(Hairong and Mahadev via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-343">HADOOP-343</a>.  Fix mapred copying so that a failed tasktracker
-doesn't cause other copies to slow.<br />(Sameer Paranjpye via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-239">HADOOP-239</a>.  Add a persistent job history mechanism, so that basic
-job statistics are not lost after 24 hours and/or when the
-jobtracker is restarted.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-506">HADOOP-506</a>.  Ignore heartbeats from stale task trackers.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-255">HADOOP-255</a>.  Discard stale, queued IPC calls.  Do not process
-calls whose clients will likely time out before they receive a
-response.  When the queue is full, new calls are now received and
-queued, and the oldest calls are discarded, so that, when servers
-get bogged down, they no longer develop a backlog on the socket.
-This should improve some DFS namenode failure modes.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-581">HADOOP-581</a>.  Fix datanode to not reset itself on communications
-errors with the namenode.  If a request to the namenode fails, the
-datanode should retry, not restart.  This reduces the load on the
-namenode, since restarts cause a resend of the block report.<br />(omalley via cutting)</li>
-    </ol>
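The positioned-read API from HADOOP-519 in the list above is clearer with a concrete call. The sketch below is a minimal illustration; it assumes the read(long position, byte[] buffer, int offset, int length) method exposed through FSDataInputStream in the public FileSystem API, and the file path is a placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PositionedReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        FSDataInputStream in = fs.open(new Path("/tmp/positioned-read-demo"));  // placeholder
        try {
          byte[] header = new byte[16];
          byte[] later = new byte[16];

          // Positioned reads take an explicit file offset and do not move the
          // stream's current position, so independent threads could issue them
          // concurrently against the same open stream.
          in.read(0L, header, 0, header.length);
          in.read(4096L, later, 0, later.length);

          // A plain sequential read still starts from offset 0, untouched by
          // the positioned calls above.
          int firstByte = in.read();
          System.out.println("first byte = " + firstByte);
        } finally {
          in.close();
        }
      }
    }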
-<h3><a href="javascript:toggleList('release_0.6.2_-_2006-09-18_')">Release 0.6.2 - 2006-09-18
-</a></h3>
-    <ol id="release_0.6.2_-_2006-09-18_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-532">HADOOP-532</a>.  Fix a bug reading value-compressed sequence files,
-where an exception was thrown reporting that the full value had not
-been read.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-534">HADOOP-534</a>.  Change the default value class in JobConf to be Text
-instead of the now-deprecated UTF8.  This fixes the Grep example
-program, which was updated to use Text, but relies on this
-default.<br />(Hairong Kuang via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.6.1_-_2006-09-13_')">Release 0.6.1 - 2006-09-13
-</a></h3>
-    <ol id="release_0.6.1_-_2006-09-13_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-520">HADOOP-520</a>.  Fix a bug in libhdfs, where write failures were not
-correctly returning error codes.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-523">HADOOP-523</a>.  Fix a NullPointerException when TextInputFormat is
-explicitly specified.  Also add a test case for this.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-521">HADOOP-521</a>.  Fix another NullPointerException finding the
-ClassLoader when using libhdfs.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-526">HADOOP-526</a>.  Fix a NullPointerException when attempting to start
-two datanodes in the same directory.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-529">HADOOP-529</a>.  Fix a NullPointerException when opening
-value-compressed sequence files generated by pre-0.6.0 Hadoop.<br />(omalley via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.6.0_-_2006-09-08_')">Release 0.6.0 - 2006-09-08
-</a></h3>
-    <ol id="release_0.6.0_-_2006-09-08_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-427">HADOOP-427</a>.  Replace some uses of DatanodeDescriptor in the DFS
-web UI code with DatanodeInfo, the preferred public class.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-426">HADOOP-426</a>.  Fix streaming contrib module to work correctly on
-Solaris.  This was causing nightly builds to fail.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-400">HADOOP-400</a>.  Improvements to task assignment.  Tasks are no longer
-re-run on nodes where they have failed (unless no other node is
-available).  Also, tasks are better load-balanced among nodes.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-324">HADOOP-324</a>.  Fix datanode to not exit when a disk is full, but
-rather simply to fail writes.<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-434">HADOOP-434</a>.  Change smallJobsBenchmark to use standard Hadoop
-scripts.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-453">HADOOP-453</a>.  Fix a bug in Text.setCapacity().<br />(siren via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-450">HADOOP-450</a>.  Change so that input types are determined by the
-RecordReader rather than specified directly in the JobConf.  This
-facilitates jobs with a variety of input types.
-<p/>
-WARNING: This contains incompatible API changes!  The RecordReader
-interface has two new methods that all user-defined InputFormats
-must now define.  Also, the values returned by TextInputFormat are
-no longer of class UTF8, but now of class Text.
-</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-436">HADOOP-436</a>.  Fix an error-handling bug in the web ui.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-455">HADOOP-455</a>.  Fix a bug in Text, where DEL was not permitted.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-456">HADOOP-456</a>.  Change the DFS namenode to keep a persistent record
-of the set of known datanodes.  This will be used to implement a
-"safe mode" where filesystem changes are prohibited when a
-critical percentage of the datanodes are unavailable.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-322">HADOOP-322</a>.  Add a job control utility.  This permits one to
-specify job interdependencies.  Each job is submitted only after
-the jobs it depends on have successfully completed.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-176">HADOOP-176</a>.  Fix a bug in IntWritable.Comparator.<br />(Dick King via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-421">HADOOP-421</a>.  Replace uses of String in recordio package with Text
-class, for improved handling of UTF-8 data.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-464">HADOOP-464</a>.  Improved error message when job jar not found.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-469">HADOOP-469</a>.  Fix /bin/bash specifics that have crept into our
-/bin/sh scripts since <a href="http://issues.apache.org/jira/browse/HADOOP-352">HADOOP-352</a>.<br />(Jean-Baptiste Quenot via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-468">HADOOP-468</a>.  Add HADOOP_NICENESS environment variable to set
-scheduling priority for daemons.<br />(Vetle Roeim via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-473">HADOOP-473</a>.  Fix TextInputFormat to correctly handle more EOL
-formats.  Things now work correctly with CR, LF or CRLF.<br />(Dennis Kubes &amp; James White via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-461">HADOOP-461</a>.  Make Java 1.5 an explicit requirement.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-54">HADOOP-54</a>.  Add block compression to SequenceFile.  One may now
-specify that blocks of keys and values are compressed together,
-improving compression for small keys and values.
-SequenceFile.Writer's constructor is now deprecated and replaced
-with a factory method (see the sketch after this list).<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-281">HADOOP-281</a>.  Prohibit DFS files that are also directories.<br />(Wendy Chien via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-486">HADOOP-486</a>.  Add the job username to JobStatus instances returned
-by JobClient.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-437">HADOOP-437</a>.  contrib/streaming: Add support for gzipped inputs.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-463">HADOOP-463</a>.  Add variable expansion to config files.
-Configuration property values may now contain variable
-expressions.  A variable is referenced with the syntax
-'${variable}'.  Variable values are found first in the
-configuration, and then in Java system properties.  The default
-configuration is modified so that temporary directories are now
-under ${hadoop.tmp.dir}, which is, by default,
-/tmp/hadoop-${user.name}.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-419">HADOOP-419</a>. Fix a NullPointerException finding the ClassLoader
-when using libhdfs.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-460">HADOOP-460</a>. Fix contrib/smallJobsBenchmark to use Text instead of
-UTF8.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-196">HADOOP-196</a>.  Fix Configuration(Configuration) constructor to work
-correctly.<br />(Sami Siren via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-501">HADOOP-501</a>.  Fix Configuration.toString() to handle URL resources.<br />(Thomas Friol via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-499">HADOOP-499</a>.  Reduce the use of Strings in contrib/streaming,
-replacing them with Text for better performance.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-64">HADOOP-64</a>.  Manage multiple volumes with a single DataNode.
-Previously DataNode would create a separate daemon per configured
-volume, each with its own connection to the NameNode.  Now all
-volumes are handled by a single DataNode daemon, reducing the load
-on the NameNode.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-424">HADOOP-424</a>.  Fix MapReduce so that jobs which generate zero splits
-do not fail.<br />(Frédéric Bertin via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-408">HADOOP-408</a>.  Adjust some timeouts and remove some others so that
-unit tests run faster.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-507">HADOOP-507</a>.  Fix an IllegalAccessException in DFS.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-320">HADOOP-320</a>.  Fix so that checksum files are correctly copied when
-the destination of a file copy is a directory.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-286">HADOOP-286</a>.  In DFSClient, avoid pinging the NameNode with
-renewLease() calls when no files are being written.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-312">HADOOP-312</a>.  Close idle IPC connections.  All IPC connections were
-cached forever.  Now, after a connection has been idle for more
-than a configurable amount of time (one second by default), the
-connection is closed, conserving resources on both client and
-server.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-497">HADOOP-497</a>.  Permit the specification of the network interface and
-nameserver to be used when determining the local hostname
-advertised by datanodes and tasktrackers.<br />(Lorenzo Thione via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-441">HADOOP-441</a>.  Add a compression codec API and extend SequenceFile
-to use it.  This will permit the use of alternate compression
-codecs in SequenceFile.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-483">HADOOP-483</a>. Improvements to libhdfs build and documentation.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-458">HADOOP-458</a>.  Fix a memory corruption bug in libhdfs.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-517">HADOOP-517</a>.  Fix a contrib/streaming bug in end-of-line detection.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-474">HADOOP-474</a>.  Add CompressionCodecFactory, and use it in
-TextInputFormat and TextOutputFormat.  Compressed input files are
-automatically decompressed when they have the correct extension.
-Output files will, when output compression is specified, be
-generated with an appropriate extension.  Also add a gzip codec and
-fix problems with UTF8 text inputs.<br />(omalley via cutting)</li>
-    </ol>
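Two entries in the list above lend themselves to a combined illustration: HADOOP-463 (variable expansion in configuration values) and HADOOP-54 (block compression in SequenceFile). In the sketch below, ${hadoop.tmp.dir} and the block-compression behaviour come from those entries, while the SequenceFile.createWriter factory signature is assumed from the public API and example.scratch.dir is a hypothetical property name used only for the demo.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class BlockCompressedSeqFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // HADOOP-463: ${...} in a property value is expanded from other
        // configuration entries and Java system properties when it is read back.
        conf.set("example.scratch.dir", "${hadoop.tmp.dir}/seqfile-demo");  // hypothetical property
        Path out = new Path(conf.get("example.scratch.dir"), "data.seq");

        // HADOOP-54: the createWriter factory (replacing the deprecated Writer
        // constructors) can produce a block-compressed file, where runs of keys
        // and values are compressed together.
        FileSystem fs = FileSystem.get(conf);
        SequenceFile.Writer writer = SequenceFile.createWriter(
            fs, conf, out, Text.class, IntWritable.class,
            SequenceFile.CompressionType.BLOCK);
        try {
          for (int i = 0; i < 1000; i++) {
            writer.append(new Text("key-" + i), new IntWritable(i));
          }
        } finally {
          writer.close();
        }
      }
    }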
-<h3><a href="javascript:toggleList('release_0.5.0_-_2006-08-04_')">Release 0.5.0 - 2006-08-04
-</a></h3>
-    <ol id="release_0.5.0_-_2006-08-04_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-352">HADOOP-352</a>.  Fix shell scripts to use /bin/sh instead of
-/bin/bash, for better portability.<br />(Jean-Baptiste Quenot via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-313">HADOOP-313</a>.  Permit task state to be saved so that single tasks
-may be manually re-executed when debugging.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-339">HADOOP-339</a>.  Add method to JobClient API listing jobs that are
-not yet complete, i.e., that are queued or running.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-355">HADOOP-355</a>.  Updates to the streaming contrib module, including
-API fixes, making reduce optional, and adding an input type for
-StreamSequenceRecordReader.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-358">HADOOP-358</a>.  Fix a NPE bug in Path.equals().<br />(Fr??d??ric Bertin via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-327">HADOOP-327</a>.  Fix ToolBase to not call System.exit() when
-exceptions are thrown.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-359">HADOOP-359</a>.  Permit map output to be compressed.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-341">HADOOP-341</a>.  Permit input URI to CopyFiles to use the HTTP
-protocol.  This lets one, e.g., more easily copy log files into
-DFS.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-361">HADOOP-361</a>.  Remove unix dependencies from streaming contrib
-module tests, making them pure java.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-354">HADOOP-354</a>.  Make public methods to stop DFS daemons.<br />(Barry Kaplan via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-252">HADOOP-252</a>.  Add versioning to RPC protocols.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-356">HADOOP-356</a>.  Add contrib to "compile" and "test" build targets, so
-that this code is better maintained.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-307">HADOOP-307</a>.  Add smallJobsBenchmark contrib module.  This runs
-lots of small jobs, in order to determine per-task overheads.<br />(Sanjay Dahiya via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-342">HADOOP-342</a>.  Add a tool for log analysis: Logalyzer.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-347">HADOOP-347</a>.  Add web-based browsing of DFS content.  The namenode
-redirects browsing requests to datanodes.  Content requests are
-redirected to datanodes where the data is local when possible.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-351">HADOOP-351</a>.  Make Hadoop IPC kernel independent of Jetty.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-237">HADOOP-237</a>.  Add metric reporting to DFS and MapReduce.  With only
-minor configuration changes, one can now monitor many Hadoop
-system statistics using Ganglia or other monitoring systems.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-376">HADOOP-376</a>.  Fix datanode's HTTP server to scan for a free port.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-260">HADOOP-260</a>.  Add --config option to shell scripts, specifying an
-alternate configuration directory.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-381">HADOOP-381</a>.  Permit developers to save the temporary files for
-tasks whose names match a regular expression, to facilitate
-debugging.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-344">HADOOP-344</a>.  Fix some Windows-related problems with DF.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-380">HADOOP-380</a>.  Fix reduce tasks to poll less frequently for map
-outputs.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-321">HADOOP-321</a>.  Refactor DatanodeInfo, in preparation for
-<a href="http://issues.apache.org/jira/browse/HADOOP-306">HADOOP-306</a>.<br />(Konstantin Shvachko &amp; omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-385">HADOOP-385</a>.  Fix some bugs in record io code generation.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-302">HADOOP-302</a>.  Add new Text class to replace UTF8, removing
-limitations of that class.  Also refactor utility methods for
-writing zero-compressed integers (VInts and VLongs).<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-335">HADOOP-335</a>.  Refactor DFS namespace/transaction logging in
-namenode.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-375">HADOOP-375</a>.  Fix handling of the datanode HTTP daemon's port so
-that multiple datanodes can be run on a single host.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-386">HADOOP-386</a>.  When removing excess DFS block replicas, remove those
-on nodes with the least free space first.<br />(Johan Oskarson via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-389">HADOOP-389</a>.  Fix intermittent failures of mapreduce unit tests.
-Also fix some build dependencies.<br />(Mahadev &amp; Konstantin via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-362">HADOOP-362</a>.  Fix a problem where jobs hang when status messages
-are received out-of-order.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-394">HADOOP-394</a>.  Change order of DFS shutdown in unit tests to
-minimize errors logged.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-396">HADOOP-396</a>.  Make DatanodeID implement Writable.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-377">HADOOP-377</a>.  Permit one to add URL resources to a Configuration.<br />(Jean-Baptiste Quenot via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-345">HADOOP-345</a>.  Permit iteration over Configuration key/value pairs.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-409">HADOOP-409</a>.  Streaming contrib module: make configuration
-properties available to commands as environment variables.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-369">HADOOP-369</a>.  Add -getmerge option to dfs command that appends all
-files in a directory into a single local file.<br />(Johan Oskarson via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-410">HADOOP-410</a>.  Replace some TreeMaps with HashMaps in DFS, for
-a 17% performance improvement.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-411">HADOOP-411</a>.  Add unit tests for command line parser.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-412">HADOOP-412</a>.  Add MapReduce input formats that support filtering
-of SequenceFile data, including sampling and regex matching.
-Also, move JobConf.newInstance() to a new utility class.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-226">HADOOP-226</a>.  Fix fsck command to properly consider replication
-counts, now that these can vary per file.<br />(Bryan Pendleton via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-425">HADOOP-425</a>.  Add a Python MapReduce example, using Jython.<br />(omalley via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.4.0_-_2006-06-28_')">Release 0.4.0 - 2006-06-28
-</a></h3>
-    <ol id="release_0.4.0_-_2006-06-28_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-298">HADOOP-298</a>.  Improved progress reports for CopyFiles utility, the
-distributed file copier.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-299">HADOOP-299</a>.  Fix the task tracker, permitting multiple jobs to
-more easily execute at the same time.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-250">HADOOP-250</a>.  Add an HTTP user interface to the namenode, running
-on port 50070.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-123">HADOOP-123</a>.  Add MapReduce unit tests that run a jobtracker and
-tasktracker, greatly increasing code coverage.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-271">HADOOP-271</a>.  Add links from jobtracker's web ui to tasktracker's
-web ui.  Also attempt to log a thread dump of child processes
-before they're killed.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-210">HADOOP-210</a>.  Change RPC server to use a selector instead of a
-thread per connection.  This should make it easier to scale to
-larger clusters.  Note that this incompatibly changes the RPC
-protocol: clients and servers must both be upgraded to the new
-version to ensure correct operation.<br />(Devaraj Das via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-311">HADOOP-311</a>.  Change DFS client to retry failed reads, so that a
-single read failure will not alone cause failure of a task.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-314">HADOOP-314</a>.  Remove the "append" phase when reducing.  Map output
-files are now directly passed to the sorter, without first
-appending them into a single file.  Now, the first third of reduce
-progress is "copy" (transferring map output to reduce nodes), the
-middle third is "sort" (sorting map output) and the last third is
-"reduce" (generating output).  Long-term, the "sort" phase will
-also be removed.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-316">HADOOP-316</a>.  Fix a potential deadlock in the jobtracker.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-319">HADOOP-319</a>.  Fix FileSystem.close() to remove the FileSystem
-instance from the cache.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-135">HADOOP-135</a>.  Fix potential deadlock in JobTracker by acquiring
-locks in a consistent order.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-278">HADOOP-278</a>.  Check for existence of input directories before
-starting MapReduce jobs, making it easier to debug this common
-error.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-304">HADOOP-304</a>.  Improve error message for
-UnregisterdDatanodeException to include expected node name.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-305">HADOOP-305</a>.  Fix TaskTracker to ask for new tasks as soon as a
-task is finished, rather than waiting for the next heartbeat.
-This improves performance when tasks are short.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-59">HADOOP-59</a>.  Add support for generic command line options.  One may
-now specify the filesystem (-fs), the MapReduce jobtracker (-jt),
-a config file (-conf) or any configuration property (-D).  The
-"dfs", "fsck", "job", and "distcp" commands currently support
-this, with more to be added.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-296">HADOOP-296</a>.  Permit specification of the amount of reserved space
-on a DFS datanode.  One may specify both the percentage free and
-the number of bytes.<br />(Johan Oskarson via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-325">HADOOP-325</a>.  Fix a problem initializing RPC parameter classes, and
-remove the workaround used to initialize classes.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-328">HADOOP-328</a>.  Add an option to the "distcp" command to ignore read
-errors while copying.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-27">HADOOP-27</a>.  Don't allocate tasks to trackers whose local free
-space is too low.<br />(Johan Oskarson via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-318">HADOOP-318</a>.  Keep slow DFS output from causing task timeouts.
-This incompatibly changes some public interfaces, adding a
-parameter to OutputFormat.getRecordWriter() and the new method
-Reporter.progress(), but it makes lots of tasks succeed that were
-previously failing.<br />(Milind Bhandarkar via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.3.2_-_2006-06-09_')">Release 0.3.2 - 2006-06-09
-</a></h3>
-    <ol id="release_0.3.2_-_2006-06-09_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-275">HADOOP-275</a>.  Update the streaming contrib module to use log4j for
-its logging.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-279">HADOOP-279</a>.  Provide defaults for log4j logging parameters, so
-that things still work reasonably when Hadoop-specific system
-properties are not provided.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-280">HADOOP-280</a>.  Fix a typo in AllTestDriver which caused the wrong
-test to be run when "DistributedFSCheck" was specified.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-240">HADOOP-240</a>.  DFS's mkdirs() implementation no longer logs a warning
-when the directory already exists.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-285">HADOOP-285</a>.  Fix DFS datanodes to be able to re-join the cluster
-after the connection to the namenode is lost.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-277">HADOOP-277</a>.  Fix a race condition when creating directories.<br />(Sameer Paranjpye via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-289">HADOOP-289</a>.  Improved exception handling in DFS datanode.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-292">HADOOP-292</a>.  Fix client-side logging to go to standard error
-rather than standard output, so that it can be distinguished from
-application output.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-294">HADOOP-294</a>.  Fixed bug where conditions for retrying after errors
-in the DFS client were reversed.<br />(omalley via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.3.1_-_2006-06-05_')">Release 0.3.1 - 2006-06-05
-</a></h3>
-    <ol id="release_0.3.1_-_2006-06-05_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-272">HADOOP-272</a>.  Fix a bug in bin/hadoop setting log
-parameters.<br />(omalley &amp; cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-274">HADOOP-274</a>.  Change applications to log to standard output rather
-than to a rolling log file like daemons.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-262">HADOOP-262</a>.  Fix reduce tasks to report progress while they're
-waiting for map outputs, so that they do not time out.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-245">HADOOP-245</a> and <a href="http://issues.apache.org/jira/browse/HADOOP-246">HADOOP-246</a>.  Improvements to record io package.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-276">HADOOP-276</a>.  Add logging config files to jar file so that they're
-always found.<br />(omalley via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.3.0_-_2006-06-02_')">Release 0.3.0 - 2006-06-02
-</a></h3>
-    <ol id="release_0.3.0_-_2006-06-02_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-208">HADOOP-208</a>.  Enhance MapReduce web interface, adding new pages
-for failed tasks, and tasktrackers.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-204">HADOOP-204</a>.  Tweaks to metrics package.<br />(David Bowen via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-209">HADOOP-209</a>.  Add a MapReduce-based file copier.  This will
-copy files within or between file systems in parallel.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-146">HADOOP-146</a>.  Fix DFS to check when randomly generating a new block
-id that no existing blocks already have that id.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-180">HADOOP-180</a>. Make a daemon thread that does the actual task clean ups, so
-that the main offerService thread in the taskTracker doesn't get stuck
-and miss its heartbeat window. This was killing many task trackers as
-big jobs finished (300+ tasks / node).<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-200">HADOOP-200</a>. Avoid transmitting entire list of map task names to
-reduce tasks.  Instead just transmit the number of map tasks and
-henceforth refer to them by number when collecting map output.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-219">HADOOP-219</a>. Fix a NullPointerException when handling a checksum
-exception under SequenceFile.Sorter.sort().<br />(cutting &amp; stack)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-212">HADOOP-212</a>. Permit alteration of the file block size in DFS.  The
-default block size for new files may now be specified in the
-configuration with the dfs.block.size property.  The block size
-may also be specified when files are opened.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-218">HADOOP-218</a>. Avoid accessing configuration while looping through
-tasks in JobTracker.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-161">HADOOP-161</a>. Add hashCode() method to DFS's Block.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-115">HADOOP-115</a>. Map output types may now be specified.  These are also
-used as reduce input types, thus permitting reduce input types to
-differ from reduce output types.<br />(Runping Qi via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-216">HADOOP-216</a>. Add task progress to task status page.<br />(Bryan Pendelton via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-233">HADOOP-233</a>.  Add web server to task tracker that shows running
-tasks and logs.  Also add log access to job tracker web interface.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-205">HADOOP-205</a>.  Incorporate pending tasks into tasktracker load
-calculations.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-247">HADOOP-247</a>.  Fix sort progress to better handle exceptions.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-195">HADOOP-195</a>.  Improve performance of the transfer of map outputs to
-reduce nodes by performing multiple transfers in parallel, each on
-a separate socket.<br />(Sameer Paranjpye via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-251">HADOOP-251</a>.  Fix task processes to be tolerant of failed progress
-reports to their parent process.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-325">HADOOP-325</a>.  Improve the FileNotFound exceptions thrown by
-LocalFileSystem to include the name of the file.<br />(Benjamin Reed via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-254">HADOOP-254</a>.  Use HTTP to transfer map output data to reduce
-nodes.  This, together with <a href="http://issues.apache.org/jira/browse/HADOOP-195">HADOOP-195</a>, greatly improves the
-performance of these transfers.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-163">HADOOP-163</a>.  Cause datanodes that\ are unable to either read or
-write data to exit, so that the namenode will no longer target
-them for new blocks and will replicate their data on other nodes.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-222">HADOOP-222</a>.  Add a -setrep option to the dfs commands that alters
-file replication levels.<br />(Johan Oskarson via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-75">HADOOP-75</a>.  In DFS, only check for a complete file when the file
-is closed, rather than as each block is written.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-124">HADOOP-124</a>. Change DFS so that datanodes are identified by a
-persistent ID rather than by host and port.  This solves a number
-of filesystem integrity problems, when, e.g., datanodes are
-restarted.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-256">HADOOP-256</a>.  Add a C API for DFS.<br />(Arun C Murthy via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-211">HADOOP-211</a>.  Switch to use the Jakarta Commons logging internally,
-configured to use log4j by default.<br />(Arun C Murthy and cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-265">HADOOP-265</a>.  Tasktracker now fails to start if it does not have a
-writable local directory for temporary files.  In this case, it
-logs a message to the JobTracker and exits.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-270">HADOOP-270</a>.  Fix potential deadlock in datanode shutdown.<br />(Hairong Kuang via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.2.1_-_2006-05-12_')">Release 0.2.1 - 2006-05-12
-</a></h3>
-    <ol id="release_0.2.1_-_2006-05-12_">
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-199">HADOOP-199</a>.  Fix reduce progress (broken by <a href="http://issues.apache.org/jira/browse/HADOOP-182">HADOOP-182</a>).<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-201">HADOOP-201</a>.  Fix 'bin/hadoop dfs -report'.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-207">HADOOP-207</a>.  Fix JDK 1.4 incompatibility introduced by <a href="http://issues.apache.org/jira/browse/HADOOP-96">HADOOP-96</a>.
-System.getenv() does not work in JDK 1.4.<br />(Hairong Kuang via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.2.0_-_2006-05-05_')">Release 0.2.0 - 2006-05-05
-</a></h3>
-    <ol id="release_0.2.0_-_2006-05-05_">
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-126">HADOOP-126</a>. 'bin/hadoop dfs -cp' now correctly copies .crc
-files.<br />(Konstantin Shvachko via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-51">HADOOP-51</a>. Change DFS to support per-file replication counts.<br />(Konstantin Shvachko via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-131">HADOOP-131</a>.  Add scripts to start/stop dfs and mapred daemons.
-Use these in start/stop-all scripts.<br />(Chris Mattmann via cutting)</li>
-      <li>Stop using ssh options by default that are not yet in widely used
-versions of ssh.  Folks can still enable their use by uncommenting
-a line in conf/hadoop-env.sh.<br />(cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-92">HADOOP-92</a>.  Show information about all attempts to run each
-task in the web ui.<br />(Mahadev Konar via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-128">HADOOP-128</a>.  Improved DFS error handling.<br />(Owen O'Malley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-129">HADOOP-129</a>.  Replace uses of java.io.File with new class named
-Path.  This fixes bugs where java.io.File methods were called
-directly when FileSystem methods were desired, and reduces the
-likelihood of such bugs in the future.  It also makes the handling
-of pathnames more consistent between local and dfs FileSystems and
-between Windows and Unix. java.io.File-based methods are still
-available for back-compatibility, but are deprecated and will be
-removed once 0.2 is released.<br />(cutting)</li>
-      <li>Change dfs.data.dir and mapred.local.dir to be comma-separated
-lists of directories, no longer space-separated. This fixes
-several bugs on Windows.<br />(cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-144">HADOOP-144</a>.  Use mapred task id for dfs client id, to
-facilitate debugging.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-143">HADOOP-143</a>.  Do not line-wrap stack-traces in web ui.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-118">HADOOP-118</a>.  In DFS, improve clean up of abandoned file
-creations.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-138">HADOOP-138</a>.  Stop multiple tasks in a single heartbeat, rather
-than one per heartbeat.<br />(Stefan via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-139">HADOOP-139</a>.  Remove a potential deadlock in
-LocalFileSystem.lock().<br />(Igor Bolotin via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-134">HADOOP-134</a>.  Don't hang jobs when the tasktracker is
-misconfigured to use an un-writable local directory.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-115">HADOOP-115</a>.  Correct an error message.<br />(Stack via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-133">HADOOP-133</a>.  Retry pings from child to parent, in case of
-(local) communication problems.  Also log exit status, so that one
-can distinguish patricide from other deaths.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-142">HADOOP-142</a>.  Avoid re-running a task on a host where it has
-previously failed.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-148">HADOOP-148</a>.  Maintain a task failure count for each
-tasktracker and display it in the web ui.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-151">HADOOP-151</a>.  Close a potential socket leak, where new IPC
-connection pools were created per configuration instance that RPCs
-use.  Now a global RPC connection pool is used again, as
-originally intended.<br />(cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-69">HADOOP-69</a>.  Don't throw a NullPointerException when getting
-hints for a non-existent file split.<br />(Bryan Pendleton via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-157">HADOOP-157</a>.  When a task that writes dfs files (e.g., a reduce
-task) failed and was retried, it would fail again and again,
-eventually failing the job.  The problem was that dfs did not yet
-know that the failed task had abandoned the files, and would not
-yet let another task create files with the same names.  Dfs now
-retries when creating a file long enough for locks on abandoned
-files to expire.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-150">HADOOP-150</a>.  Improved task names that include job
-names.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-162">HADOOP-162</a>.  Fix ConcurrentModificationException when
-releasing file locks.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-132">HADOOP-132</a>.  Initial check-in of new Metrics API, including
-implementations for writing metric data to a file and for sending
-it to Ganglia.<br />(David Bowen via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-160">HADOOP-160</a>.  Remove some uneeded synchronization around
-time-consuming operations in the TaskTracker.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-166">HADOOP-166</a>.  RPCs failed when passed subclasses of a declared
-parameter type.  This is fixed by changing ObjectWritable to store
-both the declared type and the instance type for Writables.  Note
-that this incompatibly changes the format of ObjectWritable and
-will render unreadable any ObjectWritables stored in files.
-Nutch only uses ObjectWritable in intermediate files, so this
-should not be a problem for Nutch.<br />(Stefan &amp; cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-168">HADOOP-168</a>.  MapReduce RPC protocol methods should all declare
-IOException, so that timeouts are handled appropriately.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-169">HADOOP-169</a>.  Don't fail a reduce task if a call to the
-jobtracker to locate map outputs fails.<br />(omalley via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-170">HADOOP-170</a>.  Permit FileSystem clients to examine and modify
-the replication count of individual files.  Also fix a few
-replication-related bugs.<br />(Konstantin Shvachko via cutting)</li>
-      <li>Permit specification of higher replication levels for job
-submission files (job.xml and job.jar).  This helps with large
-clusters, since these files are read by every node.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-173">HADOOP-173</a>.  Optimize allocation of tasks with local data.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-167">HADOOP-167</a>.  Reduce number of Configurations and JobConf's
-created.<br />(omalley via cutting)</li>
-      <li>NUTCH-256.  Change FileSystem#createNewFile() to create a .crc
-file.  The lack of a .crc file was causing warnings.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-174">HADOOP-174</a>.  Change JobClient to not abort job until it has failed
-to contact the job tracker for five attempts, not just one as
-before.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-177">HADOOP-177</a>.  Change MapReduce web interface to page through tasks.
-Previously, when jobs had more than a few thousand tasks they
-could crash web browsers.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-178">HADOOP-178</a>.  In DFS, piggyback blockwork requests from datanodes
-on heartbeat responses from namenode.  This reduces the volume of
-RPC traffic.  Also move startup delay in blockwork from datanode
-to namenode.  This fixes a problem where restarting the namenode
-triggered a lot of unneeded replication.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-183">HADOOP-183</a>.  If the DFS namenode is restarted with different
-minimum and/or maximum replication counts, existing files'
-replication counts are now automatically adjusted to be within the
-newly configured bounds.<br />(Hairong Kuang via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-186">HADOOP-186</a>.  Better error handling in TaskTracker's top-level
-loop.  Also improve calculation of time to send next heartbeat.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-187">HADOOP-187</a>.  Add two MapReduce examples/benchmarks.  One creates
-files containing random data.  The second sorts the output of the
-first.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-185">HADOOP-185</a>.  Fix so that, when a task tracker times out making the
-RPC asking for a new task to run, the job tracker does not think
-that it is actually running the task returned.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-190">HADOOP-190</a>.  If a child process hangs after it has reported
-completion, its output should not be lost.<br />(Stack via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-184">HADOOP-184</a>. Re-structure some test code to better support testing
-on a cluster.<br />(Mahadev Konar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-191">HADOOP-191</a>  Add streaming package, Hadoop's first contrib module.
-This permits folks to easily submit MapReduce jobs whose map and
-reduce functions are implemented by shell commands.  Use
-'bin/hadoop jar build/hadoop-streaming.jar' to get details.<br />(Michel Tourn via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-189">HADOOP-189</a>.  Fix MapReduce in standalone configuration to
-correctly handle job jar files that contain a lib directory with
-nested jar files.<br />(cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-65">HADOOP-65</a>.  Initial version of record I/O framework that enables
-the specification of record types and generates marshalling code
-in both Java and C++.  Generated Java code implements
-WritableComparable, but is not yet otherwise used by
-Hadoop.<br />(Milind Bhandarkar via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-193">HADOOP-193</a>.  Add a MapReduce-based FileSystem benchmark.<br />(Konstantin Shvachko via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-194">HADOOP-194</a>.  Add a MapReduce-based FileSystem checker.  This reads
-every block in every file in the filesystem.<br />(Konstantin Shvachko
-via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-182">HADOOP-182</a>.  Fix so that lost task trackers to not change the
-status of reduce tasks or completed jobs.  Also fixes the progress
-meter so that failed tasks are subtracted.<br />(omalley via cutting)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-96">HADOOP-96</a>.  Logging improvements.  Log files are now separate from
-standard output and standard error files.  Logs are now rolled.
-Logging of all DFS state changes can be enabled, to facilitate
-debugging.<br />(Hairong Kuang via cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.1.1_-_2006-04-08_')">Release 0.1.1 - 2006-04-08
-</a></h3>
-    <ol id="release_0.1.1_-_2006-04-08_">
-      <li>Added CHANGES.txt, logging all significant changes to Hadoop.<br />(cutting)</li>
-      <li>Fix MapReduceBase.close() to throw IOException, as declared in the
-Closeable interface.  This permits subclasses which override this
-method to throw that exception.<br />(cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-117">HADOOP-117</a>.  Pathnames were mistakenly transposed in
-JobConf.getLocalFile() causing many mapred temporary files to not
-be removed.<br />(Raghavendra Prabhu via cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-116">HADOOP-116</a>. Clean up job submission files when jobs complete.<br />(cutting)</li>
-      <li>Fix <a href="http://issues.apache.org/jira/browse/HADOOP-125">HADOOP-125</a>. Fix handling of absolute paths on Windows<br />(cutting)</li>
-    </ol>
-<h3><a href="javascript:toggleList('release_0.1.0_-_2006-04-01_')">Release 0.1.0 - 2006-04-01
-</a></h3>
-    <ol id="release_0.1.0_-_2006-04-01_">
-      <li>The first release of Hadoop.
-</li>
-    </ol>
-</ul>
-</body>
-</html>

+ 0 - 942
docs/cluster_setup.html

@@ -1,942 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>Hadoop Cluster Setup</title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">Project</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">Wiki</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.20 Documentation</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">Documentation</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">Overview</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">Hadoop Quick Start</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">Hadoop Cluster Setup</div>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Hadoop Map/Reduce Tutorial</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">Hadoop Command Guide</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">Hadoop FS Shell Guide</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">Hadoop DistCp Guide</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop Native Libraries</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Hadoop Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS User Guide</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS Architecture</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Admin Guide: Permissions</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS Admin Guide: Quotas</a>
-</div>
-<div class="menuitem">
-<a href="SLG_user_guide.html">HDFS Utilities</a>
-</div>
-<div class="menuitem">
-<a href="libhdfs.html">HDFS C API</a>
-</div>
-<div class="menuitem">
-<a href="hod_user_guide.html">HOD User Guide</a>
-</div>
-<div class="menuitem">
-<a href="hod_admin_guide.html">HOD Admin Guide</a>
-</div>
-<div class="menuitem">
-<a href="hod_config_guide.html">HOD Config Guide</a>
-</div>
-<div class="menuitem">
-<a href="capacity_scheduler.html">Capacity Scheduler</a>
-</div>
-<div class="menuitem">
-<a href="vaidya.html">Hadoop Vaidya</a>
-</div>
-<div class="menuitem">
-<a href="api/index.html">API Docs</a>
-</div>
-<div class="menuitem">
-<a href="jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">Wiki</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">FAQ</a>
-</div>
-<div class="menuitem">
-<a href="releasenotes.html">Release Notes</a>
-</div>
-<div class="menuitem">
-<a href="changes.html">Change Log</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="cluster_setup.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>Hadoop Cluster Setup</h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#Purpose">Purpose</a>
-</li>
-<li>
-<a href="#Pre-requisites">Pre-requisites</a>
-</li>
-<li>
-<a href="#Installation">Installation</a>
-</li>
-<li>
-<a href="#Configuration">Configuration</a>
-<ul class="minitoc">
-<li>
-<a href="#Configuration+Files">Configuration Files</a>
-</li>
-<li>
-<a href="#Site+Configuration">Site Configuration</a>
-<ul class="minitoc">
-<li>
-<a href="#Configuring+the+Environment+of+the+Hadoop+Daemons">Configuring the Environment of the Hadoop Daemons</a>
-</li>
-<li>
-<a href="#Configuring+the+Hadoop+Daemons">Configuring the Hadoop Daemons</a>
-</li>
-<li>
-<a href="#Slaves">Slaves</a>
-</li>
-<li>
-<a href="#Logging">Logging</a>
-</li>
-</ul>
-</li>
-</ul>
-</li>
-<li>
-<a href="#Cluster+Restartability">Cluster Restartability</a>
-<ul class="minitoc">
-<li>
-<a href="#Map%2FReduce">Map/Reduce</a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#Hadoop+Rack+Awareness">Hadoop Rack Awareness</a>
-</li>
-<li>
-<a href="#Hadoop+Startup">Hadoop Startup</a>
-</li>
-<li>
-<a href="#Hadoop+Shutdown">Hadoop Shutdown</a>
-</li>
-</ul>
-</div>
-  
-    
-<a name="N1000D"></a><a name="Purpose"></a>
-<h2 class="h3">Purpose</h2>
-<div class="section">
-<p>This document describes how to install, configure and manage non-trivial
-      Hadoop clusters ranging from a few nodes to extremely large clusters with 
-      thousands of nodes.</p>
-<p>
-      To play with Hadoop, you may first want to install Hadoop on a single machine (see <a href="quickstart.html"> Hadoop Quick Start</a>).
-      </p>
-</div>
-    
-    
-<a name="N1001E"></a><a name="Pre-requisites"></a>
-<h2 class="h3">Pre-requisites</h2>
-<div class="section">
-<ol>
-        
-<li>
-          Make sure all <a href="quickstart.html#PreReqs">requisite</a> software 
-          is installed on all nodes in your cluster.
-        </li>
-        
-<li>
-          
-<a href="quickstart.html#Download">Get</a> the Hadoop software.
-        </li>
-      
-</ol>
-</div>
-    
-    
-<a name="N10036"></a><a name="Installation"></a>
-<h2 class="h3">Installation</h2>
-<div class="section">
-<p>Installing a Hadoop cluster typically involves unpacking the software 
-      on all the machines in the cluster.</p>
-<p>Typically one machine in the cluster is designated as the 
-      <span class="codefrag">NameNode</span> and another machine the as <span class="codefrag">JobTracker</span>,
-      exclusively. These are the <em>masters</em>. The rest of the machines in 
-      the cluster act as both <span class="codefrag">DataNode</span> <em>and</em> 
-      <span class="codefrag">TaskTracker</span>. These are the <em>slaves</em>.</p>
-<p>The root of the distribution is referred to as 
-      <span class="codefrag">HADOOP_HOME</span>. All machines in the cluster usually have the same 
-      <span class="codefrag">HADOOP_HOME</span> path.</p>
-</div>
-    
-    
-<a name="N10061"></a><a name="Configuration"></a>
-<h2 class="h3">Configuration</h2>
-<div class="section">
-<p>The following sections describe how to configure a Hadoop cluster.</p>
-<a name="N1006A"></a><a name="Configuration+Files"></a>
-<h3 class="h4">Configuration Files</h3>
-<p>Hadoop configuration is driven by two important configuration files
-        found in the <span class="codefrag">conf/</span> directory of the distribution:</p>
-<ol>
-          
-<li>
-            
-<a href="http://hadoop.apache.org/core/docs/current/hadoop-default.html">hadoop-default.xml</a> - Read-only 
-            default configuration.
-          </li>
-          
-<li>
-            
-<em>hadoop-site.xml</em> - Site-specific configuration.
-          </li>
-        
-</ol>
-<p>To learn more about how the Hadoop framework is controlled by these 
-        configuration files, look 
-        <a href="api/org/apache/hadoop/conf/Configuration.html">here</a>.</p>
-<p>Additionally, you can control the Hadoop scripts found in the 
-        <span class="codefrag">bin/</span> directory of the distribution, by setting site-specific 
-        values via <span class="codefrag">conf/hadoop-env.sh</span>.</p>
-<a name="N10097"></a><a name="Site+Configuration"></a>
-<h3 class="h4">Site Configuration</h3>
-<p>To configure the Hadoop cluster you will need to configure the
-        <em>environment</em> in which the Hadoop daemons execute as well as
-        the <em>configuration parameters</em> for the Hadoop daemons.</p>
-<p>The Hadoop daemons are <span class="codefrag">NameNode</span>/<span class="codefrag">DataNode</span> 
-        and <span class="codefrag">JobTracker</span>/<span class="codefrag">TaskTracker</span>.</p>
-<a name="N100B5"></a><a name="Configuring+the+Environment+of+the+Hadoop+Daemons"></a>
-<h4>Configuring the Environment of the Hadoop Daemons</h4>
-<p>Administrators should use the <span class="codefrag">conf/hadoop-env.sh</span> script
-          to do site-specific customization of the Hadoop daemons' process 
-          environment.</p>
-<p>At the very least you should specify the
-          <span class="codefrag">JAVA_HOME</span> so that it is correctly defined on each
-          remote node.</p>
-<p>Administrators can configure individual daemons using the
-          configuration options <span class="codefrag">HADOOP_*_OPTS</span>. The available 
-          options are shown in the table below.</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-          
-<tr>
-<th colspan="1" rowspan="1">Daemon</th><th colspan="1" rowspan="1">Configure Options</th>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">NameNode</td><td colspan="1" rowspan="1">HADOOP_NAMENODE_OPTS</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">DataNode</td><td colspan="1" rowspan="1">HADOOP_DATANODE_OPTS</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">SecondaryNamenode</td>
-              <td colspan="1" rowspan="1">HADOOP_SECONDARYNAMENODE_OPTS</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">JobTracker</td><td colspan="1" rowspan="1">HADOOP_JOBTRACKER_OPTS</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">TaskTracker</td><td colspan="1" rowspan="1">HADOOP_TASKTRACKER_OPTS</td>
-</tr>
-          
-</table>
-<p> For example, to configure the NameNode to use parallelGC, the
-          following statement should be added to <span class="codefrag">hadoop-env.sh</span>:
-          <br>
-<span class="codefrag">
-          export HADOOP_NAMENODE_OPTS="-XX:+UseParallelGC ${HADOOP_NAMENODE_OPTS}"
-          </span>
-<br>
-</p>
-<p>Other useful configuration parameters that you can customize 
-          include:</p>
-<ul>
-            
-<li>
-              
-<span class="codefrag">HADOOP_LOG_DIR</span> - The directory where the daemons'
-              log files are stored. They are automatically created if they don't
-              exist.
-            </li>
-            
-<li>
-              
-<span class="codefrag">HADOOP_HEAPSIZE</span> - The maximum amount of heapsize 
-              to use, in MB e.g. <span class="codefrag">1000MB</span>. This is used to 
-              configure the heap size for the hadoop daemon. By default,
-              the value is <span class="codefrag">1000MB</span>.
-            </li>
-          
-</ul>
-<a name="N10130"></a><a name="Configuring+the+Hadoop+Daemons"></a>
-<h4>Configuring the Hadoop Daemons</h4>
-<p>This section deals with important parameters to be specified in the
-          <span class="codefrag">conf/hadoop-site.xml</span> for the Hadoop cluster.</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-  		    
-<tr>
-		      
-<th colspan="1" rowspan="1">Parameter</th>
-		      <th colspan="1" rowspan="1">Value</th> 
-		      <th colspan="1" rowspan="1">Notes</th>
-		    
-</tr>
-  		    
-<tr>
-		      
-<td colspan="1" rowspan="1">fs.default.name</td>
-  		      <td colspan="1" rowspan="1">URI of <span class="codefrag">NameNode</span>.</td>
-		      <td colspan="1" rowspan="1"><em>hdfs://hostname/</em></td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">mapred.job.tracker</td>
-		      <td colspan="1" rowspan="1">Host or IP and port of <span class="codefrag">JobTracker</span>.</td>
-		      <td colspan="1" rowspan="1"><em>host:port</em> pair.</td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">dfs.name.dir</td>
-		      <td colspan="1" rowspan="1">
-		        Path on the local filesystem where the <span class="codefrag">NameNode</span> 
-		        stores the namespace and transaction logs persistently.</td>
-		      <td colspan="1" rowspan="1">
-		        If this is a comma-delimited list of directories then the name 
-		        table is replicated in all of the directories, for redundancy.
-		      </td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">dfs.data.dir</td>
-		      <td colspan="1" rowspan="1">
-		        Comma-separated list of paths on the local filesystem of a 
-		        <span class="codefrag">DataNode</span> where it should store its blocks.
-		      </td>
-		      <td colspan="1" rowspan="1">
-		        If this is a comma-delimited list of directories, then data will 
-		        be stored in all named directories, typically on different 
-		        devices.
-		      </td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">mapred.system.dir</td>
-		      <td colspan="1" rowspan="1">
-		        Path on HDFS where the Map/Reduce framework stores 
-		        system files e.g. <span class="codefrag">/hadoop/mapred/system/</span>.
-		      </td>
-		      <td colspan="1" rowspan="1">
-		        This is in the default filesystem (HDFS) and must be accessible 
-		        from both the server and client machines.
-		      </td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">mapred.local.dir</td>
-		      <td colspan="1" rowspan="1">
-		        Comma-separated list of paths on the local filesystem where 
-		        temporary Map/Reduce data is written.
-		      </td>
-		      <td colspan="1" rowspan="1">Multiple paths help spread disk i/o.</td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">mapred.tasktracker.{map|reduce}.tasks.maximum</td>
-		      <td colspan="1" rowspan="1">
-		        The maximum number of map and reduce tasks, respectively, that are run 
-		        simultaneously on a given <span class="codefrag">TaskTracker</span>.
-		      </td>
-		      <td colspan="1" rowspan="1">
-		        Defaults to 2 (2 maps and 2 reduces), but vary it depending on 
-		        your hardware.
-		      </td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">dfs.hosts/dfs.hosts.exclude</td>
-		      <td colspan="1" rowspan="1">List of permitted/excluded DataNodes.</td>
-		      <td colspan="1" rowspan="1">
-		        If necessary, use these files to control the list of allowable 
-		        datanodes.
-		      </td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">mapred.hosts/mapred.hosts.exclude</td>
-		      <td colspan="1" rowspan="1">List of permitted/excluded TaskTrackers.</td>
-		      <td colspan="1" rowspan="1">
-		        If necessary, use these files to control the list of allowable 
-		        TaskTrackers.
-		      </td>
-  		    
-</tr>
-        
-<tr>
-          
-<td colspan="1" rowspan="1">mapred.queue.names</td>
-          <td colspan="1" rowspan="1">Comma separated list of queues to which jobs can be submitted.</td>
-          <td colspan="1" rowspan="1">
-            The Map/Reduce system always supports at least one queue
-            named <em>default</em>. Hence, this parameter's
-            value should always contain the string <em>default</em>.
-            Some job schedulers supported in Hadoop, like the 
-            <a href="capacity_scheduler.html">Capacity 
-            Scheduler</a>, support multiple queues. If such a scheduler is
-            being used, the list of configured queue names must be
-            specified here. Once queues are defined, users can submit
-            jobs to a queue using the property name 
-            <em>mapred.job.queue.name</em> in the job configuration.
-            The scheduler may manage a separate 
-            configuration file for the properties of these 
-            queues. Refer to the scheduler's documentation 
-            for more information.
-          </td>
-        
-</tr>
-        
-<tr>
-          
-<td colspan="1" rowspan="1">mapred.acls.enabled</td>
-          <td colspan="1" rowspan="1">Specifies whether ACLs are supported for controlling job
-              submission and administration.</td>
-          <td colspan="1" rowspan="1">
-            If <em>true</em>, ACLs will be checked while submitting
-            and administering jobs. ACLs can be specified using the
-            configuration parameters of the form
-            <em>mapred.queue.queue-name.acl-name</em>, defined below.
-          </td>
-        
-</tr>
-        
-<tr>
-          
-<td colspan="1" rowspan="1">mapred.queue.<em>queue-name</em>.acl-submit-job</td>
-          <td colspan="1" rowspan="1">List of users and groups that can submit jobs to the
-              specified <em>queue-name</em>.</td>
-          <td colspan="1" rowspan="1">
-            The lists of users and groups are both comma-separated
-            lists of names. The two lists are separated by a blank.
-            Example: <em>user1,user2 group1,group2</em>.
-            If you wish to define only a list of groups, provide
-            a blank at the beginning of the value.
-          </td>
-        
-</tr>
-        
-<tr>
-          
-<td colspan="1" rowspan="1">mapred.queue.<em>queue-name</em>.acl-administer-job</td>
-          <td colspan="1" rowspan="1">List of users and groups that can change the priority
-              or kill jobs that have been submitted to the
-              specified <em>queue-name</em>.</td>
-          <td colspan="1" rowspan="1">
-            The lists of users and groups are both comma-separated
-            lists of names. The two lists are separated by a blank.
-            Example: <em>user1,user2 group1,group2</em>.
-            If you wish to define only a list of groups, provide
-            a blank at the beginning of the value. Note that an
-            owner of a job can always change the priority or kill
-            his/her own job, irrespective of the ACLs.
-          </td>
-        
-</tr>
-		  
-</table>
-<p>Typically all the above parameters are marked as 
-          <a href="api/org/apache/hadoop/conf/Configuration.html#FinalParams">
-          final</a> to ensure that they cannot be overridden by user applications.
-          </p>
-<a name="N1027C"></a><a name="Real-World+Cluster+Configurations"></a>
-<h5>Real-World Cluster Configurations</h5>
-<p>This section lists some non-default configuration parameters which 
-            have been used to run the <em>sort</em> benchmark on very large 
-            clusters.</p>
-<ul>
-              
-<li>
-                
-<p>Some non-default configuration values used to run sort900,
-                that is 9TB of data sorted on a cluster with 900 nodes:</p>
-                
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-  		          
-<tr>
-		            
-<th colspan="1" rowspan="1">Parameter</th>
-		            <th colspan="1" rowspan="1">Value</th> 
-		            <th colspan="1" rowspan="1">Notes</th>
-		          
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">dfs.block.size</td>
-                    <td colspan="1" rowspan="1">134217728</td>
-                    <td colspan="1" rowspan="1">HDFS blocksize of 128MB for large file-systems.</td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">dfs.namenode.handler.count</td>
-                    <td colspan="1" rowspan="1">40</td>
-                    <td colspan="1" rowspan="1">
-                      More NameNode server threads to handle RPCs from a large 
-                      number of DataNodes.
-                    </td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">mapred.reduce.parallel.copies</td>
-                    <td colspan="1" rowspan="1">20</td>
-                    <td colspan="1" rowspan="1">
-                      Higher number of parallel copies run by reduces to fetch
-                      outputs from a very large number of maps.
-                    </td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">mapred.child.java.opts</td>
-                    <td colspan="1" rowspan="1">-Xmx512M</td>
-                    <td colspan="1" rowspan="1">
-                      Larger heap-size for child jvms of maps/reduces. 
-                    </td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">fs.inmemory.size.mb</td>
-                    <td colspan="1" rowspan="1">200</td>
-                    <td colspan="1" rowspan="1">
-                      Larger amount of memory allocated for the in-memory 
-                      file-system used to merge map-outputs at the reduces.
-                    </td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">io.sort.factor</td>
-                    <td colspan="1" rowspan="1">100</td>
-                    <td colspan="1" rowspan="1">More streams merged at once while sorting files.</td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">io.sort.mb</td>
-                    <td colspan="1" rowspan="1">200</td>
-                    <td colspan="1" rowspan="1">Higher memory-limit while sorting data.</td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">io.file.buffer.size</td>
-                    <td colspan="1" rowspan="1">131072</td>
-                    <td colspan="1" rowspan="1">Size of read/write buffer used in SequenceFiles.</td>
-                  
-</tr>
-                
-</table>
-              
-</li>
-              
-<li>
-                
-<p>Updates to some configuration values to run sort1400 and 
-                sort2000, that is 14TB of data sorted on 1400 nodes and 20TB of
-                data sorted on 2000 nodes:</p>
-                
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-  		          
-<tr>
-		            
-<th colspan="1" rowspan="1">Parameter</th>
-		            <th colspan="1" rowspan="1">Value</th> 
-		            <th colspan="1" rowspan="1">Notes</th>
-		          
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">mapred.job.tracker.handler.count</td>
-                    <td colspan="1" rowspan="1">60</td>
-                    <td colspan="1" rowspan="1">
-                      More JobTracker server threads to handle RPCs from a large 
-                      number of TaskTrackers.
-                    </td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">mapred.reduce.parallel.copies</td>
-                    <td colspan="1" rowspan="1">50</td>
-                    <td colspan="1" rowspan="1"></td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">tasktracker.http.threads</td>
-                    <td colspan="1" rowspan="1">50</td>
-                    <td colspan="1" rowspan="1">
-                      More worker threads for the TaskTracker's http server. The
-                      http server is used by reduces to fetch intermediate 
-                      map-outputs.
-                    </td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">mapred.child.java.opts</td>
-                    <td colspan="1" rowspan="1">-Xmx1024M</td>
-                    <td colspan="1" rowspan="1">Larger heap-size for child jvms of maps/reduces.</td>
-                  
-</tr>
-                
-</table>
-              
-</li>
-            
-</ul>
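The illustrative hadoop-site.xml excerpt referred to above combines a few of the values from these tables. It is only a sketch; whether these settings are appropriate depends on the cluster's size and hardware.

```xml
<!-- Values taken from the sort benchmark tables above; adjust to the cluster at hand. -->
<property>
  <name>dfs.block.size</name>
  <value>134217728</value>
</property>
<property>
  <name>dfs.namenode.handler.count</name>
  <value>40</value>
</property>
<property>
  <name>mapred.job.tracker.handler.count</name>
  <value>60</value>
</property>
<property>
  <name>tasktracker.http.threads</name>
  <value>50</value>
</property>
<property>
  <name>io.sort.factor</name>
  <value>100</value>
</property>
<property>
  <name>io.sort.mb</name>
  <value>200</value>
</property>
<property>
  <name>mapred.child.java.opts</name>
  <value>-Xmx1024M</value>
</property>
```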
-<a name="N1039A"></a><a name="Slaves"></a>
-<h4>Slaves</h4>
-<p>Typically you choose one machine in the cluster to act as the 
-          <span class="codefrag">NameNode</span> and one machine to act as the 
-          <span class="codefrag">JobTracker</span>, exclusively. The rest of the machines act as 
-          both a <span class="codefrag">DataNode</span> and a <span class="codefrag">TaskTracker</span> and are 
-          referred to as <em>slaves</em>.</p>
-<p>List all slave hostnames or IP addresses in your 
-          <span class="codefrag">conf/slaves</span> file, one per line.</p>
-<a name="N103B9"></a><a name="Logging"></a>
-<h4>Logging</h4>
-<p>Hadoop uses the <a href="http://logging.apache.org/log4j/">Apache 
-          log4j</a> via the <a href="http://commons.apache.org/logging/">Apache 
-          Commons Logging</a> framework for logging. Edit the 
-          <span class="codefrag">conf/log4j.properties</span> file to customize the Hadoop 
-          daemons' logging configuration (log-formats and so on).</p>
-<a name="N103CD"></a><a name="History+Logging"></a>
-<h5>History Logging</h5>
-<p> The job history files are stored in the central location 
-            <span class="codefrag">hadoop.job.history.location</span>, which can also be on DFS and
-            whose default value is <span class="codefrag">${HADOOP_LOG_DIR}/history</span>. 
-            The history web UI is accessible from the JobTracker web UI.</p>
-<p> The history files are also logged to the user-specified directory
-            <span class="codefrag">hadoop.job.history.user.location</span>, 
-            which defaults to the job output directory. The files are stored in
-            "_logs/history/" under the specified directory. Hence, by default 
-            they will be in "mapred.output.dir/_logs/history/". Users can disable this
-            logging by giving the value <span class="codefrag">none</span> for 
-            <span class="codefrag">hadoop.job.history.user.location</span>.
-</p>
-<p> Users can view a summary of the history logs in a given directory 
-            using the following command: <br>
-            
-<span class="codefrag">$ bin/hadoop job -history output-dir</span>
-<br> 
-            This command prints job details, and details of failed and
-            killed tips. <br>
-            More details about the job, such as successful tasks and 
-            task attempts made for each task, can be viewed using the  
-            following command: <br>
-            
-<span class="codefrag">$ bin/hadoop job -history all output-dir</span>
-<br>
-</p>
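For example, a hadoop-site.xml fragment like the following sketch would keep the central history under a DFS directory and disable the per-job copy in the output directory; the /mapred/history path is purely illustrative.

```xml
<property>
  <name>hadoop.job.history.location</name>
  <!-- hypothetical DFS path for the central job history -->
  <value>/mapred/history</value>
</property>
<property>
  <name>hadoop.job.history.user.location</name>
  <!-- "none" disables the per-job copy under mapred.output.dir/_logs/history/ -->
  <value>none</value>
</property>
```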
-<p>Once all the necessary configuration is complete, distribute the files
-      to the <span class="codefrag">HADOOP_CONF_DIR</span> directory on all the machines, 
-      typically <span class="codefrag">${HADOOP_HOME}/conf</span>.</p>
-</div>
-    
-<a name="N10405"></a><a name="Cluster+Restartability"></a>
-<h2 class="h3">Cluster Restartability</h2>
-<div class="section">
-<a name="N1040B"></a><a name="Map%2FReduce"></a>
-<h3 class="h4">Map/Reduce</h3>
-<p>A restarted JobTracker can recover running jobs if 
-        <span class="codefrag">mapred.jobtracker.restart.recover</span> is set to true and 
-        <a href="#Logging">JobHistory logging</a> is enabled. Also, 
-        <span class="codefrag">mapred.jobtracker.job.history.block.size</span> should be 
-        set to a value that flushes job history to disk as soon as 
-        possible; the typical value is 3145728 (3 MB).</p>
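A corresponding hadoop-site.xml fragment, using the typical block size mentioned above, might look like this sketch:

```xml
<property>
  <name>mapred.jobtracker.restart.recover</name>
  <value>true</value>
</property>
<property>
  <name>mapred.jobtracker.job.history.block.size</name>
  <!-- 3 MB, so job history reaches disk quickly -->
  <value>3145728</value>
</property>
```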
-</div>
-    
-    
-<a name="N10420"></a><a name="Hadoop+Rack+Awareness"></a>
-<h2 class="h3">Hadoop Rack Awareness</h2>
-<div class="section">
-<p>The HDFS and the Map/Reduce components are rack-aware.</p>
-<p>The <span class="codefrag">NameNode</span> and the <span class="codefrag">JobTracker</span> obtains the
-      <span class="codefrag">rack id</span> of the slaves in the cluster by invoking an API 
-      <a href="api/org/apache/hadoop/net/DNSToSwitchMapping.html#resolve(java.util.List)">resolve</a> in an administrator configured
-      module. The API resolves the slave's DNS name (also IP address) to a 
-      rack id. What module to use can be configured using the configuration
-      item <span class="codefrag">topology.node.switch.mapping.impl</span>. The default 
-      implementation of the same runs a script/command configured using 
-      <span class="codefrag">topology.script.file.name</span>. If topology.script.file.name is
-      not set, the rack id <span class="codefrag">/default-rack</span> is returned for any 
-      passed IP address. The additional configuration in the Map/Reduce
-      part is <span class="codefrag">mapred.cache.task.levels</span> which determines the number
-      of levels (in the network topology) of caches. So, for example, if it is
-      the default value of 2, two levels of caches will be constructed - 
-      one for hosts (host -&gt; task mapping) and another for racks 
-      (rack -&gt; task mapping).
-      </p>
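As an illustration, the script-based mapping could be wired up with a hadoop-site.xml fragment along these lines; the script path is hypothetical, and the script itself (which maps host names or IPs to rack ids such as /rack1) is written by the administrator.

```xml
<property>
  <name>topology.script.file.name</name>
  <!-- hypothetical path to an administrator-provided script that prints a rack id per argument -->
  <value>/etc/hadoop/topology.sh</value>
</property>
<property>
  <name>mapred.cache.task.levels</name>
  <!-- default of 2: one cache level for hosts, one for racks -->
  <value>2</value>
</property>
```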
-</div>
-    
-    
-<a name="N10446"></a><a name="Hadoop+Startup"></a>
-<h2 class="h3">Hadoop Startup</h2>
-<div class="section">
-<p>To start a Hadoop cluster you will need to start both the HDFS and 
-      the Map/Reduce clusters.</p>
-<p>
-        Format a new distributed filesystem:<br>
-        
-<span class="codefrag">$ bin/hadoop namenode -format</span>
-      
-</p>
-<p>
-        Start the HDFS with the following command, run on the designated
-        <span class="codefrag">NameNode</span>:<br>
-        
-<span class="codefrag">$ bin/start-dfs.sh</span>
-      
-</p>
-<p>The <span class="codefrag">bin/start-dfs.sh</span> script also consults the 
-      <span class="codefrag">${HADOOP_CONF_DIR}/slaves</span> file on the <span class="codefrag">NameNode</span> 
-      and starts the <span class="codefrag">DataNode</span> daemon on all the listed slaves.</p>
-<p>
-        Start Map-Reduce with the following command, run on the designated
-        <span class="codefrag">JobTracker</span>:<br>
-        
-<span class="codefrag">$ bin/start-mapred.sh</span>
-      
-</p>
-<p>The <span class="codefrag">bin/start-mapred.sh</span> script also consults the 
-      <span class="codefrag">${HADOOP_CONF_DIR}/slaves</span> file on the <span class="codefrag">JobTracker</span> 
-      and starts the <span class="codefrag">TaskTracker</span> daemon on all the listed slaves.
-      </p>
-</div>
-    
-    
-<a name="N1048C"></a><a name="Hadoop+Shutdown"></a>
-<h2 class="h3">Hadoop Shutdown</h2>
-<div class="section">
-<p>
-        Stop HDFS with the following command, run on the designated 
-        <span class="codefrag">NameNode</span>:<br>
-        
-<span class="codefrag">$ bin/stop-dfs.sh</span>
-      
-</p>
-<p>The <span class="codefrag">bin/stop-dfs.sh</span> script also consults the 
-      <span class="codefrag">${HADOOP_CONF_DIR}/slaves</span> file on the <span class="codefrag">NameNode</span> 
-      and stops the <span class="codefrag">DataNode</span> daemon on all the listed slaves.</p>
-<p>
-        Stop Map/Reduce with the following command, run on the designated
-        <span class="codefrag">JobTracker</span>:<br>
-        
-<span class="codefrag">$ bin/stop-mapred.sh</span>
-<br>
-      
-</p>
-<p>The <span class="codefrag">bin/stop-mapred.sh</span> script also consults the 
-      <span class="codefrag">${HADOOP_CONF_DIR}/slaves</span> file on the <span class="codefrag">JobTracker</span> 
-      and stops the <span class="codefrag">TaskTracker</span> daemon on all the listed slaves.</p>
-</div>
-  
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2008 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 151
docs/cluster_setup.pdf


+ 0 - 2
docs/cn/broken-links.xml

@@ -1,2 +0,0 @@
-<broken-links>
-</broken-links>

+ 0 - 730
docs/cn/cluster_setup.html

@@ -1,730 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>Hadoop集群搭建</title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">项目</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">维基</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.18文档</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">文档</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">概述</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">快速入门</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">集群搭建</div>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS构架设计</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS使用指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS权限指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS配额管理指南</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">命令手册</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">FS Shell使用指南</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">DistCp使用指南</a>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Map-Reduce教程</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop本地库</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hod.html">Hadoop On Demand</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="cluster_setup.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>Hadoop集群搭建</h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#%E7%9B%AE%E7%9A%84">目的</a>
-</li>
-<li>
-<a href="#%E5%85%88%E5%86%B3%E6%9D%A1%E4%BB%B6">先决条件</a>
-</li>
-<li>
-<a href="#%E5%AE%89%E8%A3%85">安装</a>
-</li>
-<li>
-<a href="#%E9%85%8D%E7%BD%AE">配置</a>
-<ul class="minitoc">
-<li>
-<a href="#%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6">配置文件</a>
-</li>
-<li>
-<a href="#%E9%9B%86%E7%BE%A4%E9%85%8D%E7%BD%AE">集群配置</a>
-<ul class="minitoc">
-<li>
-<a href="#%E9%85%8D%E7%BD%AEHadoop%E5%AE%88%E6%8A%A4%E8%BF%9B%E7%A8%8B%E7%9A%84%E8%BF%90%E8%A1%8C%E7%8E%AF%E5%A2%83">配置Hadoop守护进程的运行环境</a>
-</li>
-<li>
-<a href="#%E9%85%8D%E7%BD%AEHadoop%E5%AE%88%E6%8A%A4%E8%BF%9B%E7%A8%8B%E7%9A%84%E8%BF%90%E8%A1%8C%E5%8F%82%E6%95%B0">配置Hadoop守护进程的运行参数</a>
-</li>
-<li>
-<a href="#Slaves">Slaves</a>
-</li>
-<li>
-<a href="#%E6%97%A5%E5%BF%97">日志</a>
-</li>
-</ul>
-</li>
-</ul>
-</li>
-<li>
-<a href="#Hadoop%E7%9A%84%E6%9C%BA%E6%9E%B6%E6%84%9F%E7%9F%A5">Hadoop的机架感知</a>
-</li>
-<li>
-<a href="#%E5%90%AF%E5%8A%A8Hadoop">启动Hadoop</a>
-</li>
-<li>
-<a href="#%E5%81%9C%E6%AD%A2Hadoop">停止Hadoop</a>
-</li>
-</ul>
-</div>
-  
-    
-<a name="N1000D"></a><a name="%E7%9B%AE%E7%9A%84"></a>
-<h2 class="h3">目的</h2>
-<div class="section">
-<p>本文描述了如何安装、配置和管理有实际意义的Hadoop集群,其规模可从几个节点的小集群到几千个节点的超大集群。</p>
-<p>如果你希望在单机上安装Hadoop玩玩,从<a href="quickstart.html">这里</a>能找到相关细节。</p>
-</div>
-    
-    
-<a name="N1001E"></a><a name="%E5%85%88%E5%86%B3%E6%9D%A1%E4%BB%B6"></a>
-<h2 class="h3">先决条件</h2>
-<div class="section">
-<ol>
-        
-<li>
-          确保在你集群中的每个节点上都安装了所有<a href="quickstart.html#PreReqs">必需</a>软件。
-        </li>
-        
-<li>
-          
-<a href="quickstart.html#%E4%B8%8B%E8%BD%BD">获取</a>Hadoop软件包。
-        </li>
-      
-</ol>
-</div>
-    
-    
-<a name="N10036"></a><a name="%E5%AE%89%E8%A3%85"></a>
-<h2 class="h3">安装</h2>
-<div class="section">
-<p>安装Hadoop集群通常要将安装软件解压到集群内的所有机器上。</p>
-<p>通常,集群里的一台机器被指定为 
-	 <span class="codefrag">NameNode</span>,另一台不同的机器被指定为<span class="codefrag">JobTracker</span>。这些机器是<em>masters</em>。余下的机器即作为<span class="codefrag">DataNode</span><em>也</em>作为<span class="codefrag">TaskTracker</span>。这些机器是<em>slaves</em>。</p>
-<p>我们用<span class="codefrag">HADOOP_HOME</span>指代安装的根路径。通常,集群里的所有机器的<span class="codefrag">HADOOP_HOME</span>路径相同。</p>
-</div>
-    
-    
-<a name="N10060"></a><a name="%E9%85%8D%E7%BD%AE"></a>
-<h2 class="h3">配置</h2>
-<div class="section">
-<p>接下来的几节描述了如何配置Hadoop集群。</p>
-<a name="N10069"></a><a name="%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6"></a>
-<h3 class="h4">配置文件</h3>
-<p>对Hadoop的配置通过<span class="codefrag">conf/</span>目录下的两个重要配置文件完成:</p>
-<ol>
-          
-<li>
-            
-<a href="http://hadoop.apache.org/core/docs/current/hadoop-default.html">hadoop-default.xml</a> - 只读的默认配置。
-          </li>
-          
-<li>
-            
-<em>hadoop-site.xml</em> - 集群特有的配置。
-          </li>
-        
-</ol>
-<p>要了解更多关于这些配置文件如何影响Hadoop框架的细节,请看<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/conf/Configuration.html">这里</a>。</p>
-<p>此外,通过设置<span class="codefrag">conf/hadoop-env.sh</span>中的变量为集群特有的值,你可以对<span class="codefrag">bin/</span>目录下的Hadoop脚本进行控制。</p>
-<a name="N10096"></a><a name="%E9%9B%86%E7%BE%A4%E9%85%8D%E7%BD%AE"></a>
-<h3 class="h4">集群配置</h3>
-<p>要配置Hadoop集群,你需要设置Hadoop守护进程的<em>运行环境</em>和Hadoop守护进程的<em>运行参数</em>。</p>
-<p>Hadoop守护进程指<span class="codefrag">NameNode</span>/<span class="codefrag">DataNode</span> 
-        和<span class="codefrag">JobTracker</span>/<span class="codefrag">TaskTracker</span>。</p>
-<a name="N100B4"></a><a name="%E9%85%8D%E7%BD%AEHadoop%E5%AE%88%E6%8A%A4%E8%BF%9B%E7%A8%8B%E7%9A%84%E8%BF%90%E8%A1%8C%E7%8E%AF%E5%A2%83"></a>
-<h4>配置Hadoop守护进程的运行环境</h4>
-<p>管理员可在<span class="codefrag">conf/hadoop-env.sh</span>脚本内对Hadoop守护进程的运行环境做特别指定。</p>
-<p>至少,你得设定<span class="codefrag">JAVA_HOME</span>使之在每一远端节点上都被正确设置。</p>
-<p>管理员可以通过配置选项<span class="codefrag">HADOOP_*_OPTS</span>来分别配置各个守护进程。
-          下表是可以配置的选项。
-          </p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-          
-<tr>
-<th colspan="1" rowspan="1">守护进程</th><th colspan="1" rowspan="1">配置选项</th>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">NameNode</td><td colspan="1" rowspan="1">HADOOP_NAMENODE_OPTS</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">DataNode</td><td colspan="1" rowspan="1">HADOOP_DATANODE_OPTS</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">SecondaryNamenode</td>
-              <td colspan="1" rowspan="1">HADOOP_SECONDARYNAMENODE_OPTS</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">JobTracker</td><td colspan="1" rowspan="1">HADOOP_JOBTRACKER_OPTS</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">TaskTracker</td><td colspan="1" rowspan="1">HADOOP_TASKTRACKER_OPTS</td>
-</tr>
-          
-</table>
-<p>例如,配置Namenode时,为了使其能够并行回收垃圾(parallelGC),
-          要把下面的代码加入到<span class="codefrag">hadoop-env.sh</span> :
-          <br>
-<span class="codefrag">
-          export HADOOP_NAMENODE_OPTS="-XX:+UseParallelGC ${HADOOP_NAMENODE_OPTS}"
-          </span>
-<br>
-</p>
-<p>其它可定制的常用参数还包括:</p>
-<ul>
-            
-<li>
-              
-<span class="codefrag">HADOOP_LOG_DIR</span> - 守护进程日志文件的存放目录。如果不存在会被自动创建。
-            </li>
-            
-<li>
-              
-<span class="codefrag">HADOOP_HEAPSIZE</span> - 最大可用的堆大小,单位为MB。比如,<span class="codefrag">1000MB</span>。
-              这个参数用于设置hadoop守护进程的堆大小。缺省大小是<span class="codefrag">1000MB</span>。
-            </li>
-          
-</ul>
-<a name="N1012F"></a><a name="%E9%85%8D%E7%BD%AEHadoop%E5%AE%88%E6%8A%A4%E8%BF%9B%E7%A8%8B%E7%9A%84%E8%BF%90%E8%A1%8C%E5%8F%82%E6%95%B0"></a>
-<h4>配置Hadoop守护进程的运行参数</h4>
-<p>这部分涉及Hadoop集群的重要参数,这些参数在<span class="codefrag">conf/hadoop-site.xml</span>中指定。</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-  		    
-<tr>
-		      
-<th colspan="1" rowspan="1">参数</th>
-		      <th colspan="1" rowspan="1">取值</th> 
-		      <th colspan="1" rowspan="1">备注</th>
-		    
-</tr>
-  		    
-<tr>
-		      
-<td colspan="1" rowspan="1">fs.default.name</td>
-                       <td colspan="1" rowspan="1"><span class="codefrag">NameNode</span>的URI。</td>
-                       <td colspan="1" rowspan="1"><em>hdfs://主机名/</em></td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">mapred.job.tracker</td>
-		      <td colspan="1" rowspan="1"><span class="codefrag">JobTracker</span>的主机(或者IP)和端口。</td>
-		      <td colspan="1" rowspan="1"><em>主机:端口</em>。</td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">dfs.name.dir</td>
-		      <td colspan="1" rowspan="1">
-		        <span class="codefrag">NameNode</span>持久存储名字空间及事务日志的本地文件系统路径。</td>
-		      <td colspan="1" rowspan="1">当这个值是一个逗号分割的目录列表时,nametable数据将会被复制到所有目录中做冗余备份。
-		      </td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">dfs.data.dir</td>
-		      <td colspan="1" rowspan="1"> 
-		        <span class="codefrag">DataNode</span>存放块数据的本地文件系统路径,逗号分割的列表。
-		      </td>
-		      <td colspan="1" rowspan="1">
-		        当这个值是逗号分割的目录列表时,数据将被存储在所有目录下,通常分布在不同设备上。
-		      </td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">mapred.system.dir</td>
-		      <td colspan="1" rowspan="1">Map/Reduce框架存储系统文件的HDFS路径。比如<span class="codefrag">/hadoop/mapred/system/</span>。
-		      </td>
-		      <td colspan="1" rowspan="1">这个路径是默认文件系统(HDFS)下的路径, 须从服务器和客户端上均可访问。
-		      </td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">mapred.local.dir</td>
-		      <td colspan="1" rowspan="1">本地文件系统下逗号分割的路径列表,Map/Reduce临时数据存放的地方。
-		      </td>
-		      <td colspan="1" rowspan="1">多路径有助于利用磁盘i/o。</td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">mapred.tasktracker.{map|reduce}.tasks.maximum</td>
-		      <td colspan="1" rowspan="1">某一<span class="codefrag">TaskTracker</span>上可运行的最大Map/Reduce任务数,这些任务将同时各自运行。
-		      </td>
-		      <td colspan="1" rowspan="1">
-		        默认为2(2个map和2个reduce),可依据硬件情况更改。
-		      </td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">dfs.hosts/dfs.hosts.exclude</td>
-		      <td colspan="1" rowspan="1">许可/拒绝DataNode列表。</td>
-		      <td colspan="1" rowspan="1">
-		        如有必要,用这个文件控制许可的datanode列表。
-		      </td>
-		    
-</tr>
-		    
-<tr>
-		      
-<td colspan="1" rowspan="1">mapred.hosts/mapred.hosts.exclude</td>
-		      <td colspan="1" rowspan="1">许可/拒绝TaskTracker列表。</td>
-		      <td colspan="1" rowspan="1">
-		        如有必要,用这个文件控制许可的TaskTracker列表。
-		      </td>
-  		    
-</tr>
-		  
-</table>
-<p>通常,上述参数被标记为 
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/conf/Configuration.html#FinalParams">
-          final</a> 以确保它们不被用户应用更改。
-          </p>
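As an illustration of the core cluster parameters in the table above, a minimal hadoop-site.xml sketch might look like the following; all host names and local paths are hypothetical.

```xml
<!-- Sketch only: host names and local paths are invented for the example. -->
<property>
  <name>fs.default.name</name>
  <value>hdfs://namenode.example.com:9000/</value>
</property>
<property>
  <name>mapred.job.tracker</name>
  <value>jobtracker.example.com:9001</value>
</property>
<property>
  <name>dfs.name.dir</name>
  <value>/data/1/dfs/name,/data/2/dfs/name</value>
</property>
<property>
  <name>dfs.data.dir</name>
  <value>/data/1/dfs/data,/data/2/dfs/data</value>
</property>
<property>
  <name>mapred.system.dir</name>
  <value>/hadoop/mapred/system</value>
</property>
<property>
  <name>mapred.tasktracker.map.tasks.maximum</name>
  <value>2</value>
</property>
```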
-<a name="N1020C"></a><a name="%E7%8E%B0%E5%AE%9E%E4%B8%96%E7%95%8C%E7%9A%84%E9%9B%86%E7%BE%A4%E9%85%8D%E7%BD%AE"></a>
-<h5>现实世界的集群配置</h5>
-<p>这节罗列在大规模集群上运行<em>sort</em>基准测试(benchmark)时使用到的一些非缺省配置。</p>
-<ul>
-              
-<li>
-                
-<p>运行sort900的一些非缺省配置值,sort900即在900个节点的集群上对9TB的数据进行排序:</p>
-                
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-  		          
-<tr>
-		            
-<th colspan="1" rowspan="1">参数</th>
-		            <th colspan="1" rowspan="1">取值</th> 
-		            <th colspan="1" rowspan="1">备注</th>
-		          
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">dfs.block.size</td>
-                    <td colspan="1" rowspan="1">134217728</td>
-                    <td colspan="1" rowspan="1">针对大文件系统,HDFS的块大小取128MB。</td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">dfs.namenode.handler.count</td>
-                    <td colspan="1" rowspan="1">40</td>
-                    <td colspan="1" rowspan="1">
-                      启动更多的NameNode服务线程去处理来自大量DataNode的RPC请求。
-                    </td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">mapred.reduce.parallel.copies</td>
-                    <td colspan="1" rowspan="1">20</td>
-                    <td colspan="1" rowspan="1">
-			reduce启动更多的并行拷贝器以获取大量map的输出。
-                    </td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">mapred.child.java.opts</td>
-                    <td colspan="1" rowspan="1">-Xmx512M</td>
-                    <td colspan="1" rowspan="1">
-			为map/reduce子虚拟机使用更大的堆。 
-                    </td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">fs.inmemory.size.mb</td>
-                    <td colspan="1" rowspan="1">200</td>
-                    <td colspan="1" rowspan="1">
-                      为reduce阶段合并map输出所需的内存文件系统分配更多的内存。
-                    </td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">io.sort.factor</td>
-                    <td colspan="1" rowspan="1">100</td>
-                    <td colspan="1" rowspan="1">文件排序时更多的流将同时被归并。</td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">io.sort.mb</td>
-                    <td colspan="1" rowspan="1">200</td>
-                    <td colspan="1" rowspan="1">提高排序时的内存上限。</td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">io.file.buffer.size</td>
-                    <td colspan="1" rowspan="1">131072</td>
-                    <td colspan="1" rowspan="1">SequenceFile中用到的读/写缓存大小。</td>
-                  
-</tr>
-                
-</table>
-              
-</li>
-              
-<li>
-                
-<p>运行sort1400和sort2000时需要更新的配置,即在1400个节点上对14TB的数据进行排序和在2000个节点上对20TB的数据进行排序:</p>
-                
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-  		          
-<tr>
-		            
-<th colspan="1" rowspan="1">参数</th>
-		            <th colspan="1" rowspan="1">取值</th> 
-		            <th colspan="1" rowspan="1">备注</th>
-		          
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">mapred.job.tracker.handler.count</td>
-                    <td colspan="1" rowspan="1">60</td>
-                    <td colspan="1" rowspan="1">
-                      启用更多的JobTracker服务线程去处理来自大量TaskTracker的RPC请求。
-                    </td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">mapred.reduce.parallel.copies</td>
-                    <td colspan="1" rowspan="1">50</td>
-                    <td colspan="1" rowspan="1"></td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">tasktracker.http.threads</td>
-                    <td colspan="1" rowspan="1">50</td>
-                    <td colspan="1" rowspan="1">
-                      为TaskTracker的Http服务启用更多的工作线程。reduce通过Http服务获取map的中间输出。
-                    </td>
-                  
-</tr>
-                  
-<tr>
-                    
-<td colspan="1" rowspan="1">mapred.child.java.opts</td>
-                    <td colspan="1" rowspan="1">-Xmx1024M</td>
-                    <td colspan="1" rowspan="1">使用更大的堆用于maps/reduces的子虚拟机</td>
-                  
-</tr>
-                
-</table>
-              
-</li>
-            
-</ul>
-<a name="N1032A"></a><a name="Slaves"></a>
-<h4>Slaves</h4>
-<p>通常,你选择集群中的一台机器作为<span class="codefrag">NameNode</span>,另外一台不同的机器作为<span class="codefrag">JobTracker</span>。余下的机器即作为<span class="codefrag">DataNode</span>又作为<span class="codefrag">TaskTracker</span>,这些被称之为<em>slaves</em>。</p>
-<p>在<span class="codefrag">conf/slaves</span>文件中列出所有slave的主机名或者IP地址,一行一个。</p>
-<a name="N10349"></a><a name="%E6%97%A5%E5%BF%97"></a>
-<h4>日志</h4>
-<p>Hadoop使用<a href="http://logging.apache.org/log4j/">Apache log4j</a>来记录日志,它由<a href="http://commons.apache.org/logging/">Apache Commons Logging</a>框架来实现。编辑<span class="codefrag">conf/log4j.properties</span>文件可以改变Hadoop守护进程的日志配置(日志格式等)。</p>
-<a name="N1035D"></a><a name="%E5%8E%86%E5%8F%B2%E6%97%A5%E5%BF%97"></a>
-<h5>历史日志</h5>
-<p>作业的历史文件集中存放在<span class="codefrag">hadoop.job.history.location</span>,这个也可以是在分布式文件系统下的路径,其默认值为<span class="codefrag">${HADOOP_LOG_DIR}/history</span>。jobtracker的web UI上有历史日志的web UI链接。</p>
-<p>历史文件在用户指定的目录<span class="codefrag">hadoop.job.history.user.location</span>也会记录一份,这个配置的缺省值为作业的输出目录。这些文件被存放在指定路径下的&ldquo;_logs/history/&rdquo;目录中。因此,默认情况下日志文件会在&ldquo;mapred.output.dir/_logs/history/&rdquo;下。如果将<span class="codefrag">hadoop.job.history.user.location</span>指定为值<span class="codefrag">none</span>,系统将不再记录此日志。</p>
-<p>用户可使用以下命令在指定路径下查看历史日志汇总<br>
-            
-<span class="codefrag">$ bin/hadoop job -history output-dir</span>
-<br> 
-            这条命令会显示作业的细节信息,失败和终止的任务细节。 <br>
-            关于作业的更多细节,比如成功的任务,以及对每个任务的所做的尝试次数等可以用下面的命令查看<br>
-            
-<span class="codefrag">$ bin/hadoop job -history all output-dir</span>
-<br>
-</p>
-<p>一旦全部必要的配置完成,将这些文件分发到所有机器的<span class="codefrag">HADOOP_CONF_DIR</span>路径下,通常是<span class="codefrag">${HADOOP_HOME}/conf</span>。</p>
-</div>
-    
-    
-<a name="N10395"></a><a name="Hadoop%E7%9A%84%E6%9C%BA%E6%9E%B6%E6%84%9F%E7%9F%A5"></a>
-<h2 class="h3">Hadoop的机架感知</h2>
-<div class="section">
-<p>HDFS和Map/Reduce的组件是能够感知机架的。</p>
-<p>
-<span class="codefrag">NameNode</span>和<span class="codefrag">JobTracker</span>通过调用管理员配置模块中的API<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/net/DNSToSwitchMapping.html#resolve(java.util.List)">resolve</a>来获取集群里每个slave的<span class="codefrag">机架id</span>。该API将slave的DNS名称(或者IP地址)转换成机架id。使用哪个模块是通过配置项<span class="codefrag">topology.node.switch.mapping.impl</span>来指定的。模块的默认实现会调用<span class="codefrag">topology.script.file.name</span>配置项指定的一个的脚本/命令。 如果topology.script.file.name未被设置,对于所有传入的IP地址,模块会返回<span class="codefrag">/default-rack</span>作为机架id。在Map/Reduce部分还有一个额外的配置项<span class="codefrag">mapred.cache.task.levels</span>,该参数决定cache的级数(在网络拓扑中)。例如,如果默认值是2,会建立两级的cache- 一级针对主机(主机 -&gt; 任务的映射)另一级针对机架(机架 -&gt; 任务的映射)。
-      </p>
-</div>
-    
-    
-<a name="N103BA"></a><a name="%E5%90%AF%E5%8A%A8Hadoop"></a>
-<h2 class="h3">启动Hadoop</h2>
-<div class="section">
-<p>启动Hadoop集群需要启动HDFS集群和Map/Reduce集群。</p>
-<p>
-        格式化一个新的分布式文件系统:<br>
-        
-<span class="codefrag">$ bin/hadoop namenode -format</span>
-      
-</p>
-<p>
-	在分配的<span class="codefrag">NameNode</span>上,运行下面的命令启动HDFS:<br>
-        
-<span class="codefrag">$ bin/start-dfs.sh</span>
-      
-</p>
-<p>
-<span class="codefrag">bin/start-dfs.sh</span>脚本会参照<span class="codefrag">NameNode</span>上<span class="codefrag">${HADOOP_CONF_DIR}/slaves</span>文件的内容,在所有列出的slave上启动<span class="codefrag">DataNode</span>守护进程。</p>
-<p>
-	在分配的<span class="codefrag">JobTracker</span>上,运行下面的命令启动Map/Reduce:<br>
-        
-<span class="codefrag">$ bin/start-mapred.sh</span>
-      
-</p>
-<p>
-<span class="codefrag">bin/start-mapred.sh</span>脚本会参照<span class="codefrag">JobTracker</span>上<span class="codefrag">${HADOOP_CONF_DIR}/slaves</span>文件的内容,在所有列出的slave上启动<span class="codefrag">TaskTracker</span>守护进程。</p>
-</div>
-    
-    
-<a name="N103FE"></a><a name="%E5%81%9C%E6%AD%A2Hadoop"></a>
-<h2 class="h3">停止Hadoop</h2>
-<div class="section">
-<p>
-	在分配的<span class="codefrag">NameNode</span>上,执行下面的命令停止HDFS:<br>
-        
-<span class="codefrag">$ bin/stop-dfs.sh</span>
-      
-</p>
-<p>
-<span class="codefrag">bin/stop-dfs.sh</span>脚本会参照<span class="codefrag">NameNode</span>上<span class="codefrag">${HADOOP_CONF_DIR}/slaves</span>文件的内容,在所有列出的slave上停止<span class="codefrag">DataNode</span>守护进程。</p>
-<p>
-	在分配的<span class="codefrag">JobTracker</span>上,运行下面的命令停止Map/Reduce:<br>
-        
-<span class="codefrag">$ bin/stop-mapred.sh</span>
-<br>
-      
-</p>
-<p>
-<span class="codefrag">bin/stop-mapred.sh</span>脚本会参照<span class="codefrag">JobTracker</span>上<span class="codefrag">${HADOOP_CONF_DIR}/slaves</span>文件的内容,在所有列出的slave上停止<span class="codefrag">TaskTracker</span>守护进程。</p>
-</div>
-  
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 209
docs/cn/cluster_setup.pdf


+ 0 - 1116
docs/cn/commands_manual.html

@@ -1,1116 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>命令手册</title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">项目</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">维基</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.18文档</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">文档</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">概述</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">快速入门</a>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">集群搭建</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS构架设计</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS使用指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS权限指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS配额管理指南</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">命令手册</div>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">FS Shell使用指南</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">DistCp使用指南</a>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Map-Reduce教程</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop本地库</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hod.html">Hadoop On Demand</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="commands_manual.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>命令手册</h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#%E6%A6%82%E8%BF%B0">概述</a>
-<ul class="minitoc">
-<li>
-<a href="#%E5%B8%B8%E8%A7%84%E9%80%89%E9%A1%B9">常规选项</a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E7%94%A8%E6%88%B7%E5%91%BD%E4%BB%A4"> 用户命令 </a>
-<ul class="minitoc">
-<li>
-<a href="#archive"> archive </a>
-</li>
-<li>
-<a href="#distcp"> distcp </a>
-</li>
-<li>
-<a href="#fs"> fs </a>
-</li>
-<li>
-<a href="#fsck"> fsck </a>
-</li>
-<li>
-<a href="#jar"> jar </a>
-</li>
-<li>
-<a href="#job"> job </a>
-</li>
-<li>
-<a href="#pipes"> pipes </a>
-</li>
-<li>
-<a href="#version"> version </a>
-</li>
-<li>
-<a href="#CLASSNAME"> CLASSNAME </a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E7%AE%A1%E7%90%86%E5%91%BD%E4%BB%A4">管理命令</a>
-<ul class="minitoc">
-<li>
-<a href="#balancer"> balancer </a>
-</li>
-<li>
-<a href="#daemonlog"> daemonlog </a>
-</li>
-<li>
-<a href="#datanode"> datanode</a>
-</li>
-<li>
-<a href="#dfsadmin"> dfsadmin </a>
-</li>
-<li>
-<a href="#jobtracker"> jobtracker </a>
-</li>
-<li>
-<a href="#namenode"> namenode </a>
-</li>
-<li>
-<a href="#secondarynamenode"> secondarynamenode </a>
-</li>
-<li>
-<a href="#tasktracker"> tasktracker </a>
-</li>
-</ul>
-</li>
-</ul>
-</div>
-		
-<a name="N1000D"></a><a name="%E6%A6%82%E8%BF%B0"></a>
-<h2 class="h3">概述</h2>
-<div class="section">
-<p>
-				所有的hadoop命令均由bin/hadoop脚本引发。不指定参数运行hadoop脚本会打印所有命令的描述。
-			</p>
-<p>
-				
-<span class="codefrag">用法:hadoop [--config confdir] [COMMAND] [GENERIC_OPTIONS] [COMMAND_OPTIONS]</span>
-			
-</p>
-<p>
-				Hadoop有一个选项解析框架用于解析一般的选项和运行类。
-			</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> 命令选项 </th><th colspan="1" rowspan="1"> 描述 </th>
-</tr>
-			
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">--config confdir</span></td>
-			            <td colspan="1" rowspan="1">覆盖缺省配置目录。缺省是${HADOOP_HOME}/conf。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">GENERIC_OPTIONS</span></td>
-			            <td colspan="1" rowspan="1">多个命令都支持的通用选项。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">COMMAND</span>
-<br>
-<span class="codefrag">命令选项S</span></td>
-			            <td colspan="1" rowspan="1">各种各样的命令和它们的选项会在下面提到。这些命令被分为
-			             <a href="commands_manual.html#%E7%94%A8%E6%88%B7%E5%91%BD%E4%BB%A4">用户命令</a> 
-			             <a href="commands_manual.html#%E7%AE%A1%E7%90%86%E5%91%BD%E4%BB%A4">管理命令</a>两组。</td>
-			           
-</tr>
-			     
-</table>
-<a name="N10061"></a><a name="%E5%B8%B8%E8%A7%84%E9%80%89%E9%A1%B9"></a>
-<h3 class="h4">常规选项</h3>
-<p>
-				  下面的选项被
-				  <a href="commands_manual.html#dfsadmin">dfsadmin</a>, 
-				  <a href="commands_manual.html#fs">fs</a>, <a href="commands_manual.html#fsck">fsck</a>和 
-				  <a href="commands_manual.html#job">job</a>支持。 
-				  应用程序要实现
-				  <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/util/Tool.html">Tool</a>来支持
-				  <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/util/GenericOptionsParser.html">
-				  常规选项</a>。
-				</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> GENERIC_OPTION </th><th colspan="1" rowspan="1"> 描述 </th>
-</tr>
-			
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-conf &lt;configuration file&gt;</span></td>
-			            <td colspan="1" rowspan="1">指定应用程序的配置文件。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-D &lt;property=value&gt;</span></td>
-			            <td colspan="1" rowspan="1">为指定property指定值value。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-fs &lt;local|namenode:port&gt;</span></td>
-			            <td colspan="1" rowspan="1">指定namenode。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-jt &lt;local|jobtracker:port&gt;</span></td>
-			            <td colspan="1" rowspan="1">指定job tracker。只适用于<a href="commands_manual.html#job">job</a>。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-files &lt;逗号分隔的文件列表&gt;</span></td>
-			            <td colspan="1" rowspan="1">指定要拷贝到map reduce集群的文件的逗号分隔的列表。
-			            只适用于<a href="commands_manual.html#job">job</a>。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-libjars &lt;逗号分隔的jar列表&gt;</span></td>
-			            <td colspan="1" rowspan="1">指定要包含到classpath中的jar文件的逗号分隔的列表。
-			            只适用于<a href="commands_manual.html#job">job</a>。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-archives &lt;逗号分隔的archive列表&gt;</span></td>
-			            <td colspan="1" rowspan="1">指定要被解压到计算节点上的档案文件的逗号分割的列表。
-			            只适用于<a href="commands_manual.html#job">job</a>。</td>
-			           
-</tr>
-				
-</table>
-</div>
-		
-		
-<a name="N10103"></a><a name="%E7%94%A8%E6%88%B7%E5%91%BD%E4%BB%A4"></a>
-<h2 class="h3"> 用户命令 </h2>
-<div class="section">
-<p>hadoop集群用户的常用命令。</p>
-<a name="N1010C"></a><a name="archive"></a>
-<h3 class="h4"> archive </h3>
-<p>
-					创建一个hadoop档案文件。参考 <a href="hadoop_archives.html">Hadoop Archives</a>.
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop archive -archiveName NAME &lt;src&gt;* &lt;dest&gt;</span>
-				
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> 命令选项 </th><th colspan="1" rowspan="1"> 描述</th>
-</tr>
-					   
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-archiveName NAME</span></td>
-			            <td colspan="1" rowspan="1">要创建的档案的名字。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">src</span></td>
-			            <td colspan="1" rowspan="1">文件系统的路径名,和通常含正则表达的一样。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">dest</span></td>
-			            <td colspan="1" rowspan="1">保存档案文件的目标目录。</td>
-			           
-</tr>
-			     
-</table>
-<a name="N10157"></a><a name="distcp"></a>
-<h3 class="h4"> distcp </h3>
-<p>
-					递归地拷贝文件或目录。参考<a href="distcp.html">DistCp指南</a>以获取等多信息。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop distcp &lt;srcurl&gt; &lt;desturl&gt;</span>
-				
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> 命令选项 </th><th colspan="1" rowspan="1"> 描述</th>
-</tr>
-			
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">srcurl</span></td>
-			            <td colspan="1" rowspan="1">源Url</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">desturl</span></td>
-			            <td colspan="1" rowspan="1">目标Url</td>
-			           
-</tr>
-			     
-</table>
-<a name="N10194"></a><a name="fs"></a>
-<h3 class="h4"> fs </h3>
-<p>
-					
-<span class="codefrag">用法:hadoop fs [</span><a href="commands_manual.html#%E5%B8%B8%E8%A7%84%E9%80%89%E9%A1%B9">GENERIC_OPTIONS</a><span class="codefrag">] 
-					[COMMAND_OPTIONS]</span>
-				
-</p>
-<p>
-					运行一个常规的文件系统客户端。
-				</p>
-<p>
-					各种命令选项可以参考<a href="hdfs_shell.html">HDFS Shell指南</a>。
-				</p>
-<a name="N101B0"></a><a name="fsck"></a>
-<h3 class="h4"> fsck </h3>
-<p>
-					运行HDFS文件系统检查工具。参考<a href="hdfs_user_guide.html#fsck">Fsck</a>了解更多。
-				</p>
-<p>
-<span class="codefrag">用法:hadoop fsck [</span><a href="commands_manual.html#%E5%B8%B8%E8%A7%84%E9%80%89%E9%A1%B9">GENERIC_OPTIONS</a><span class="codefrag">] 
-				&lt;path&gt; [-move | -delete | -openforwrite] [-files [-blocks 
-				[-locations | -racks]]]</span>
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> 命令选项 </th><th colspan="1" rowspan="1"> 描述 </th>
-</tr>
-			          
-<tr>
-			            
-<td colspan="1" rowspan="1"><span class="codefrag">&lt;path&gt;</span></td>
-			            <td colspan="1" rowspan="1">检查的起始目录。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-move</span></td>
-			            <td colspan="1" rowspan="1">移动受损文件到/lost+found</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-delete</span></td>
-			            <td colspan="1" rowspan="1">删除受损文件。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-openforwrite</span></td>
-			            <td colspan="1" rowspan="1">打印出写打开的文件。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-files</span></td>
-			            <td colspan="1" rowspan="1">打印出正被检查的文件。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-blocks</span></td>
-			            <td colspan="1" rowspan="1">打印出块信息报告。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-locations</span></td>
-			            <td colspan="1" rowspan="1">打印出每个块的位置信息。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-racks</span></td>
-			            <td colspan="1" rowspan="1">打印出data-node的网络拓扑结构。</td>
-			           
-</tr>
-					
-</table>
-<a name="N10244"></a><a name="jar"></a>
-<h3 class="h4"> jar </h3>
-<p>
-					运行jar文件。用户可以把他们的Map Reduce代码捆绑到jar文件中,使用这个命令执行。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop jar &lt;jar&gt; [mainClass] args...</span>
-				
-</p>
-<p>
-					streaming作业是通过这个命令执行的。参考<a href="streaming.html#%E5%85%B6%E4%BB%96%E4%BE%8B%E5%AD%90">Streaming examples</a>中的例子。
-				</p>
-<p>
-					Word count例子也是通过jar命令运行的。参考<a href="mapred_tutorial.html#%E7%94%A8%E6%B3%95">Wordcount example</a>。
-				</p>
-<a name="N10262"></a><a name="job"></a>
-<h3 class="h4"> job </h3>
-<p>
-					用于和Map Reduce作业交互和命令。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop job [</span><a href="commands_manual.html#%E5%B8%B8%E8%A7%84%E9%80%89%E9%A1%B9">GENERIC_OPTIONS</a><span class="codefrag">] 
-					[-submit &lt;job-file&gt;] | [-status &lt;job-id&gt;] | 
-					[-counter &lt;job-id&gt; &lt;group-name&gt; &lt;counter-name&gt;] | [-kill &lt;job-id&gt;] | 
-					[-events &lt;job-id&gt; &lt;from-event-#&gt; &lt;#-of-events&gt;] | [-history [all] &lt;jobOutputDir&gt;] |
-					[-list [all]] | [-kill-task &lt;task-id&gt;] | [-fail-task &lt;task-id&gt;]</span>
-				
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> 命令选项 </th><th colspan="1" rowspan="1"> 描述</th>
-</tr>
-			
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-submit &lt;job-file&gt;</span></td>
-			            <td colspan="1" rowspan="1">提交作业</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-status &lt;job-id&gt;</span></td>
-			            <td colspan="1" rowspan="1">打印map和reduce完成百分比和所有计数器。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-counter &lt;job-id&gt; &lt;group-name&gt; &lt;counter-name&gt;</span></td>
-			            <td colspan="1" rowspan="1">打印计数器的值。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-kill &lt;job-id&gt;</span></td>
-			            <td colspan="1" rowspan="1">杀死指定作业。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-events &lt;job-id&gt; &lt;from-event-#&gt; &lt;#-of-events&gt;</span></td>
-			            <td colspan="1" rowspan="1">打印给定范围内jobtracker接收到的事件细节。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-history [all] &lt;jobOutputDir&gt;</span></td>
-			            <td colspan="1" rowspan="1">-history &lt;jobOutputDir&gt; 打印作业的细节、失败及被杀死原因的细节。更多的关于一个作业的细节比如成功的任务,做过的任务尝试等信息可以通过指定[all]选项查看。
-			            </td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-list [all]</span></td>
-			            <td colspan="1" rowspan="1">-list all显示所有作业。-list只显示将要完成的作业。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-kill-task &lt;task-id&gt;</span></td>
-			            <td colspan="1" rowspan="1">杀死任务。被杀死的任务不会不利于失败尝试。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-fail-task &lt;task-id&gt;</span></td>
-			            <td colspan="1" rowspan="1">使任务失败。被失败的任务会对失败尝试不利。</td>
-			           
-</tr>
-					
-</table>
-<a name="N10302"></a><a name="pipes"></a>
-<h3 class="h4"> pipes </h3>
-<p>
-					运行pipes作业。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop pipes [-conf &lt;path&gt;] [-jobconf &lt;key=value&gt;, &lt;key=value&gt;, ...] 
-					[-input &lt;path&gt;] [-output &lt;path&gt;] [-jar &lt;jar file&gt;] [-inputformat &lt;class&gt;] 
-					[-map &lt;class&gt;] [-partitioner &lt;class&gt;] [-reduce &lt;class&gt;] [-writer &lt;class&gt;] 
-					[-program &lt;executable&gt;] [-reduces &lt;num&gt;] </span>
-				
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> 命令选项 </th><th colspan="1" rowspan="1"> 描述</th>
-</tr>
-			
-			          
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-conf &lt;path&gt;</span></td>
-			            <td colspan="1" rowspan="1">作业的配置</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-jobconf &lt;key=value&gt;, &lt;key=value&gt;, ...</span></td>
-			            <td colspan="1" rowspan="1">增加/覆盖作业的配置项</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-input &lt;path&gt;</span></td>
-			            <td colspan="1" rowspan="1">输入目录</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-output &lt;path&gt;</span></td>
-			            <td colspan="1" rowspan="1">输出目录</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-jar &lt;jar file&gt;</span></td>
-			            <td colspan="1" rowspan="1">Jar文件名</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-inputformat &lt;class&gt;</span></td>
-			            <td colspan="1" rowspan="1">InputFormat类</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-map &lt;class&gt;</span></td>
-			            <td colspan="1" rowspan="1">Java Map类</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-partitioner &lt;class&gt;</span></td>
-			            <td colspan="1" rowspan="1">Java Partitioner</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-reduce &lt;class&gt;</span></td>
-			            <td colspan="1" rowspan="1">Java Reduce类</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-writer &lt;class&gt;</span></td>
-			            <td colspan="1" rowspan="1">Java RecordWriter</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-program &lt;executable&gt;</span></td>
-			            <td colspan="1" rowspan="1">可执行程序的URI</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-reduces &lt;num&gt;</span></td>
-			            <td colspan="1" rowspan="1">reduce个数</td>
-			           
-</tr>
-					
-</table>
-<a name="N103C7"></a><a name="version"></a>
-<h3 class="h4"> version </h3>
-<p>
-					打印版本信息。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop version</span>
-				
-</p>
-<a name="N103D7"></a><a name="CLASSNAME"></a>
-<h3 class="h4"> CLASSNAME </h3>
-<p>
-					 hadoop脚本可用于调用任何类。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop CLASSNAME</span>
-				
-</p>
-<p>
-					 运行名字为CLASSNAME的类。
-				</p>
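-<p>
-					 For instance, the <span class="codefrag">version</span> command above is simply a shortcut for invoking its implementing class directly:
-				</p>
-<p>
-					
-<span class="codefrag">hadoop org.apache.hadoop.util.VersionInfo</span>
-				
-</p>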
-</div>
-		
-		
-<a name="N103EB"></a><a name="%E7%AE%A1%E7%90%86%E5%91%BD%E4%BB%A4"></a>
-<h2 class="h3">管理命令</h2>
-<div class="section">
-<p>hadoop集群管理员常用的命令。</p>
-<a name="N103F4"></a><a name="balancer"></a>
-<h3 class="h4"> balancer </h3>
-<p>
-					运行集群平衡工具。管理员可以简单的按Ctrl-C来停止平衡过程。参考<a href="hdfs_user_guide.html#Rebalancer">Rebalancer</a>了解更多。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop balancer [-threshold &lt;threshold&gt;]</span>
-				
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> 命令选项 </th><th colspan="1" rowspan="1"> 描述</th>
-</tr>
-			
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-threshold &lt;threshold&gt;</span></td>
-			            <td colspan="1" rowspan="1">磁盘容量的百分比。这会覆盖缺省的阈值。</td>
-			           
-</tr>
-			     
-</table>
-<a name="N10423"></a><a name="daemonlog"></a>
-<h3 class="h4"> daemonlog </h3>
-<p>
-					 获取或设置每个守护进程的日志级别。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop daemonlog  -getlevel &lt;host:port&gt; &lt;name&gt;</span>
-<br>
-					
-<span class="codefrag">用法:hadoop daemonlog  -setlevel &lt;host:port&gt; &lt;name&gt; &lt;level&gt;</span>
-				
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> 命令选项 </th><th colspan="1" rowspan="1"> 描述</th>
-</tr>
-			
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-getlevel &lt;host:port&gt; &lt;name&gt;</span></td>
-			            <td colspan="1" rowspan="1">打印运行在&lt;host:port&gt;的守护进程的日志级别。这个命令内部会连接http://&lt;host:port&gt;/logLevel?log=&lt;name&gt;</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-setlevel &lt;host:port&gt; &lt;name&gt; &lt;level&gt;</span></td>
-			            <td colspan="1" rowspan="1">设置运行在&lt;host:port&gt;的守护进程的日志级别。这个命令内部会连接http://&lt;host:port&gt;/logLevel?log=&lt;name&gt;</td>
-			           
-</tr>
-			     
-</table>
-<a name="N10460"></a><a name="datanode"></a>
-<h3 class="h4"> datanode</h3>
-<p>
-					运行一个HDFS的datanode。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop datanode [-rollback]</span>
-				
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> 命令选项 </th><th colspan="1" rowspan="1"> 描述</th>
-</tr>
-			
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-rollback</span></td>
-			            <td colspan="1" rowspan="1">将datanode回滚到前一个版本。这需要在停止datanode,分发老的hadoop版本之后使用。
-			            </td>
-			           
-</tr>
-			     
-</table>
-<a name="N1048B"></a><a name="dfsadmin"></a>
-<h3 class="h4"> dfsadmin </h3>
-<p>
-					运行一个HDFS的dfsadmin客户端。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop dfsadmin  [</span><a href="commands_manual.html#%E5%B8%B8%E8%A7%84%E9%80%89%E9%A1%B9">GENERIC_OPTIONS</a><span class="codefrag">] [-report] [-safemode enter | leave | get | wait] [-refreshNodes]
-					 [-finalizeUpgrade] [-upgradeProgress status | details | force] [-metasave filename] 
-					 [-setQuota &lt;quota&gt; &lt;dirname&gt;...&lt;dirname&gt;] [-clrQuota &lt;dirname&gt;...&lt;dirname&gt;] 
-					 [-help [cmd]]</span>
-				
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> 命令选项 </th><th colspan="1" rowspan="1"> 描述</th>
-</tr>
-			
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-report</span></td>
-			            <td colspan="1" rowspan="1">报告文件系统的基本信息和统计信息。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-safemode enter | leave | get | wait</span></td>
-			            <td colspan="1" rowspan="1">安全模式维护命令。安全模式是Namenode的一个状态,这种状态下,Namenode <br>
-					1.  不接受对名字空间的更改(只读)<br> 
-					2.  不复制或删除块<br>
-					Namenode会在启动时自动进入安全模式,当配置的块最小百分比数满足最小的副本数条件时,会自动离开安全模式。安全模式可以手动进入,但是这样的话也必须手动关闭安全模式。
-                </td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-refreshNodes</span></td>
-			            <td colspan="1" rowspan="1">重新读取hosts和exclude文件,更新允许连接到Namenode的Datanode集合,以及需要退役或重新启用的Datanode集合。
-                </td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-finalizeUpgrade</span></td>
-			            <td colspan="1" rowspan="1">终结HDFS的升级操作。Datanode删除前一个版本的工作目录,之后Namenode也这样做。这个操作完结整个升级过程。
-                </td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-upgradeProgress status | details | force</span></td>
-			            <td colspan="1" rowspan="1">请求当前系统的升级状态,状态的细节,或者强制升级操作进行。
-                </td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-metasave filename</span></td>
-			            <td colspan="1" rowspan="1">保存Namenode的主要数据结构到hadoop.log.dir属性指定的目录下的&lt;filename&gt;文件。对于下面的每一项,&lt;filename&gt;中都会一行内容与之对应<br>
-                        1. Namenode收到的Datanode的心跳信号<br>
-                        2. 等待被复制的块<br>
-                        3. 正在被复制的块<br>
-                        4. 等待被删除的块</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-setQuota &lt;quota&gt; &lt;dirname&gt;...&lt;dirname&gt;</span></td>
-			            <td colspan="1" rowspan="1">为每个目录 &lt;dirname&gt;设定配额&lt;quota&gt;。目录配额是一个长整型整数,强制限定了目录树下的名字个数。<br>
-                命令会尽量对每个目录生效(best effort),以下情况会报错:<br>
-                1. &lt;quota&gt;不是一个正整数,或者<br>
-                2. 用户不是管理员,或者<br>
-                3. 这个目录不存在或是文件,或者<br>
-                4. 目录会马上超出新设定的配额。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-clrQuota &lt;dirname&gt;...&lt;dirname&gt;</span></td>
-			            <td colspan="1" rowspan="1">为每一个目录&lt;dirname&gt;清除配额设定。<br>
-                命令会尽量对每个目录生效(best effort),以下情况会报错:<br>
-                1. 这个目录不存在或是文件,或者<br>
-                2. 用户不是管理员。<br>
-                如果目录原来没有配额不会报错。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-help [cmd]</span></td>
-			            <td colspan="1" rowspan="1">显示给定命令的帮助信息,如果没有给定命令,则显示所有命令的帮助信息。</td>
-			           
-</tr>
-			     
-</table>
-<a name="N1054B"></a><a name="jobtracker"></a>
-<h3 class="h4"> jobtracker </h3>
-<p>
-					运行MapReduce job Tracker节点。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop jobtracker</span>
-				
-</p>
-<a name="N1055B"></a><a name="namenode"></a>
-<h3 class="h4"> namenode </h3>
-<p>
-					运行namenode。有关升级,回滚,升级终结的更多信息请参考<a href="hdfs_user_guide.html#%E5%8D%87%E7%BA%A7%E5%92%8C%E5%9B%9E%E6%BB%9A">升级和回滚</a>。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop namenode [-format] | [-upgrade] | [-rollback] | [-finalize] | [-importCheckpoint]</span>
-				
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> 命令选项 </th><th colspan="1" rowspan="1"> 描述</th>
-</tr>
-			
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-format</span></td>
-			            <td colspan="1" rowspan="1">格式化namenode。它启动namenode,格式化namenode,之后关闭namenode。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-upgrade</span></td>
-			            <td colspan="1" rowspan="1">分发新版本的hadoop后,namenode应以upgrade选项启动。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-rollback</span></td>
-			            <td colspan="1" rowspan="1">将namenode回滚到前一版本。这个选项要在停止集群,分发老的hadoop版本后使用。
-			            </td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-finalize</span></td>
-			            <td colspan="1" rowspan="1">finalize会删除文件系统的前一状态。最近的升级会被持久化,rollback选项将不再可用,升级终结操作之后,它会停掉namenode。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-importCheckpoint</span></td>
-			            <td colspan="1" rowspan="1">从检查点目录装载镜像并保存到当前检查点目录,检查点目录由fs.checkpoint.dir指定。
-			            </td>
-			           
-</tr>
-			     
-</table>
-<a name="N105C2"></a><a name="secondarynamenode"></a>
-<h3 class="h4"> secondarynamenode </h3>
-<p>
-					运行HDFS的secondary namenode。参考<a href="hdfs_user_guide.html#Secondary+NameNode">Secondary Namenode</a>了解更多。 
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop secondarynamenode [-checkpoint [force]] | [-geteditsize]</span>
-				
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-			          
-<tr>
-<th colspan="1" rowspan="1"> 命令选项 </th><th colspan="1" rowspan="1"> 描述</th>
-</tr>
-			
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-checkpoint [force]</span></td>
-			            <td colspan="1" rowspan="1">如果EditLog的大小 &gt;= fs.checkpoint.size,启动Secondary namenode的检查点过程。
-			            如果使用了-force,将不考虑EditLog的大小。</td>
-			           
-</tr>
-			           
-<tr>
-			          	
-<td colspan="1" rowspan="1"><span class="codefrag">-geteditsize</span></td>
-			            <td colspan="1" rowspan="1">打印EditLog大小。</td>
-			           
-</tr>
-			     
-</table>
-<a name="N105FF"></a><a name="tasktracker"></a>
-<h3 class="h4"> tasktracker </h3>
-<p>
-					运行MapReduce的task Tracker节点。
-				</p>
-<p>
-					
-<span class="codefrag">用法:hadoop tasktracker</span>
-				
-</p>
-</div>
-		
-		
-		      
-
-	
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 261
docs/cn/commands_manual.pdf


+ 0 - 255
docs/cn/core-default.html

@@ -1,255 +0,0 @@
-<html>
-<body>
-<table border="1">
-<tr>
-<td>name</td><td>value</td><td>description</td>
-</tr>
-<tr>
-<td><a name="hadoop.tmp.dir">hadoop.tmp.dir</a></td><td>/tmp/hadoop-${user.name}</td><td>A base for other temporary directories.</td>
-</tr>
-<tr>
-<td><a name="hadoop.native.lib">hadoop.native.lib</a></td><td>true</td><td>Should native hadoop libraries, if present, be used.</td>
-</tr>
-<tr>
-<td><a name="hadoop.http.filter.initializers">hadoop.http.filter.initializers</a></td><td></td><td>A comma separated list of class names. Each class in the list 
-  must extend org.apache.hadoop.http.FilterInitializer. The corresponding 
-  Filter will be initialized. Then, the Filter will be applied to all user 
-  facing jsp and servlet web pages.  The ordering of the list defines the 
-  ordering of the filters.</td>
-</tr>
-<tr>
-<td><a name="hadoop.security.authorization">hadoop.security.authorization</a></td><td>false</td><td>Is service-level authorization enabled?</td>
-</tr>
-<tr>
-<td><a name="hadoop.logfile.size">hadoop.logfile.size</a></td><td>10000000</td><td>The max size of each log file</td>
-</tr>
-<tr>
-<td><a name="hadoop.logfile.count">hadoop.logfile.count</a></td><td>10</td><td>The max number of log files</td>
-</tr>
-<tr>
-<td><a name="io.file.buffer.size">io.file.buffer.size</a></td><td>4096</td><td>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</td>
-</tr>
-<tr>
-<td><a name="io.bytes.per.checksum">io.bytes.per.checksum</a></td><td>512</td><td>The number of bytes per checksum.  Must not be larger than
-  io.file.buffer.size.</td>
-</tr>
-<tr>
-<td><a name="io.skip.checksum.errors">io.skip.checksum.errors</a></td><td>false</td><td>If true, when a checksum error is encountered while
-  reading a sequence file, entries are skipped, instead of throwing an
-  exception.</td>
-</tr>
-<tr>
-<td><a name="io.compression.codecs">io.compression.codecs</a></td><td>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</td><td>A list of the compression codec classes that can be used 
-               for compression/decompression.</td>
-</tr>
-<tr>
-<td><a name="io.serializations">io.serializations</a></td><td>org.apache.hadoop.io.serializer.WritableSerialization</td><td>A list of serialization classes that can be used for
-  obtaining serializers and deserializers.</td>
-</tr>
-<tr>
-<td><a name="fs.default.name">fs.default.name</a></td><td>file:///</td><td>The name of the default file system.  A URI whose
-  scheme and authority determine the FileSystem implementation.  The
-  uri's scheme determines the config property (fs.SCHEME.impl) naming
-  the FileSystem implementation class.  The uri's authority is used to
-  determine the host, port, etc. for a filesystem.</td>
-</tr>
-<tr>
-<td><a name="fs.trash.interval">fs.trash.interval</a></td><td>0</td><td>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </td>
-</tr>
-<tr>
-<td><a name="fs.file.impl">fs.file.impl</a></td><td>org.apache.hadoop.fs.LocalFileSystem</td><td>The FileSystem for file: uris.</td>
-</tr>
-<tr>
-<td><a name="fs.hdfs.impl">fs.hdfs.impl</a></td><td>org.apache.hadoop.hdfs.DistributedFileSystem</td><td>The FileSystem for hdfs: uris.</td>
-</tr>
-<tr>
-<td><a name="fs.s3.impl">fs.s3.impl</a></td><td>org.apache.hadoop.fs.s3.S3FileSystem</td><td>The FileSystem for s3: uris.</td>
-</tr>
-<tr>
-<td><a name="fs.s3n.impl">fs.s3n.impl</a></td><td>org.apache.hadoop.fs.s3native.NativeS3FileSystem</td><td>The FileSystem for s3n: (Native S3) uris.</td>
-</tr>
-<tr>
-<td><a name="fs.kfs.impl">fs.kfs.impl</a></td><td>org.apache.hadoop.fs.kfs.KosmosFileSystem</td><td>The FileSystem for kfs: uris.</td>
-</tr>
-<tr>
-<td><a name="fs.hftp.impl">fs.hftp.impl</a></td><td>org.apache.hadoop.hdfs.HftpFileSystem</td><td></td>
-</tr>
-<tr>
-<td><a name="fs.hsftp.impl">fs.hsftp.impl</a></td><td>org.apache.hadoop.hdfs.HsftpFileSystem</td><td></td>
-</tr>
-<tr>
-<td><a name="fs.ftp.impl">fs.ftp.impl</a></td><td>org.apache.hadoop.fs.ftp.FTPFileSystem</td><td>The FileSystem for ftp: uris.</td>
-</tr>
-<tr>
-<td><a name="fs.ramfs.impl">fs.ramfs.impl</a></td><td>org.apache.hadoop.fs.InMemoryFileSystem</td><td>The FileSystem for ramfs: uris.</td>
-</tr>
-<tr>
-<td><a name="fs.har.impl">fs.har.impl</a></td><td>org.apache.hadoop.fs.HarFileSystem</td><td>The filesystem for Hadoop archives. </td>
-</tr>
-<tr>
-<td><a name="fs.checkpoint.dir">fs.checkpoint.dir</a></td><td>${hadoop.tmp.dir}/dfs/namesecondary</td><td>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-  </td>
-</tr>
-<tr>
-<td><a name="fs.checkpoint.edits.dir">fs.checkpoint.edits.dir</a></td><td>${fs.checkpoint.dir}</td><td>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      Default value is same as fs.checkpoint.dir
-  </td>
-</tr>
-<tr>
-<td><a name="fs.checkpoint.period">fs.checkpoint.period</a></td><td>3600</td><td>The number of seconds between two periodic checkpoints.
-  </td>
-</tr>
-<tr>
-<td><a name="fs.checkpoint.size">fs.checkpoint.size</a></td><td>67108864</td><td>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </td>
-</tr>
-<tr>
-<td><a name="fs.s3.block.size">fs.s3.block.size</a></td><td>67108864</td><td>Block size to use when writing files to S3.</td>
-</tr>
-<tr>
-<td><a name="fs.s3.buffer.dir">fs.s3.buffer.dir</a></td><td>${hadoop.tmp.dir}/s3</td><td>Determines where on the local filesystem the S3 filesystem
-  should store files before sending them to S3
-  (or after retrieving them from S3).
-  </td>
-</tr>
-<tr>
-<td><a name="fs.s3.maxRetries">fs.s3.maxRetries</a></td><td>4</td><td>The maximum number of retries for reading or writing files to S3, 
-  before we signal failure to the application.
-  </td>
-</tr>
-<tr>
-<td><a name="fs.s3.sleepTimeSeconds">fs.s3.sleepTimeSeconds</a></td><td>10</td><td>The number of seconds to sleep between each S3 retry.
-  </td>
-</tr>
-<tr>
-<td><a name="local.cache.size">local.cache.size</a></td><td>10737418240</td><td>The limit on the size of cache you want to keep, set by default
-  to 10GB. This will act as a soft limit on the cache directory for out of band data.
-  </td>
-</tr>
-<tr>
-<td><a name="io.seqfile.compress.blocksize">io.seqfile.compress.blocksize</a></td><td>1000000</td><td>The minimum block size for compression in block compressed 
-          SequenceFiles.
-  </td>
-</tr>
-<tr>
-<td><a name="io.seqfile.lazydecompress">io.seqfile.lazydecompress</a></td><td>true</td><td>Should values of block-compressed SequenceFiles be decompressed
-          only when necessary.
-  </td>
-</tr>
-<tr>
-<td><a name="io.seqfile.sorter.recordlimit">io.seqfile.sorter.recordlimit</a></td><td>1000000</td><td>The limit on number of records to be kept in memory in a spill 
-          in SequenceFiles.Sorter
-  </td>
-</tr>
-<tr>
-<td><a name="io.mapfile.bloom.size">io.mapfile.bloom.size</a></td><td>1048576</td><td>The size of BloomFilter-s used in BloomMapFile. Each time this many
-  keys are appended the next BloomFilter will be created (inside a DynamicBloomFilter).
-  Larger values minimize the number of filters, which slightly increases the performance,
-  but may waste too much space if the total number of keys is usually much smaller
-  than this number.
-  </td>
-</tr>
-<tr>
-<td><a name="io.mapfile.bloom.error.rate">io.mapfile.bloom.error.rate</a></td><td>0.005</td><td>The rate of false positives in BloomFilter-s used in BloomMapFile.
-  As this value decreases, the size of BloomFilter-s increases exponentially. This
-  value is the probability of encountering false positives (default is 0.5%).
-  </td>
-</tr>
-<tr>
-<td><a name="hadoop.util.hash.type">hadoop.util.hash.type</a></td><td>murmur</td><td>The default implementation of Hash. Currently this can take one of the
-  two values: 'murmur' to select MurmurHash and 'jenkins' to select JenkinsHash.
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.client.idlethreshold">ipc.client.idlethreshold</a></td><td>4000</td><td>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.client.kill.max">ipc.client.kill.max</a></td><td>10</td><td>Defines the maximum number of clients to disconnect in one go.
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.client.connection.maxidletime">ipc.client.connection.maxidletime</a></td><td>10000</td><td>The maximum time in msec after which a client will bring down the
-               connection to the server.
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.client.connect.max.retries">ipc.client.connect.max.retries</a></td><td>10</td><td>Indicates the number of retries a client will make to establish
-               a server connection.
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.server.listen.queue.size">ipc.server.listen.queue.size</a></td><td>128</td><td>Indicates the length of the listen queue for servers accepting
-               client connections.
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.server.tcpnodelay">ipc.server.tcpnodelay</a></td><td>false</td><td>Turn on/off Nagle's algorithm for the TCP socket connection on 
-  the server. Setting to true disables the algorithm and may decrease latency
-  with a cost of more/smaller packets. 
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.client.tcpnodelay">ipc.client.tcpnodelay</a></td><td>false</td><td>Turn on/off Nagle's algorithm for the TCP socket connection on 
-  the client. Setting to true disables the algorithm and may decrease latency
-  with a cost of more/smaller packets. 
-  </td>
-</tr>
-<tr>
-<td><a name="webinterface.private.actions">webinterface.private.actions</a></td><td>false</td><td> If set to true, the web interfaces of JT and NN may contain 
-                actions, such as kill job, delete file, etc., that should 
-                not be exposed to public. Enable this option if the interfaces 
-                are only reachable by those who have the right authorization.
-  </td>
-</tr>
-<tr>
-<td><a name="hadoop.rpc.socket.factory.class.default">hadoop.rpc.socket.factory.class.default</a></td><td>org.apache.hadoop.net.StandardSocketFactory</td><td> Default SocketFactory to use. This parameter is expected to be
-    formatted as "package.FactoryClassName".
-  </td>
-</tr>
-<tr>
-<td><a name="hadoop.rpc.socket.factory.class.ClientProtocol">hadoop.rpc.socket.factory.class.ClientProtocol</a></td><td></td><td> SocketFactory to use to connect to a DFS. If null or empty, use
-    hadoop.rpc.socket.class.default. This socket factory is also used by
-    DFSClient to create sockets to DataNodes.
-  </td>
-</tr>
-<tr>
-<td><a name="hadoop.socks.server">hadoop.socks.server</a></td><td></td><td> Address (host:port) of the SOCKS server to be used by the
-    SocksSocketFactory.
-  </td>
-</tr>
-<tr>
-<td><a name="topology.node.switch.mapping.impl">topology.node.switch.mapping.impl</a></td><td>org.apache.hadoop.net.ScriptBasedMapping</td><td> The default implementation of the DNSToSwitchMapping. It
-    invokes a script specified in topology.script.file.name to resolve
-    node names. If the value for topology.script.file.name is not set, the
-    default value of DEFAULT_RACK is returned for all node names.
-  </td>
-</tr>
-<tr>
-<td><a name="topology.script.file.name">topology.script.file.name</a></td><td></td><td> The script name that should be invoked to resolve DNS names to
-    NetworkTopology names. Example: the script would take host.foo.bar as an
-    argument, and return /rack1 as the output.
-  </td>
-</tr>
-<tr>
-<td><a name="topology.script.number.args">topology.script.number.args</a></td><td>100</td><td> The max number of args that the script configured with 
-    topology.script.file.name should be run with. Each arg is an
-    IP address.
-  </td>
-</tr>
-</table>
-</body>
-</html>

+ 0 - 563
docs/cn/distcp.html

@@ -1,563 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>DistCp</title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">项目</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">维基</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.18文档</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">文档</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">概述</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">快速入门</a>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">集群搭建</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS构架设计</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS使用指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS权限指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS配额管理指南</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">命令手册</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">FS Shell使用指南</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">DistCp使用指南</div>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Map-Reduce教程</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop本地库</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hod.html">Hadoop On Demand</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="distcp.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>DistCp</h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#%E6%A6%82%E8%BF%B0">概述</a>
-</li>
-<li>
-<a href="#%E4%BD%BF%E7%94%A8%E6%96%B9%E6%B3%95">使用方法</a>
-<ul class="minitoc">
-<li>
-<a href="#%E5%9F%BA%E6%9C%AC%E4%BD%BF%E7%94%A8%E6%96%B9%E6%B3%95">基本使用方法</a>
-</li>
-<li>
-<a href="#options">选项</a>
-<ul class="minitoc">
-<li>
-<a href="#%E9%80%89%E9%A1%B9%E7%B4%A2%E5%BC%95">选项索引</a>
-</li>
-<li>
-<a href="#uo">更新和覆盖</a>
-</li>
-</ul>
-</li>
-</ul>
-</li>
-<li>
-<a href="#etc">附录</a>
-<ul class="minitoc">
-<li>
-<a href="#Map%E6%95%B0%E7%9B%AE">Map数目</a>
-</li>
-<li>
-<a href="#cpver">不同HDFS版本间的拷贝</a>
-</li>
-<li>
-<a href="#Map%2FReduce%E5%92%8C%E5%89%AF%E6%95%88%E5%BA%94">Map/Reduce和副效应</a>
-</li>
-</ul>
-</li>
-</ul>
-</div>
-
-    
-<a name="N1000D"></a><a name="%E6%A6%82%E8%BF%B0"></a>
-<h2 class="h3">概述</h2>
-<div class="section">
-<p>DistCp(分布式拷贝)是用于大规模集群内部和集群之间拷贝的工具。
-	  它使用Map/Reduce实现文件分发,错误处理和恢复,以及报告生成。
-      它把文件和目录的列表作为map任务的输入,每个任务会完成源列表中部分文件的拷贝。
-      由于使用了Map/Reduce方法,这个工具在语义和执行上都会有特殊的地方。
-      这篇文档会为常用DistCp操作提供指南并阐述它的工作模型。
-      </p>
-</div>
-
-    
-<a name="N10017"></a><a name="%E4%BD%BF%E7%94%A8%E6%96%B9%E6%B3%95"></a>
-<h2 class="h3">使用方法</h2>
-<div class="section">
-<a name="N1001D"></a><a name="%E5%9F%BA%E6%9C%AC%E4%BD%BF%E7%94%A8%E6%96%B9%E6%B3%95"></a>
-<h3 class="h4">基本使用方法</h3>
-<p>DistCp最常用在集群之间的拷贝:</p>
-<p>
-<span class="codefrag">bash$ hadoop distcp hdfs://nn1:8020/foo/bar \</span>
-<br>
-           
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-                 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-                 hdfs://nn2:8020/bar/foo</span>
-</p>
-<p>这条命令会把nn1集群的<span class="codefrag">/foo/bar</span>目录下的所有文件或目录名展开并存储到一个临时文件中,这些文件内容的拷贝工作被分配给多个map任务,
-        然后每个TaskTracker分别执行从nn1到nn2的拷贝操作。注意DistCp使用绝对路径进行操作。
-        </p>
-<p>命令行中可以指定多个源目录:</p>
-<p>
-<span class="codefrag">bash$ hadoop distcp hdfs://nn1:8020/foo/a \</span>
-<br>
-           
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-                 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-                 hdfs://nn1:8020/foo/b \</span>
-<br>
-           
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-                 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-                 hdfs://nn2:8020/bar/foo</span>
-</p>
-<p>或者使用<span class="codefrag">-f</span>选项,从文件里获得多个源:<br>
-        
-<span class="codefrag">bash$ hadoop distcp -f hdfs://nn1:8020/srclist \</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              &nbsp;hdfs://nn2:8020/bar/foo</span>
-<br>
-</p>
-<p>其中<span class="codefrag">srclist</span> 的内容是<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/a</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/b</span>
-</p>
-<p>当从多个源拷贝时,如果两个源冲突,DistCp会停止拷贝并提示出错信息,
-        如果在目的位置发生冲突,会根据<a href="#options">选项设置</a>解决。
-        默认情况会跳过已经存在的目标文件(比如不用源文件做替换操作)。每次操作结束时
-        都会报告跳过的文件数目,但是如果某些拷贝操作失败了,但在之后的尝试成功了,
-        那么报告的信息可能不够精确(请参考<a href="#etc">附录</a>)。
-	</p>
-<p>每个TaskTracker必须都能够与源端和目的端文件系统进行访问和交互。
-        对于HDFS来说,源和目的端要运行相同版本的协议或者使用向下兼容的协议。
-        (请参考<a href="#cpver">不同版本间的拷贝</a> )。
-        </p>
-<p>拷贝完成后,建议生成源端和目的端文件的列表,并交叉检查,来确认拷贝真正成功。
-        因为DistCp使用Map/Reduce和文件系统API进行操作,所以这三者或它们之间有任何问题
-        都会影响拷贝操作。一些Distcp命令的成功执行可以通过再次执行带-update参数的该命令来完成,
-        但用户在如此操作之前应该对该命令的语法很熟悉。
-        </p>
-<p>值得注意的是,当另一个客户端同时在向源文件写入时,拷贝很有可能会失败。
-        尝试覆盖HDFS上正在被写入的文件的操作也会失败。
-        如果一个源文件在拷贝之前被移动或删除了,拷贝失败同时输出异常
-        FileNotFoundException。</p>
-<a name="N1007B"></a><a name="options"></a>
-<h3 class="h4">选项</h3>
-<a name="N10081"></a><a name="%E9%80%89%E9%A1%B9%E7%B4%A2%E5%BC%95"></a>
-<h4>选项索引</h4>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-          
-<tr>
-<th colspan="1" rowspan="1"> 标识  </th><th colspan="1" rowspan="1"> 描述 </th><th colspan="1" rowspan="1"> 备注 </th>
-</tr>
-
-          
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">-p[rbugp]</span></td>
-              <td colspan="1" rowspan="1">Preserve<br>
-                  &nbsp;&nbsp;r: replication number<br>
-                  &nbsp;&nbsp;b: block size<br>
-                  &nbsp;&nbsp;u: user<br>
-                  &nbsp;&nbsp;g: group<br>
-                  &nbsp;&nbsp;p: permission<br>
-</td>
-              <td colspan="1" rowspan="1">修改时间不会被保留。并且当指定
-              <span class="codefrag">-update</span> 时,更新的状态<strong>不</strong>会
-              被同步,除非文件大小不同(比如文件被重新创建)。
-              </td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">-i</span></td>
-              <td colspan="1" rowspan="1">忽略失败</td>
-              <td colspan="1" rowspan="1">就像在 <a href="#etc">附录</a>中提到的,这个选项会比默认情况提供关于拷贝的更精确的统计, 同时它还将保留失败拷贝操作的日志,这些日志信息可以用于调试。最后,如果一个map失败了,但并没完成所有分块任务的尝试,这不会导致整个作业的失败。
-              </td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">-log &lt;logdir&gt;</span></td>
-              <td colspan="1" rowspan="1">记录日志到 &lt;logdir&gt;</td>
-              <td colspan="1" rowspan="1">DistCp为每个文件的每次尝试拷贝操作都记录日志,并把日志作为map的输出。
-              如果一个map失败了,当重新执行时这个日志不会被保留。
-              </td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">-m &lt;num_maps&gt;</span></td>
-              <td colspan="1" rowspan="1">同时拷贝的最大数目</td>
-              <td colspan="1" rowspan="1">指定了拷贝数据时map的数目。请注意并不是map数越多吞吐量越大。
-              </td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">-overwrite</span></td>
-              <td colspan="1" rowspan="1">覆盖目标</td>
-              <td colspan="1" rowspan="1">如果一个map失败并且没有使用<span class="codefrag">-i</span>选项,不仅仅那些拷贝失败的文件,这个分块任务中的所有文件都会被重新拷贝。
-			  就像<a href="#uo">下面</a>提到的,它会改变生成目标路径的语义,所以
-              用户要小心使用这个选项。
-              </td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">-update</span></td>
-              <td colspan="1" rowspan="1">如果源和目标的大小不一样则进行覆盖</td>
-              <td colspan="1" rowspan="1">像之前提到的,这不是"同步"操作。
-              执行覆盖的唯一标准是源文件和目标文件大小是否相同;如果不同,则源文件替换目标文件。
-              像 <a href="#uo">下面</a>提到的,它也改变生成目标路径的语义,
-              用户使用要小心。
-              </td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">-f &lt;urilist_uri&gt;</span></td>
-              <td colspan="1" rowspan="1">使用&lt;urilist_uri&gt; 作为源文件列表</td>
-              <td colspan="1" rowspan="1">这等价于把所有文件名列在命令行中。
-              <span class="codefrag">urilist_uri</span> 列表应该是完整合法的URI。
-              </td>
-</tr>
-
-        
-</table>
-<a name="N10133"></a><a name="uo"></a>
-<h4>更新和覆盖</h4>
-<p>这里给出一些 <span class="codefrag">-update</span>和 <span class="codefrag">-overwrite</span>的例子。
-        考虑一个从<span class="codefrag">/foo/a</span> 和
-        <span class="codefrag">/foo/b</span> 到 <span class="codefrag">/bar/foo</span>的拷贝,源路径包括:
-        </p>
-<p>
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/a</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/a/aa</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/a/ab</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/b</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/b/ba</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/b/ab</span>
-</p>
-<p>如果没设置<span class="codefrag">-update</span>或 <span class="codefrag">-overwrite</span>选项,
-        那么两个源都会映射到目标端的
-        <span class="codefrag">/bar/foo/ab</span>。
-        如果设置了这两个选项,每个源目录的内容都会和目标目录的
-         <strong>内容</strong> 做比较。DistCp碰到这类冲突的情况会终止操作并退出。</p>
-<p>默认情况下,<span class="codefrag">/bar/foo/a</span> 和
-        <span class="codefrag">/bar/foo/b</span> 目录都会被创建,所以并不会有冲突。</p>
-<p>现在考虑一个使用<span class="codefrag">-update</span>合法的操作:<br>
-        
-<span class="codefrag">distcp -update hdfs://nn1:8020/foo/a \</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              hdfs://nn1:8020/foo/b \</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              hdfs://nn2:8020/bar</span>
-</p>
-<p>其中源路径/大小:</p>
-<p>
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/a</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/a/aa 32</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/a/ab 32</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/b</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/b/ba 64</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn1:8020/foo/b/bb 32</span>
-</p>
-<p>和目的路径/大小:</p>
-<p>
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn2:8020/bar</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn2:8020/bar/aa 32</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn2:8020/bar/ba 32</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn2:8020/bar/bb 64</span>
-</p>
-<p>会产生:</p>
-<p>
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn2:8020/bar</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn2:8020/bar/aa 32</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn2:8020/bar/ab 32</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn2:8020/bar/ba 64</span>
-<br>
-        
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;hdfs://nn2:8020/bar/bb 32</span>
-</p>
-<p>只有nn2的<span class="codefrag">aa</span>文件没有被覆盖。如果指定了
-        <span class="codefrag">-overwrite</span>选项,所有文件都会被覆盖。
-        </p>
-</div> <!-- Usage -->
-
-    
-<a name="N101E4"></a><a name="etc"></a>
-<h2 class="h3">附录</h2>
-<div class="section">
-<a name="N101EA"></a><a name="Map%E6%95%B0%E7%9B%AE"></a>
-<h3 class="h4">Map数目</h3>
-<p>DistCp会尝试着均分需要拷贝的内容,这样每个map拷贝差不多相等大小的内容。
-	          但因为文件是最小的拷贝粒度,所以配置增加同时拷贝(如map)的数目不一定会增加实际同时拷贝的数目以及总吞吐量。
-          </p>
-<p>如果没使用<span class="codefrag">-m</span>选项,DistCp会尝试在调度工作时指定map的数目
-          为 <span class="codefrag">min (total_bytes / bytes.per.map, 20 * num_task_trackers)</span>,
-		  其中<span class="codefrag">bytes.per.map</span>默认是256MB。</p>
-<p>建议对于长时间运行或定期运行的作业,根据源和目标集群大小、拷贝数量大小以及带宽调整map的数目。
-          </p>
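-<p>As a worked example of the formula above (all figures hypothetical): copying 50 GB with the default <span class="codefrag">bytes.per.map</span> of 256MB yields 51200/256 = 200 candidate maps; on a cluster with 5 TaskTrackers the cap is 20 * 5 = 100, so min(200, 100) = 100 maps would be scheduled, and <span class="codefrag">-m 50</span> would lower that further to 50.</p>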
-<a name="N10203"></a><a name="cpver"></a>
-<h3 class="h4">不同HDFS版本间的拷贝</h3>
-<p>对于不同Hadoop版本间的拷贝,用户应该使用HftpFileSystem。
-        这是一个只读文件系统,所以DistCp必须运行在目标端集群上(更确切的说是在能够写入目标集群的TaskTracker上)。
-        源的格式是
-        <span class="codefrag">hftp://&lt;dfs.http.address&gt;/&lt;path&gt;</span>
-        (默认情况<span class="codefrag">dfs.http.address</span>是
-        &lt;namenode&gt;:50070)。</p>
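-<p>A sketch of such a copy, run from the destination cluster (hostnames and paths are hypothetical):</p>
-<p>
-<span class="codefrag">bash$ hadoop distcp hftp://nn0.example.com:50070/foo/bar hdfs://nn1.example.com:8020/bar/foo</span>
-</p>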
-<a name="N10213"></a><a name="Map%2FReduce%E5%92%8C%E5%89%AF%E6%95%88%E5%BA%94"></a>
-<h3 class="h4">Map/Reduce和副效应</h3>
-<p>像前面提到的,map拷贝输入文件失败时,会带来一些副效应。
-        </p>
-<ul>
-
-          
-<li>除非使用了<span class="codefrag">-i</span>,任务产生的日志会被新的尝试替换掉。
-          </li>
-
-          
-<li>除非使用了<span class="codefrag">-overwrite</span>,文件被之前的map成功拷贝后当又一次执行拷贝时会被标记为
-          "被忽略"。</li>
-
-          
-<li>如果map失败了<span class="codefrag">mapred.map.max.attempts</span>次,剩下的map任务会被终止(除非使用了<span class="codefrag">-i</span>)。
-          </li>
-
-          
-<li>如果<span class="codefrag">mapred.speculative.execution</span>被设置为
-          <span class="codefrag">final</span>和<span class="codefrag">true</span>,则拷贝的结果是未定义的。</li>
-
-        
-</ul>
-</div> <!-- Appendix -->
-
-  
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 149
docs/cn/distcp.pdf


+ 0 - 1108
docs/cn/hadoop-default.html

@@ -1,1108 +0,0 @@
-<html>
-<body>
-<table border="1">
-<tr>
-<td>name</td><td>value</td><td>description</td>
-</tr>
-<tr>
-<td><a name="hadoop.tmp.dir">hadoop.tmp.dir</a></td><td>/tmp/hadoop-${user.name}</td><td>A base for other temporary directories.</td>
-</tr>
-<tr>
-<td><a name="hadoop.native.lib">hadoop.native.lib</a></td><td>true</td><td>Should native hadoop libraries, if present, be used.</td>
-</tr>
-<tr>
-<td><a name="hadoop.http.filter.initializers">hadoop.http.filter.initializers</a></td><td></td><td>A comma separated list of class names. Each class in the list 
-  must extend org.apache.hadoop.http.FilterInitializer. The corresponding 
-  Filter will be initialized. Then, the Filter will be applied to all user 
-  facing jsp and servlet web pages.  The ordering of the list defines the 
-  ordering of the filters.</td>
-</tr>
-<tr>
-<td><a name="hadoop.logfile.size">hadoop.logfile.size</a></td><td>10000000</td><td>The max size of each log file</td>
-</tr>
-<tr>
-<td><a name="hadoop.logfile.count">hadoop.logfile.count</a></td><td>10</td><td>The max number of log files</td>
-</tr>
-<tr>
-<td><a name="hadoop.job.history.location">hadoop.job.history.location</a></td><td></td><td> If job tracker is static the history files are stored 
-  in this single well known place. If no value is set here, by default,
-  it is in the local file system at ${hadoop.log.dir}/history.
-  </td>
-</tr>
-<tr>
-<td><a name="hadoop.job.history.user.location">hadoop.job.history.user.location</a></td><td></td><td> User can specify a location to store the history files of 
-  a particular job. If nothing is specified, the logs are stored in 
-  output directory. The files are stored in "_logs/history/" in the directory.
-  User can stop logging by giving the value "none". 
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.namenode.logging.level">dfs.namenode.logging.level</a></td><td>info</td><td>The logging level for dfs namenode. Other values are "dir"(trac
-e namespace mutations), "block"(trace block under/over replications and block
-creations/deletions), or "all".</td>
-</tr>
-<tr>
-<td><a name="io.sort.factor">io.sort.factor</a></td><td>10</td><td>The number of streams to merge at once while sorting
-  files.  This determines the number of open file handles.</td>
-</tr>
-<tr>
-<td><a name="io.sort.mb">io.sort.mb</a></td><td>100</td><td>The total amount of buffer memory to use while sorting 
-  files, in megabytes.  By default, gives each merge stream 1MB, which
-  should minimize seeks.</td>
-</tr>
-<tr>
-<td><a name="io.sort.record.percent">io.sort.record.percent</a></td><td>0.05</td><td>The percentage of io.sort.mb dedicated to tracking record
-  boundaries. Let this value be r, io.sort.mb be x. The maximum number
-  of records collected before the collection thread must block is equal
-  to (r * x) / 4</td>
-</tr>
-<tr>
-<td><a name="io.sort.spill.percent">io.sort.spill.percent</a></td><td>0.80</td><td>The soft limit in either the buffer or record collection
-  buffers. Once reached, a thread will begin to spill the contents to disk
-  in the background. Note that this does not imply any chunking of data to
-  the spill. A value less than 0.5 is not recommended.</td>
-</tr>
-<tr>
-<td><a name="io.file.buffer.size">io.file.buffer.size</a></td><td>4096</td><td>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</td>
-</tr>
-<tr>
-<td><a name="io.bytes.per.checksum">io.bytes.per.checksum</a></td><td>512</td><td>The number of bytes per checksum.  Must not be larger than
-  io.file.buffer.size.</td>
-</tr>
-<tr>
-<td><a name="io.skip.checksum.errors">io.skip.checksum.errors</a></td><td>false</td><td>If true, when a checksum error is encountered while
-  reading a sequence file, entries are skipped, instead of throwing an
-  exception.</td>
-</tr>
-<tr>
-<td><a name="io.map.index.skip">io.map.index.skip</a></td><td>0</td><td>Number of index entries to skip between each entry.
-  Zero by default. Setting this to values larger than zero can
-  facilitate opening large map files using less memory.</td>
-</tr>
-<tr>
-<td><a name="io.compression.codecs">io.compression.codecs</a></td><td>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</td><td>A list of the compression codec classes that can be used 
-               for compression/decompression.</td>
-</tr>
-<tr>
-<td><a name="io.serializations">io.serializations</a></td><td>org.apache.hadoop.io.serializer.WritableSerialization</td><td>A list of serialization classes that can be used for
-  obtaining serializers and deserializers.</td>
-</tr>
-<tr>
-<td><a name="fs.default.name">fs.default.name</a></td><td>file:///</td><td>The name of the default file system.  A URI whose
-  scheme and authority determine the FileSystem implementation.  The
-  uri's scheme determines the config property (fs.SCHEME.impl) naming
-  the FileSystem implementation class.  The uri's authority is used to
-  determine the host, port, etc. for a filesystem.</td>
-</tr>
-<tr>
-<td><a name="fs.trash.interval">fs.trash.interval</a></td><td>0</td><td>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </td>
-</tr>
-<tr>
-<td><a name="fs.file.impl">fs.file.impl</a></td><td>org.apache.hadoop.fs.LocalFileSystem</td><td>The FileSystem for file: uris.</td>
-</tr>
-<tr>
-<td><a name="fs.hdfs.impl">fs.hdfs.impl</a></td><td>org.apache.hadoop.hdfs.DistributedFileSystem</td><td>The FileSystem for hdfs: uris.</td>
-</tr>
-<tr>
-<td><a name="fs.s3.impl">fs.s3.impl</a></td><td>org.apache.hadoop.fs.s3.S3FileSystem</td><td>The FileSystem for s3: uris.</td>
-</tr>
-<tr>
-<td><a name="fs.s3n.impl">fs.s3n.impl</a></td><td>org.apache.hadoop.fs.s3native.NativeS3FileSystem</td><td>The FileSystem for s3n: (Native S3) uris.</td>
-</tr>
-<tr>
-<td><a name="fs.kfs.impl">fs.kfs.impl</a></td><td>org.apache.hadoop.fs.kfs.KosmosFileSystem</td><td>The FileSystem for kfs: uris.</td>
-</tr>
-<tr>
-<td><a name="fs.hftp.impl">fs.hftp.impl</a></td><td>org.apache.hadoop.hdfs.HftpFileSystem</td><td></td>
-</tr>
-<tr>
-<td><a name="fs.hsftp.impl">fs.hsftp.impl</a></td><td>org.apache.hadoop.hdfs.HsftpFileSystem</td><td></td>
-</tr>
-<tr>
-<td><a name="fs.ftp.impl">fs.ftp.impl</a></td><td>org.apache.hadoop.fs.ftp.FTPFileSystem</td><td>The FileSystem for ftp: uris.</td>
-</tr>
-<tr>
-<td><a name="fs.ramfs.impl">fs.ramfs.impl</a></td><td>org.apache.hadoop.fs.InMemoryFileSystem</td><td>The FileSystem for ramfs: uris.</td>
-</tr>
-<tr>
-<td><a name="fs.har.impl">fs.har.impl</a></td><td>org.apache.hadoop.fs.HarFileSystem</td><td>The filesystem for Hadoop archives. </td>
-</tr>
-<tr>
-<td><a name="fs.checkpoint.dir">fs.checkpoint.dir</a></td><td>${hadoop.tmp.dir}/dfs/namesecondary</td><td>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-  </td>
-</tr>
-<tr>
-<td><a name="fs.checkpoint.edits.dir">fs.checkpoint.edits.dir</a></td><td>${fs.checkpoint.dir}</td><td>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      Default value is same as fs.checkpoint.dir
-  </td>
-</tr>
-<tr>
-<td><a name="fs.checkpoint.period">fs.checkpoint.period</a></td><td>3600</td><td>The number of seconds between two periodic checkpoints.
-  </td>
-</tr>
-<tr>
-<td><a name="fs.checkpoint.size">fs.checkpoint.size</a></td><td>67108864</td><td>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.secondary.http.address">dfs.secondary.http.address</a></td><td>0.0.0.0:50090</td><td>
-    The secondary namenode http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.address">dfs.datanode.address</a></td><td>0.0.0.0:50010</td><td>
-    The address where the datanode server will listen to.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.http.address">dfs.datanode.http.address</a></td><td>0.0.0.0:50075</td><td>
-    The datanode http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.ipc.address">dfs.datanode.ipc.address</a></td><td>0.0.0.0:50020</td><td>
-    The datanode ipc server address and port.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.handler.count">dfs.datanode.handler.count</a></td><td>3</td><td>The number of server threads for the datanode.</td>
-</tr>
-<tr>
-<td><a name="dfs.http.address">dfs.http.address</a></td><td>0.0.0.0:50070</td><td>
-    The address and the base port where the dfs namenode web ui will listen on.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.https.enable">dfs.https.enable</a></td><td>false</td><td>Decide if HTTPS(SSL) is supported on HDFS
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.https.need.client.auth">dfs.https.need.client.auth</a></td><td>false</td><td>Whether SSL client certificate authentication is required
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.https.server.keystore.resource">dfs.https.server.keystore.resource</a></td><td>ssl-server.xml</td><td>Resource file from which ssl server keystore
-  information will be extracted
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.https.client.keystore.resource">dfs.https.client.keystore.resource</a></td><td>ssl-client.xml</td><td>Resource file from which ssl client keystore
-  information will be extracted
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.https.address">dfs.datanode.https.address</a></td><td>0.0.0.0:50475</td><td></td>
-</tr>
-<tr>
-<td><a name="dfs.https.address">dfs.https.address</a></td><td>0.0.0.0:50470</td><td></td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.dns.interface">dfs.datanode.dns.interface</a></td><td>default</td><td>The name of the Network Interface from which a data node should 
-  report its IP address.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.dns.nameserver">dfs.datanode.dns.nameserver</a></td><td>default</td><td>The host name or IP address of the name server (DNS)
-  which a DataNode should use to determine the host name used by the
-  NameNode for communication and display purposes.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.replication.considerLoad">dfs.replication.considerLoad</a></td><td>true</td><td>Decide if chooseTarget considers the target's load or not
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.default.chunk.view.size">dfs.default.chunk.view.size</a></td><td>32768</td><td>The number of bytes to view for a file on the browser.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.du.reserved">dfs.datanode.du.reserved</a></td><td>0</td><td>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.name.dir">dfs.name.dir</a></td><td>${hadoop.tmp.dir}/dfs/name</td><td>Determines where on the local filesystem the DFS name node
-      should store the name table(fsimage).  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </td>
-</tr>
-<tr>
-<td><a name="dfs.name.edits.dir">dfs.name.edits.dir</a></td><td>${dfs.name.dir}</td><td>Determines where on the local filesystem the DFS name node
-      should store the transaction (edits) file. If this is a comma-delimited list
-      of directories then the transaction file is replicated in all of the 
-      directories, for redundancy. Default value is same as dfs.name.dir
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.web.ugi">dfs.web.ugi</a></td><td>webuser,webgroup</td><td>The user account used by the web interface.
-    Syntax: USERNAME,GROUP1,GROUP2, ...
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.permissions">dfs.permissions</a></td><td>true</td><td>
-    If "true", enable permission checking in HDFS.
-    If "false", permission checking is turned off,
-    but all other behavior is unchanged.
-    Switching from one parameter value to the other does not change the mode,
-    owner or group of files or directories.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.permissions.supergroup">dfs.permissions.supergroup</a></td><td>supergroup</td><td>The name of the group of super-users.</td>
-</tr>
-<tr>
-<td><a name="dfs.data.dir">dfs.data.dir</a></td><td>${hadoop.tmp.dir}/dfs/data</td><td>Determines where on the local filesystem an DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.replication">dfs.replication</a></td><td>3</td><td>Default block replication. 
-  The actual number of replicas can be specified when the file is created.
-  The default is used if replication is not specified at create time.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.replication.max">dfs.replication.max</a></td><td>512</td><td>Maximal block replication. 
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.replication.min">dfs.replication.min</a></td><td>1</td><td>Minimal block replication. 
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.block.size">dfs.block.size</a></td><td>67108864</td><td>The default block size for new files.</td>
-</tr>
-<tr>
-<td><a name="dfs.df.interval">dfs.df.interval</a></td><td>60000</td><td>Disk usage statistics refresh interval in msec.</td>
-</tr>
-<tr>
-<td><a name="dfs.client.block.write.retries">dfs.client.block.write.retries</a></td><td>3</td><td>The number of retries for writing blocks to the data nodes, 
-  before we signal failure to the application.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.blockreport.intervalMsec">dfs.blockreport.intervalMsec</a></td><td>3600000</td><td>Determines block reporting interval in milliseconds.</td>
-</tr>
-<tr>
-<td><a name="dfs.blockreport.initialDelay">dfs.blockreport.initialDelay</a></td><td>0</td><td>Delay for first block report in seconds.</td>
-</tr>
-<tr>
-<td><a name="dfs.heartbeat.interval">dfs.heartbeat.interval</a></td><td>3</td><td>Determines datanode heartbeat interval in seconds.</td>
-</tr>
-<tr>
-<td><a name="dfs.namenode.handler.count">dfs.namenode.handler.count</a></td><td>10</td><td>The number of server threads for the namenode.</td>
-</tr>
-<tr>
-<td><a name="dfs.safemode.threshold.pct">dfs.safemode.threshold.pct</a></td><td>0.999f</td><td>
-  	Specifies the percentage of blocks that should satisfy 
-  	the minimal replication requirement defined by dfs.replication.min.
-  	Values less than or equal to 0 mean not to start in safe mode.
-  	Values greater than 1 will make safe mode permanent.
- 	</td>
-</tr>
-<tr>
-<td><a name="dfs.safemode.extension">dfs.safemode.extension</a></td><td>30000</td><td>
-  	Determines extension of safe mode in milliseconds 
-  	after the threshold level is reached.
- 	</td>
-</tr>
-<tr>
-<td><a name="dfs.balance.bandwidthPerSec">dfs.balance.bandwidthPerSec</a></td><td>1048576</td><td>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing purposes, in terms of
-        the number of bytes per second.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.hosts">dfs.hosts</a></td><td></td><td>Names a file that contains a list of hosts that are
-  permitted to connect to the namenode. The full pathname of the file
-  must be specified.  If the value is empty, all hosts are
-  permitted.</td>
-</tr>
-<tr>
-<td><a name="dfs.hosts.exclude">dfs.hosts.exclude</a></td><td></td><td>Names a file that contains a list of hosts that are
-  not permitted to connect to the namenode.  The full pathname of the
-  file must be specified.  If the value is empty, no hosts are
-  excluded.</td>
-</tr>
-<tr>
-<td><a name="dfs.max.objects">dfs.max.objects</a></td><td>0</td><td>The maximum number of files, directories and blocks
-  dfs supports. A value of zero indicates no limit to the number
-  of objects that dfs supports.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.namenode.decommission.interval">dfs.namenode.decommission.interval</a></td><td>30</td><td>Namenode periodicity in seconds to check if decommission is 
-  complete.</td>
-</tr>
-<tr>
-<td><a name="dfs.namenode.decommission.nodes.per.interval">dfs.namenode.decommission.nodes.per.interval</a></td><td>5</td><td>The number of nodes namenode checks if decommission is complete
-  in each dfs.namenode.decommission.interval.</td>
-</tr>
-<tr>
-<td><a name="dfs.replication.interval">dfs.replication.interval</a></td><td>3</td><td>The periodicity in seconds with which the namenode computes 
-  replication work for datanodes. </td>
-</tr>
-<tr>
-<td><a name="dfs.access.time.precision">dfs.access.time.precision</a></td><td>3600000</td><td>The access time for HDFS file is precise upto this value. 
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </td>
-</tr>
-<tr>
-<td><a name="fs.s3.block.size">fs.s3.block.size</a></td><td>67108864</td><td>Block size to use when writing files to S3.</td>
-</tr>
-<tr>
-<td><a name="fs.s3.buffer.dir">fs.s3.buffer.dir</a></td><td>${hadoop.tmp.dir}/s3</td><td>Determines where on the local filesystem the S3 filesystem
-  should store files before sending them to S3
-  (or after retrieving them from S3).
-  </td>
-</tr>
-<tr>
-<td><a name="fs.s3.maxRetries">fs.s3.maxRetries</a></td><td>4</td><td>The maximum number of retries for reading or writing files to S3, 
-  before we signal failure to the application.
-  </td>
-</tr>
-<tr>
-<td><a name="fs.s3.sleepTimeSeconds">fs.s3.sleepTimeSeconds</a></td><td>10</td><td>The number of seconds to sleep between each S3 retry.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.tracker">mapred.job.tracker</a></td><td>local</td><td>The host and port that the MapReduce job tracker runs
-  at.  If "local", then jobs are run in-process as a single map
-  and reduce task.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.tracker.http.address">mapred.job.tracker.http.address</a></td><td>0.0.0.0:50030</td><td>
-    The job tracker http server address and port the server will listen on.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.tracker.handler.count">mapred.job.tracker.handler.count</a></td><td>10</td><td>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.tracker.report.address">mapred.task.tracker.report.address</a></td><td>127.0.0.1:0</td><td>The interface and port that task tracker server listens on. 
-  Since it is only connected to by the tasks, it uses the local interface.
-  EXPERT ONLY. Should only be changed if your host does not have the loopback 
-  interface.</td>
-</tr>
-<tr>
-<td><a name="mapred.local.dir">mapred.local.dir</a></td><td>${hadoop.tmp.dir}/mapred/local</td><td>The local directory where MapReduce stores intermediate
-  data files.  May be a comma-separated list of
-  directories on different devices in order to spread disk i/o.
-  Directories that do not exist are ignored.
-  </td>
-</tr>
-<tr>
-<td><a name="local.cache.size">local.cache.size</a></td><td>10737418240</td><td>The limit on the size of cache you want to keep, set by default
-  to 10GB. This will act as a soft limit on the cache directory for out of band data.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.system.dir">mapred.system.dir</a></td><td>${hadoop.tmp.dir}/mapred/system</td><td>The shared directory where MapReduce stores control files.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.temp.dir">mapred.temp.dir</a></td><td>${hadoop.tmp.dir}/mapred/temp</td><td>A shared directory for temporary files.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.local.dir.minspacestart">mapred.local.dir.minspacestart</a></td><td>0</td><td>If the space in mapred.local.dir drops under this, 
-  do not ask for more tasks.
-  Value in bytes.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.local.dir.minspacekill">mapred.local.dir.minspacekill</a></td><td>0</td><td>If the space in mapred.local.dir drops under this, 
-  	do not ask for more tasks until all the current ones have finished and 
-  	cleaned up. Also, to save the rest of the tasks we have running, 
-  	kill one of them, to clean up some space. Start with the reduce tasks,
-  	then go with the ones that have finished the least.
-  	Value in bytes.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.expiry.interval">mapred.tasktracker.expiry.interval</a></td><td>600000</td><td>Expert: The time-interval, in miliseconds, after which
-  a tasktracker is declared 'lost' if it doesn't send heartbeats.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.instrumentation">mapred.tasktracker.instrumentation</a></td><td>org.apache.hadoop.mapred.TaskTrackerMetricsInst</td><td>Expert: The instrumentation class to associate with each TaskTracker.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.vmem.reserved">mapred.tasktracker.vmem.reserved</a></td><td>-1</td><td>Configuration property to specify the amount of virtual memory
-    that has to be reserved by the TaskTracker for system usage (OS, TT etc).
-    The reserved virtual memory should be a part of the total virtual memory
-    available on the TaskTracker.
-    
-    The reserved virtual memory and the total virtual memory values are
-    reported by the TaskTracker as part of its heartbeat so that they can be
-    considered by a scheduler. Please refer to the documentation of the
-    configured scheduler to see how this property is used.
-    
-    These two values are also used by a TaskTracker for tracking tasks' memory
-    usage. Memory management functionality on a TaskTracker is disabled if this
-    property is set to -1, if it is more than the total virtual memory on the 
-    tasktracker, or if either of the values is negative.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.pmem.reserved">mapred.tasktracker.pmem.reserved</a></td><td>-1</td><td>Configuration property to specify the amount of physical memory
-    that has to be reserved by the TaskTracker for system usage (OS, TT etc).
-    The reserved physical memory should be a part of the total physical memory
-    available on the TaskTracker.
-
-    The reserved physical memory and the total physical memory values are
-    reported by the TaskTracker as part of its heartbeat so that they can be
-    considered by a scheduler. Please refer to the documentation of the
-    configured scheduler to see how this property is used.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.default.maxvmem">mapred.task.default.maxvmem</a></td><td>-1</td><td>
-    Cluster-wide configuration in bytes to be set by the administrators that
-    provides default amount of maximum virtual memory for job's tasks. This has
-    to be set on both the JobTracker node for the sake of scheduling decisions
-    and on the TaskTracker nodes for the sake of memory management.
-
-    If a job doesn't specify its virtual memory requirement (i.e. it leaves
-    mapred.task.maxvmem at -1), tasks are assured a memory limit equal
-    to this property. This property is set to -1 by default.
-
-    This value should in general be less than the cluster-wide
-    configuration mapred.task.limit.maxvmem. If it is not, or if it is not set,
-    the TaskTracker's memory management will be disabled and a scheduler's memory
-    based scheduling decisions may be affected. Please refer to the
-    documentation of the configured scheduler to see how this property is used.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.limit.maxvmem">mapred.task.limit.maxvmem</a></td><td>-1</td><td>
-    Cluster-wide configuration in bytes to be set by the site administrators
-    that provides an upper limit on the maximum virtual memory that can be
-    specified by a job via mapred.task.maxvmem. This has to be set on both the
-    JobTracker node for the sake of scheduling decisions and on the TaskTracker
-    nodes for the sake of memory management.
-    
-    The job configuration mapred.task.maxvmem should not be more than this
-    value, otherwise depending on the scheduler being configured, the job may
-    be rejected or the job configuration may just be ignored. Please refer to
-    the documentation of the configured scheduler to see how this property is
-    used.
-
-    If it is not set on a TaskTracker, the TaskTracker's memory management will be
-    disabled.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.maxvmem">mapred.task.maxvmem</a></td><td>-1</td><td>
-    The maximum amount of virtual memory any task of a job will use, in bytes.
-
-    This value will be used by TaskTrackers for monitoring the memory usage of
-    tasks of this job. If a TaskTracker's memory management functionality is
-    enabled, each task of this job will be allowed to use a maximum virtual
-    memory specified by this property. If the task's memory usage goes over 
-    this value, the task will be failed by the TT. If not set, the
-    cluster-wide configuration mapred.task.default.maxvmem is used as the
-    default value for memory requirements. If this property, cascaded with
-    mapred.task.default.maxvmem, is still equal to -1, the job's tasks will
-    not be assured any particular amount of virtual memory and may be killed by
-    a TT that intends to control the total memory usage of the tasks via memory
-    management functionality. If the memory management functionality is
-    disabled on a TT, this value is ignored.
-
-    This value should not be more than the cluster-wide configuration
-    mapred.task.limit.maxvmem.
-
-    This value may be used by schedulers that support scheduling based on job's
-    memory requirements. Please refer to the documentation of the scheduler
-    being configured to see if it does memory based scheduling and if it does,
-    how this property is used by that scheduler.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.maxpmem">mapred.task.maxpmem</a></td><td>-1</td><td>
-   The maximum amount of physical memory any task of a job will use in bytes.
-
-   This value may be used by schedulers that support scheduling based on job's
-   memory requirements. In general, a task of this job will be scheduled on a
-   TaskTracker, only if the amount of physical memory still unoccupied on the
-   TaskTracker is greater than or equal to this value. Different schedulers can
-   take different decisions, some might just ignore this value. Please refer to
-   the documentation of the scheduler being configured to see if it does
-   memory based scheduling and if it does, how this variable is used by that
-   scheduler.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.memory_calculator_plugin">mapred.tasktracker.memory_calculator_plugin</a></td><td></td><td>
-   Name of the class whose instance will be used to query memory information
-   on the tasktracker.
-   
-   The class must be an instance of 
-   org.apache.hadoop.util.MemoryCalculatorPlugin. If the value is null, the
-   tasktracker attempts to use a class appropriate to the platform. 
-   Currently, the only platform supported is Linux.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.taskmemorymanager.monitoring-interval">mapred.tasktracker.taskmemorymanager.monitoring-interval</a></td><td>5000</td><td>The interval, in milliseconds, for which the tasktracker waits
-   between two cycles of monitoring its tasks' memory usage. Used only if
-   tasks' memory management is enabled via mapred.tasktracker.tasks.maxmemory.
-   </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill">mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill</a></td><td>5000</td><td>The time, in milliseconds, the tasktracker waits for sending a
-  SIGKILL to a process that has overrun memory limits, after it has been sent
-  a SIGTERM. Used only if tasks' memory management is enabled via
-  mapred.tasktracker.tasks.maxmemory.</td>
-</tr>
-<tr>
-<td><a name="mapred.map.tasks">mapred.map.tasks</a></td><td>2</td><td>The default number of map tasks per job.  Typically set
-  to a prime several times greater than the number of available hosts.
-  Ignored when mapred.job.tracker is "local".  
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.reduce.tasks">mapred.reduce.tasks</a></td><td>1</td><td>The default number of reduce tasks per job.  Typically set
-  to a prime close to the number of available hosts.  Ignored when
-  mapred.job.tracker is "local".
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.restart.recover">mapred.jobtracker.restart.recover</a></td><td>false</td><td>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.job.history.block.size">mapred.jobtracker.job.history.block.size</a></td><td>3145728</td><td>The block size of the job history file. Since the job recovery
-               uses job history, it's important to dump job history to disk as 
-               soon as possible. Note that this is an expert level parameter.
-               The default value is set to 3 MB.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.taskScheduler">mapred.jobtracker.taskScheduler</a></td><td>org.apache.hadoop.mapred.JobQueueTaskScheduler</td><td>The class responsible for scheduling the tasks.</td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.taskScheduler.maxRunningTasksPerJob">mapred.jobtracker.taskScheduler.maxRunningTasksPerJob</a></td><td></td><td>The maximum number of running tasks for a job before
-  it gets preempted. No limits if undefined.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.map.max.attempts">mapred.map.max.attempts</a></td><td>4</td><td>Expert: The maximum number of attempts per map task.
-  In other words, the framework will try to execute a map task this many
-  times before giving up on it.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.reduce.max.attempts">mapred.reduce.max.attempts</a></td><td>4</td><td>Expert: The maximum number of attempts per reduce task.
-  In other words, the framework will try to execute a reduce task this many
-  times before giving up on it.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.reduce.parallel.copies">mapred.reduce.parallel.copies</a></td><td>5</td><td>The default number of parallel transfers run by reduce
-  during the copy(shuffle) phase.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.reduce.copy.backoff">mapred.reduce.copy.backoff</a></td><td>300</td><td>The maximum amount of time (in seconds) a reducer spends on 
-  fetching one map output before declaring it as failed.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.timeout">mapred.task.timeout</a></td><td>600000</td><td>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.map.tasks.maximum">mapred.tasktracker.map.tasks.maximum</a></td><td>2</td><td>The maximum number of map tasks that will be run
-  simultaneously by a task tracker.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.reduce.tasks.maximum">mapred.tasktracker.reduce.tasks.maximum</a></td><td>2</td><td>The maximum number of reduce tasks that will be run
-  simultaneously by a task tracker.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.completeuserjobs.maximum">mapred.jobtracker.completeuserjobs.maximum</a></td><td>100</td><td>The maximum number of complete jobs per user to keep around 
-  before delegating them to the job history.</td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.instrumentation">mapred.jobtracker.instrumentation</a></td><td>org.apache.hadoop.mapred.JobTrackerMetricsInst</td><td>Expert: The instrumentation class to associate with each JobTracker.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.child.java.opts">mapred.child.java.opts</a></td><td>-Xmx200m</td><td>Java opts for the task tracker child processes.  
-  The following symbol, if present, will be interpolated: @taskid@ is replaced 
-  by current TaskID. Any other occurrences of '@' will go unchanged.
-  For example, to enable verbose gc logging to a file named for the taskid in
-  /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-        -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-  
-  The configuration variable mapred.child.ulimit can be used to control the
-  maximum virtual memory of the child processes. 
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.child.ulimit">mapred.child.ulimit</a></td><td></td><td>The maximum virtual memory, in KB, of a process launched by the 
-  Map-Reduce framework. This can be used to control both the Mapper/Reducer 
-  tasks and applications using Hadoop Pipes, Hadoop Streaming etc. 
-  By default it is left unspecified to let cluster admins control it via 
-  limits.conf and other such relevant mechanisms.
-  
-  Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to
-  JavaVM, else the VM might not start. 
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.child.tmp">mapred.child.tmp</a></td><td>./tmp</td><td> To set the value of tmp directory for map and reduce tasks.
-  If the value is an absolute path, it is directly assigned. Otherwise, it is
-  prepended with task's working directory. The java tasks are executed with
-  option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and
-  streaming are set with environment variable,
-   TMPDIR='the absolute path of the tmp dir'
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.inmem.merge.threshold">mapred.inmem.merge.threshold</a></td><td>1000</td><td>The threshold, in terms of the number of files 
-  for the in-memory merge process. When we accumulate threshold number of files
-  we initiate the in-memory merge and spill to disk. A value of 0 or less than
-  0 indicates we want to DON'T have any threshold and instead depend only on
-  the ramfs's memory consumption to trigger the merge.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.shuffle.merge.percent">mapred.job.shuffle.merge.percent</a></td><td>0.66</td><td>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.shuffle.input.buffer.percent">mapred.job.shuffle.input.buffer.percent</a></td><td>0.70</td><td>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.reduce.input.buffer.percent">mapred.job.reduce.input.buffer.percent</a></td><td>0.0</td><td>The percentage of memory- relative to the maximum heap size- to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.map.tasks.speculative.execution">mapred.map.tasks.speculative.execution</a></td><td>true</td><td>If true, then multiple instances of some map tasks 
-               may be executed in parallel.</td>
-</tr>
-<tr>
-<td><a name="mapred.reduce.tasks.speculative.execution">mapred.reduce.tasks.speculative.execution</a></td><td>true</td><td>If true, then multiple instances of some reduce tasks 
-               may be executed in parallel.</td>
-</tr>
-<tr>
-<td><a name="mapred.job.reuse.jvm.num.tasks">mapred.job.reuse.jvm.num.tasks</a></td><td>1</td><td>How many tasks to run per jvm. If set to -1, there is
-  no limit. 
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.min.split.size">mapred.min.split.size</a></td><td>0</td><td>The minimum size chunk that map input should be split
-  into.  Note that some file formats may have minimum split sizes that
-  take priority over this setting.</td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.maxtasks.per.job">mapred.jobtracker.maxtasks.per.job</a></td><td>-1</td><td>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </td>
-</tr>
-<tr>
-<td><a name="mapred.submit.replication">mapred.submit.replication</a></td><td>10</td><td>The replication level for submitted job files.  This
-  should be around the square root of the number of nodes.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.dns.interface">mapred.tasktracker.dns.interface</a></td><td>default</td><td>The name of the Network Interface from which a task
-  tracker should report its IP address.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.dns.nameserver">mapred.tasktracker.dns.nameserver</a></td><td>default</td><td>The host name or IP address of the name server (DNS)
-  which a TaskTracker should use to determine the host name used by
-  the JobTracker for communication and display purposes.
-  </td>
-</tr>
-<tr>
-<td><a name="tasktracker.http.threads">tasktracker.http.threads</a></td><td>40</td><td>The number of worker threads that for the http server. This is
-               used for map output fetching
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.tracker.http.address">mapred.task.tracker.http.address</a></td><td>0.0.0.0:50060</td><td>
-    The task tracker http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="keep.failed.task.files">keep.failed.task.files</a></td><td>false</td><td>Should the files for failed tasks be kept. This should only be 
-               used on jobs that are failing, because the storage is never
-               reclaimed. It also prevents the map outputs from being erased
-               from the reduce directory as they are consumed.</td>
-</tr>
-<tr>
-<td><a name="mapred.output.compress">mapred.output.compress</a></td><td>false</td><td>Should the job outputs be compressed?
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.output.compression.type">mapred.output.compression.type</a></td><td>RECORD</td><td>If the job outputs are to compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.output.compression.codec">mapred.output.compression.codec</a></td><td>org.apache.hadoop.io.compress.DefaultCodec</td><td>If the job outputs are compressed, how should they be compressed?
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.compress.map.output">mapred.compress.map.output</a></td><td>false</td><td>Should the outputs of the maps be compressed before being
-               sent across the network. Uses SequenceFile compression.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.map.output.compression.codec">mapred.map.output.compression.codec</a></td><td>org.apache.hadoop.io.compress.DefaultCodec</td><td>If the map outputs are compressed, how should they be 
-               compressed?
-  </td>
-</tr>
-<tr>
-<td><a name="io.seqfile.compress.blocksize">io.seqfile.compress.blocksize</a></td><td>1000000</td><td>The minimum block size for compression in block compressed 
-  				SequenceFiles.
-  </td>
-</tr>
-<tr>
-<td><a name="io.seqfile.lazydecompress">io.seqfile.lazydecompress</a></td><td>true</td><td>Should values of block-compressed SequenceFiles be decompressed
-  				only when necessary.
-  </td>
-</tr>
-<tr>
-<td><a name="io.seqfile.sorter.recordlimit">io.seqfile.sorter.recordlimit</a></td><td>1000000</td><td>The limit on number of records to be kept in memory in a spill 
-  				in SequenceFiles.Sorter
-  </td>
-</tr>
-<tr>
-<td><a name="map.sort.class">map.sort.class</a></td><td>org.apache.hadoop.util.QuickSort</td><td>The default sort class for sorting keys.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.userlog.limit.kb">mapred.userlog.limit.kb</a></td><td>0</td><td>The maximum size of user-logs of each task in KB. 0 disables the cap.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.userlog.retain.hours">mapred.userlog.retain.hours</a></td><td>24</td><td>The maximum time, in hours, for which the user-logs are to be 
-  				retained.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.hosts">mapred.hosts</a></td><td></td><td>Names a file that contains the list of nodes that may
-  connect to the jobtracker.  If the value is empty, all hosts are
-  permitted.</td>
-</tr>
-<tr>
-<td><a name="mapred.hosts.exclude">mapred.hosts.exclude</a></td><td></td><td>Names a file that contains the list of hosts that
-  should be excluded by the jobtracker.  If the value is empty, no
-  hosts are excluded.</td>
-</tr>
-<tr>
-<td><a name="mapred.max.tracker.blacklists">mapred.max.tracker.blacklists</a></td><td>4</td><td>The number of blacklists for a taskTracker by various jobs 
-               after which the task tracker could be blacklisted across
-               all jobs. The tracker will be given a tasks later 
-               (after a day). The tracker will become a healthy 
-               tracker after a restart. 
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.max.tracker.failures">mapred.max.tracker.failures</a></td><td>4</td><td>The number of task-failures on a tasktracker of a given job 
-               after which new tasks of that job aren't assigned to it.
-  </td>
-</tr>
-<tr>
-<td><a name="jobclient.output.filter">jobclient.output.filter</a></td><td>FAILED</td><td>The filter for controlling the output of the task's userlogs sent
-               to the console of the JobClient. 
-               The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and 
-               ALL.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.tracker.persist.jobstatus.active">mapred.job.tracker.persist.jobstatus.active</a></td><td>false</td><td>Indicates if persistency of job status information is
-      active or not.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.job.tracker.persist.jobstatus.hours">mapred.job.tracker.persist.jobstatus.hours</a></td><td>0</td><td>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops off the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.tracker.persist.jobstatus.dir">mapred.job.tracker.persist.jobstatus.dir</a></td><td>/jobtracker/jobsInfo</td><td>The directory where the job status information is persisted
-      in a file system to be available after it drops off the memory queue and
-      between jobtracker restarts.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.task.profile">mapred.task.profile</a></td><td>false</td><td>To set whether the system should collect profiler
-     information for some of the tasks in this job? The information is stored
-     in the user log directory. The value is "true" if task profiling
-     is enabled.</td>
-</tr>
-<tr>
-<td><a name="mapred.task.profile.maps">mapred.task.profile.maps</a></td><td>0-2</td><td> To set the ranges of map tasks to profile.
-    mapred.task.profile has to be set to true for the value to be accounted.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.task.profile.reduces">mapred.task.profile.reduces</a></td><td>0-2</td><td> To set the ranges of reduce tasks to profile.
-    mapred.task.profile has to be set to true for the value to be accounted.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.line.input.format.linespermap">mapred.line.input.format.linespermap</a></td><td>1</td><td> Number of lines per split in NLineInputFormat.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.skip.attempts.to.start.skipping">mapred.skip.attempts.to.start.skipping</a></td><td>2</td><td> The number of Task attempts AFTER which skip mode 
-    will be kicked off. When skip mode is kicked off, the 
-    tasks reports the range of records which it will process 
-    next, to the TaskTracker. So that on failures, TT knows which 
-    ones are possibly the bad records. On further executions, 
-    those are skipped.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.skip.map.auto.incr.proc.count">mapred.skip.map.auto.incr.proc.count</a></td><td>true</td><td> The flag which if set to true, 
-    SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented 
-    by MapRunner after invoking the map function. This value must be set to 
-    false for applications which process the records asynchronously 
-    or buffer the input records, for example streaming. 
-    In such cases applications should increment this counter on their own.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.skip.reduce.auto.incr.proc.count">mapred.skip.reduce.auto.incr.proc.count</a></td><td>true</td><td> The flag which if set to true, 
-    SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented 
-    by framework after invoking the reduce function. This value must be set to 
-    false for applications which process the records asynchronously 
-    or buffer the input records, for example streaming. 
-    In such cases applications should increment this counter on their own.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.skip.out.dir">mapred.skip.out.dir</a></td><td></td><td> If no value is specified here, the skipped records are 
-    written to the output directory at _logs/skip.
-    Users can stop writing skipped records by giving the value "none". 
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.skip.map.max.skip.records">mapred.skip.map.max.skip.records</a></td><td>0</td><td> The number of acceptable skip records surrounding the bad 
-    record PER bad record in mapper. The number includes the bad record as well.
-    To turn the feature of detection/skipping of bad records off, set the 
-    value to 0.
-    The framework tries to narrow down the skipped range by retrying  
-    until this threshold is met OR all attempts get exhausted for this task. 
-    Set the value to Long.MAX_VALUE to indicate that the framework need not try to 
-    narrow it down. Whatever records (depending on the application) get skipped are 
-    acceptable.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.skip.reduce.max.skip.groups">mapred.skip.reduce.max.skip.groups</a></td><td>0</td><td> The number of acceptable skip groups surrounding the bad 
-    group PER bad group in reducer. The number includes the bad group as well.
-    To turn the feature of detection/skipping of bad groups off, set the 
-    value to 0.
-    The framework tries to narrow down the skipped range by retrying  
-    until this threshold is met OR all attempts get exhausted for this task. 
-    Set the value to Long.MAX_VALUE to indicate that the framework need not try to 
-    narrow it down. Whatever groups (depending on the application) get skipped are 
-    acceptable.
-    </td>
-</tr>
-<tr>
-<td><a name="ipc.client.idlethreshold">ipc.client.idlethreshold</a></td><td>4000</td><td>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.client.kill.max">ipc.client.kill.max</a></td><td>10</td><td>Defines the maximum number of clients to disconnect in one go.
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.client.connection.maxidletime">ipc.client.connection.maxidletime</a></td><td>10000</td><td>The maximum time in msec after which a client will bring down the
-               connection to the server.
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.client.connect.max.retries">ipc.client.connect.max.retries</a></td><td>10</td><td>Indicates the number of retries a client will make to establish
-               a server connection.
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.server.listen.queue.size">ipc.server.listen.queue.size</a></td><td>128</td><td>Indicates the length of the listen queue for servers accepting
-               client connections.
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.server.tcpnodelay">ipc.server.tcpnodelay</a></td><td>false</td><td>Turn on/off Nagle's algorithm for the TCP socket connection on 
-  the server. Setting to true disables the algorithm and may decrease latency
-  with a cost of more/smaller packets. 
-  </td>
-</tr>
-<tr>
-<td><a name="ipc.client.tcpnodelay">ipc.client.tcpnodelay</a></td><td>false</td><td>Turn on/off Nagle's algorithm for the TCP socket connection on 
-  the client. Setting to true disables the algorithm and may decrease latency
-  with a cost of more/smaller packets. 
-  </td>
-</tr>
-<tr>
-<td><a name="job.end.retry.attempts">job.end.retry.attempts</a></td><td>0</td><td>Indicates how many times hadoop should attempt to contact the
-               notification URL </td>
-</tr>
-<tr>
-<td><a name="job.end.retry.interval">job.end.retry.interval</a></td><td>30000</td><td>Indicates time in milliseconds between notification URL retry
-                calls</td>
-</tr>
-<tr>
-<td><a name="webinterface.private.actions">webinterface.private.actions</a></td><td>false</td><td> If set to true, the web interfaces of JT and NN may contain 
-                actions, such as kill job, delete file, etc., that should 
-                not be exposed to the public. Enable this option if the interfaces 
-                are only reachable by those who have the right authorization.
-  </td>
-</tr>
-<tr>
-<td><a name="hadoop.rpc.socket.factory.class.default">hadoop.rpc.socket.factory.class.default</a></td><td>org.apache.hadoop.net.StandardSocketFactory</td><td> Default SocketFactory to use. This parameter is expected to be
-    formatted as "package.FactoryClassName".
-  </td>
-</tr>
-<tr>
-<td><a name="hadoop.rpc.socket.factory.class.ClientProtocol">hadoop.rpc.socket.factory.class.ClientProtocol</a></td><td></td><td> SocketFactory to use to connect to a DFS. If null or empty, use
-    hadoop.rpc.socket.factory.class.default. This socket factory is also used by
-    DFSClient to create sockets to DataNodes.
-  </td>
-</tr>
-<tr>
-<td><a name="hadoop.rpc.socket.factory.class.JobSubmissionProtocol">hadoop.rpc.socket.factory.class.JobSubmissionProtocol</a></td><td></td><td> SocketFactory to use to connect to a Map/Reduce master
-    (JobTracker). If null or empty, then use hadoop.rpc.socket.factory.class.default.
-  </td>
-</tr>
-<tr>
-<td><a name="hadoop.socks.server">hadoop.socks.server</a></td><td></td><td> Address (host:port) of the SOCKS server to be used by the
-    SocksSocketFactory.
-  </td>
-</tr>
-<tr>
-<td><a name="topology.node.switch.mapping.impl">topology.node.switch.mapping.impl</a></td><td>org.apache.hadoop.net.ScriptBasedMapping</td><td> The default implementation of the DNSToSwitchMapping. It
-    invokes a script specified in topology.script.file.name to resolve
-    node names. If the value for topology.script.file.name is not set, the
-    default value of DEFAULT_RACK is returned for all node names.
-  </td>
-</tr>
-<tr>
-<td><a name="topology.script.file.name">topology.script.file.name</a></td><td></td><td> The script name that should be invoked to resolve DNS names to
-    NetworkTopology names. Example: the script would take host.foo.bar as an
-    argument, and return /rack1 as the output.
-  </td>
-</tr>
-<tr>
-<td><a name="topology.script.number.args">topology.script.number.args</a></td><td>100</td><td> The max number of args that the script configured with 
-    topology.script.file.name should be run with. Each arg is an
-    IP address.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.cache.levels">mapred.task.cache.levels</a></td><td>2</td><td> This is the max level of the task cache. For example, if
-    the level is 2, the tasks cached are at the host level and at the rack
-    level.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.queue.names">mapred.queue.names</a></td><td>default</td><td> Comma separated list of queues configured for this jobtracker.
-    Jobs are added to queues and schedulers can configure different 
-    scheduling properties for the various queues. To configure a property 
-    for a queue, the name of the queue must match the name specified in this 
-    value. Queue properties that are common to all schedulers are configured 
-    here with the naming convention mapred.queue.$QUEUE-NAME.$PROPERTY-NAME,
-    e.g. mapred.queue.default.acl-submit-job.
-    The number of queues configured in this parameter could depend on the
-    type of scheduler being used, as specified in 
-    mapred.jobtracker.taskScheduler. For example, the JobQueueTaskScheduler
-    supports only a single queue, which is the default configured here.
-    Before adding more queues, ensure that the scheduler you've configured
-    supports multiple queues.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.acls.enabled">mapred.acls.enabled</a></td><td>false</td><td> Specifies whether ACLs are enabled, and should be checked
-    for various operations.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.queue.default.acl-submit-job">mapred.queue.default.acl-submit-job</a></td><td>*</td><td> Comma separated list of user and group names that are allowed
-    to submit jobs to the 'default' queue. The user list and the group list
-    are separated by a blank. For e.g. alice,bob group1,group2. 
-    If set to the special value '*', it means all users are allowed to 
-    submit jobs. 
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.queue.default.acl-administer-jobs">mapred.queue.default.acl-administer-jobs</a></td><td>*</td><td> Comma separated list of user and group names that are allowed
-    to delete jobs or modify job's priority for jobs not owned by the current
-    user in the 'default' queue. The user list and the group list
-    are separated by a blank. For e.g. alice,bob group1,group2. 
-    If set to the special value '*', it means all users are allowed to do 
-    this operation.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.queue.name">mapred.job.queue.name</a></td><td>default</td><td> Queue to which a job is submitted. This must match one of the
-    queues defined in mapred.queue.names for the system. Also, the ACL setup
-    for the queue must allow the current user to submit a job to the queue.
-    Before specifying a queue, ensure that the system is configured with 
-    the queue, and access is allowed for submitting jobs to the queue.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.indexcache.mb">mapred.tasktracker.indexcache.mb</a></td><td>10</td><td> The maximum memory that a task tracker allows for the 
-    index cache that is used when serving map outputs to reducers.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.merge.recordsBeforeProgress">mapred.merge.recordsBeforeProgress</a></td><td>10000</td><td> The number of records to process during merge before
-   sending a progress notification to the TaskTracker.
-  </td>
-</tr>
-</table>
-</body>
-</html>
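The removed table only documents the shipped defaults; in client code these properties are read and overridden through org.apache.hadoop.conf.Configuration. Below is a minimal sketch that assumes this standard API and simply reuses property names and default values listed in the table above; the class name ShowDefaults is purely illustrative.

    import org.apache.hadoop.conf.Configuration;

    public class ShowDefaults {
      public static void main(String[] args) {
        // Loads the bundled default resources plus any site overrides found on the classpath.
        Configuration conf = new Configuration();

        // Falls back to the defaults documented in the table above when no override exists.
        int replication = conf.getInt("dfs.replication", 3);
        long blockSize  = conf.getLong("dfs.block.size", 67108864L);
        String tracker  = conf.get("mapred.job.tracker", "local");

        // A per-client, programmatic override; cluster-wide changes belong in the site file.
        conf.set("dfs.replication", "2");

        System.out.println("dfs.replication    = " + replication);
        System.out.println("dfs.block.size     = " + blockSize);
        System.out.println("mapred.job.tracker = " + tracker);
      }
    }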

+ 0 - 302
docs/cn/hadoop_archives.html

@@ -1,302 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>Hadoop Archives</title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">项目</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">维基</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.18文档</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">文档</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">概述</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">快速入门</a>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">集群搭建</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS构架设计</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS使用指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS权限指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS配额管理指南</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">命令手册</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">FS Shell使用指南</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">DistCp使用指南</a>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Map-Reduce教程</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop本地库</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Streaming</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">Hadoop Archives</div>
-</div>
-<div class="menuitem">
-<a href="hod.html">Hadoop On Demand</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="hadoop_archives.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>Hadoop Archives</h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#%E4%BB%80%E4%B9%88%E6%98%AFHadoop+archives%3F"> 什么是Hadoop archives? </a>
-</li>
-<li>
-<a href="#%E5%A6%82%E4%BD%95%E5%88%9B%E5%BB%BAarchive%3F"> 如何创建archive? </a>
-</li>
-<li>
-<a href="#%E5%A6%82%E4%BD%95%E6%9F%A5%E7%9C%8Barchives%E4%B8%AD%E7%9A%84%E6%96%87%E4%BB%B6%3F"> 如何查看archives中的文件? </a>
-</li>
-</ul>
-</div>
-        
-<a name="N1000D"></a><a name="%E4%BB%80%E4%B9%88%E6%98%AFHadoop+archives%3F"></a>
-<h2 class="h3"> 什么是Hadoop archives? </h2>
-<div class="section">
-<p>
-        Hadoop archives are a special archive format. A Hadoop archive maps to a filesystem directory.
-        A Hadoop archive has a *.har extension. A Hadoop archive contains metadata (in the form of _index and _masterindx) and data (part-*) files. The _index file contains the names of the files that are part of the archive and their locations.
-        </p>
-</div>
-        
-<a name="N10017"></a><a name="%E5%A6%82%E4%BD%95%E5%88%9B%E5%BB%BAarchive%3F"></a>
-<h2 class="h3"> 如何创建archive? </h2>
-<div class="section">
-<p>
-        
-<span class="codefrag">用法: hadoop archive -archiveName name &lt;src&gt;* &lt;dest&gt;</span>
-        
-</p>
-<p>
-        The -archiveName option specifies the name of the archive you want to create, for example foo.har. The name should have a *.har extension. The inputs are filesystem pathnames, written the same way as usual. The created archive is saved in the destination directory. Note that creating an archive is a Map/Reduce job; you should run this command on a map reduce cluster. Here is an example:
-        </p>
-<p>
-        
-<span class="codefrag">hadoop archive -archiveName foo.har /user/hadoop/dir1 /user/hadoop/dir2 /user/zoo/</span>
-        
-</p>
-<p>
-        In the above example,
-        /user/hadoop/dir1 and /user/hadoop/dir2 will be archived in the following filesystem directory
-        -- /user/zoo/foo.har. The source files are not changed or removed when the archive is created.
-        </p>
-</div>
-        
-<a name="N1002F"></a><a name="%E5%A6%82%E4%BD%95%E6%9F%A5%E7%9C%8Barchives%E4%B8%AD%E7%9A%84%E6%96%87%E4%BB%B6%3F"></a>
-<h2 class="h3"> 如何查看archives中的文件? </h2>
-<div class="section">
-<p>
-        Archives are exposed as a filesystem layer, so all fs shell commands work on archives, but with a different URI.
-        Also, archives are immutable, so rename, delete and create operations return an error. The URI for Hadoop Archives is
-        </p>
-<p>
-<span class="codefrag">har://scheme-hostname:port/archivepath/fileinarchive</span>
-</p>
-<p>
-        If no scheme-hostname is provided, the default filesystem is assumed. In that case the URI takes the form
-        </p>
-<p>
-<span class="codefrag">
-        har:///archivepath/fileinarchive</span>
-</p>
-<p>
-        Here is an example of an archive. The input to the archive is /dir. The dir directory contains the files filea and fileb.
-        The command to archive /dir into /user/hadoop/foo.har is
-        </p>
-<p>
-<span class="codefrag">hadoop archive -archiveName foo.har /dir /user/hadoop</span>
-        
-</p>
-<p>
-        To get the list of files in the created archive, use the command
-        </p>
-<p>
-<span class="codefrag">hadoop dfs -lsr har:///user/hadoop/foo.har</span>
-</p>
-<p>The command to view the filea file in the archive is -
-        </p>
-<p>
-<span class="codefrag">hadoop dfs -cat har:///user/hadoop/foo.har/dir/filea</span>
-</p>
-</div>
-	
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>
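The removed page shows fs shell access to archives; the same har:// paths can also be read programmatically through the FileSystem API. Below is a minimal sketch that assumes the har filesystem implementation is available and registered in the configuration, and simply reuses the foo.har example paths from the page above; the class name ListArchive is purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchive {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Archives are exposed as a read-only filesystem under the har:// scheme,
        // so the ordinary FileSystem API works on paths inside the archive.
        Path inArchive = new Path("har:///user/hadoop/foo.har/dir");
        FileSystem fs = inArchive.getFileSystem(conf);

        // Roughly the programmatic counterpart of:
        //   hadoop dfs -lsr har:///user/hadoop/foo.har
        // (this sketch lists only the /dir level, non-recursively).
        for (FileStatus status : fs.listStatus(inArchive)) {
          System.out.println(status.getPath() + "\t" + status.getLen());
        }
      }
    }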

File diff suppressed because it is too large
+ 0 - 137
docs/cn/hadoop_archives.pdf


+ 0 - 226
docs/cn/hdfs-default.html

@@ -1,226 +0,0 @@
-<html>
-<body>
-<table border="1">
-<tr>
-<td>name</td><td>value</td><td>description</td>
-</tr>
-<tr>
-<td><a name="dfs.namenode.logging.level">dfs.namenode.logging.level</a></td><td>info</td><td>The logging level for dfs namenode. Other values are "dir"(trac
-e namespace mutations), "block"(trace block under/over replications and block
-creations/deletions), or "all".</td>
-</tr>
-<tr>
-<td><a name="dfs.secondary.http.address">dfs.secondary.http.address</a></td><td>0.0.0.0:50090</td><td>
-    The secondary namenode http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.address">dfs.datanode.address</a></td><td>0.0.0.0:50010</td><td>
-    The address where the datanode server will listen to.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.http.address">dfs.datanode.http.address</a></td><td>0.0.0.0:50075</td><td>
-    The datanode http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.ipc.address">dfs.datanode.ipc.address</a></td><td>0.0.0.0:50020</td><td>
-    The datanode ipc server address and port.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.handler.count">dfs.datanode.handler.count</a></td><td>3</td><td>The number of server threads for the datanode.</td>
-</tr>
-<tr>
-<td><a name="dfs.http.address">dfs.http.address</a></td><td>0.0.0.0:50070</td><td>
-    The address and the base port where the dfs namenode web ui will listen on.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.https.enable">dfs.https.enable</a></td><td>false</td><td>Decide if HTTPS(SSL) is supported on HDFS
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.https.need.client.auth">dfs.https.need.client.auth</a></td><td>false</td><td>Whether SSL client certificate authentication is required
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.https.server.keystore.resource">dfs.https.server.keystore.resource</a></td><td>ssl-server.xml</td><td>Resource file from which ssl server keystore
-  information will be extracted
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.https.client.keystore.resource">dfs.https.client.keystore.resource</a></td><td>ssl-client.xml</td><td>Resource file from which ssl client keystore
-  information will be extracted
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.https.address">dfs.datanode.https.address</a></td><td>0.0.0.0:50475</td><td></td>
-</tr>
-<tr>
-<td><a name="dfs.https.address">dfs.https.address</a></td><td>0.0.0.0:50470</td><td></td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.dns.interface">dfs.datanode.dns.interface</a></td><td>default</td><td>The name of the Network Interface from which a data node should 
-  report its IP address.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.dns.nameserver">dfs.datanode.dns.nameserver</a></td><td>default</td><td>The host name or IP address of the name server (DNS)
-  which a DataNode should use to determine the host name used by the
-  NameNode for communication and display purposes.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.replication.considerLoad">dfs.replication.considerLoad</a></td><td>true</td><td>Decide if chooseTarget considers the target's load or not
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.default.chunk.view.size">dfs.default.chunk.view.size</a></td><td>32768</td><td>The number of bytes to view for a file on the browser.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.datanode.du.reserved">dfs.datanode.du.reserved</a></td><td>0</td><td>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.name.dir">dfs.name.dir</a></td><td>${hadoop.tmp.dir}/dfs/name</td><td>Determines where on the local filesystem the DFS name node
-      should store the name table(fsimage).  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </td>
-</tr>
-<tr>
-<td><a name="dfs.name.edits.dir">dfs.name.edits.dir</a></td><td>${dfs.name.dir}</td><td>Determines where on the local filesystem the DFS name node
-      should store the transaction (edits) file. If this is a comma-delimited list
-      of directories then the transaction file is replicated in all of the 
-      directories, for redundancy. The default value is the same as dfs.name.dir.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.web.ugi">dfs.web.ugi</a></td><td>webuser,webgroup</td><td>The user account used by the web interface.
-    Syntax: USERNAME,GROUP1,GROUP2, ...
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.permissions">dfs.permissions</a></td><td>true</td><td>
-    If "true", enable permission checking in HDFS.
-    If "false", permission checking is turned off,
-    but all other behavior is unchanged.
-    Switching from one parameter value to the other does not change the mode,
-    owner or group of files or directories.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.permissions.supergroup">dfs.permissions.supergroup</a></td><td>supergroup</td><td>The name of the group of super-users.</td>
-</tr>
-<tr>
-<td><a name="dfs.data.dir">dfs.data.dir</a></td><td>${hadoop.tmp.dir}/dfs/data</td><td>Determines where on the local filesystem an DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.replication">dfs.replication</a></td><td>3</td><td>Default block replication. 
-  The actual number of replicas can be specified when the file is created.
-  The default is used if replication is not specified at create time.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.replication.max">dfs.replication.max</a></td><td>512</td><td>Maximal block replication. 
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.replication.min">dfs.replication.min</a></td><td>1</td><td>Minimal block replication. 
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.block.size">dfs.block.size</a></td><td>67108864</td><td>The default block size for new files.</td>
-</tr>
-<tr>
-<td><a name="dfs.df.interval">dfs.df.interval</a></td><td>60000</td><td>Disk usage statistics refresh interval in msec.</td>
-</tr>
-<tr>
-<td><a name="dfs.client.block.write.retries">dfs.client.block.write.retries</a></td><td>3</td><td>The number of retries for writing blocks to the data nodes, 
-  before we signal failure to the application.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.blockreport.intervalMsec">dfs.blockreport.intervalMsec</a></td><td>3600000</td><td>Determines block reporting interval in milliseconds.</td>
-</tr>
-<tr>
-<td><a name="dfs.blockreport.initialDelay">dfs.blockreport.initialDelay</a></td><td>0</td><td>Delay for first block report in seconds.</td>
-</tr>
-<tr>
-<td><a name="dfs.heartbeat.interval">dfs.heartbeat.interval</a></td><td>3</td><td>Determines datanode heartbeat interval in seconds.</td>
-</tr>
-<tr>
-<td><a name="dfs.namenode.handler.count">dfs.namenode.handler.count</a></td><td>10</td><td>The number of server threads for the namenode.</td>
-</tr>
-<tr>
-<td><a name="dfs.safemode.threshold.pct">dfs.safemode.threshold.pct</a></td><td>0.999f</td><td>
-    Specifies the percentage of blocks that should satisfy 
-    the minimal replication requirement defined by dfs.replication.min.
-    Values less than or equal to 0 mean not to start in safe mode.
-    Values greater than 1 will make safe mode permanent.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.safemode.extension">dfs.safemode.extension</a></td><td>30000</td><td>
-    Determines extension of safe mode in milliseconds 
-    after the threshold level is reached.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.balance.bandwidthPerSec">dfs.balance.bandwidthPerSec</a></td><td>1048576</td><td>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing, in terms of
-        bytes per second.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.hosts">dfs.hosts</a></td><td></td><td>Names a file that contains a list of hosts that are
-  permitted to connect to the namenode. The full pathname of the file
-  must be specified.  If the value is empty, all hosts are
-  permitted.</td>
-</tr>
-<tr>
-<td><a name="dfs.hosts.exclude">dfs.hosts.exclude</a></td><td></td><td>Names a file that contains a list of hosts that are
-  not permitted to connect to the namenode.  The full pathname of the
-  file must be specified.  If the value is empty, no hosts are
-  excluded.</td>
-</tr>
-<tr>
-<td><a name="dfs.max.objects">dfs.max.objects</a></td><td>0</td><td>The maximum number of files, directories and blocks
-  dfs supports. A value of zero indicates no limit to the number
-  of objects that dfs supports.
-  </td>
-</tr>
-<tr>
-<td><a name="dfs.namenode.decommission.interval">dfs.namenode.decommission.interval</a></td><td>30</td><td>Namenode periodicity in seconds to check if decommission is 
-  complete.</td>
-</tr>
-<tr>
-<td><a name="dfs.namenode.decommission.nodes.per.interval">dfs.namenode.decommission.nodes.per.interval</a></td><td>5</td><td>The number of nodes namenode checks if decommission is complete
-  in each dfs.namenode.decommission.interval.</td>
-</tr>
-<tr>
-<td><a name="dfs.replication.interval">dfs.replication.interval</a></td><td>3</td><td>The periodicity in seconds with which the namenode computes 
-  replication work for datanodes. </td>
-</tr>
-<tr>
-<td><a name="dfs.access.time.precision">dfs.access.time.precision</a></td><td>3600000</td><td>The access time for HDFS file is precise upto this value. 
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </td>
-</tr>
-</table>
-</body>
-</html>
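The properties above are the shipped defaults; a deployment normally overrides them in its site configuration file (hadoop-site.xml, or hdfs-site.xml in releases where the configuration files are split) rather than by editing the defaults. As a minimal sketch, the same properties can also be read, or overridden per client, through the standard Configuration API; the override values used below (replication 2, 128 MB blocks) are illustrative only, not defaults from the table.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    // Minimal sketch: reading and overriding two of the properties listed above.
    public class DfsConfExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();       // loads the defaults plus any site overrides
        int replication = conf.getInt("dfs.replication", 3);
        long blockSize = conf.getLong("dfs.block.size", 67108864L);
        System.out.println("replication=" + replication + ", block size=" + blockSize);

        conf.setInt("dfs.replication", 2);              // per-client override for files created with this conf
        conf.setLong("dfs.block.size", 134217728L);     // 128 MB; illustrative value
        FileSystem fs = FileSystem.get(conf);
        System.out.println("default filesystem: " + fs.getUri());
      }
    }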

+ 0 - 664
docs/cn/hdfs_design.html

@@ -1,664 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title> 
-      Hadoop分布式文件系统:架构和设计
-    </title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">项目</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">维基</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.18文档</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">文档</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">概述</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">快速入门</a>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">集群搭建</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">HDFS构架设计</div>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS使用指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS权限指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS配额管理指南</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">命令手册</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">FS Shell使用指南</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">DistCp使用指南</a>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Map-Reduce教程</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop本地库</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hod.html">Hadoop On Demand</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="hdfs_design.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1> 
-      Hadoop分布式文件系统:架构和设计
-    </h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#%E5%BC%95%E8%A8%80"> 引言 </a>
-</li>
-<li>
-<a href="#%E5%89%8D%E6%8F%90%E5%92%8C%E8%AE%BE%E8%AE%A1%E7%9B%AE%E6%A0%87"> 前提和设计目标 </a>
-<ul class="minitoc">
-<li>
-<a href="#%E7%A1%AC%E4%BB%B6%E9%94%99%E8%AF%AF"> 硬件错误 </a>
-</li>
-<li>
-<a href="#%E6%B5%81%E5%BC%8F%E6%95%B0%E6%8D%AE%E8%AE%BF%E9%97%AE"> 流式数据访问 </a>
-</li>
-<li>
-<a href="#%E5%A4%A7%E8%A7%84%E6%A8%A1%E6%95%B0%E6%8D%AE%E9%9B%86"> 大规模数据集 </a>
-</li>
-<li>
-<a href="#%E7%AE%80%E5%8D%95%E7%9A%84%E4%B8%80%E8%87%B4%E6%80%A7%E6%A8%A1%E5%9E%8B"> 简单的一致性模型 </a>
-</li>
-<li>
-<a href="#%E2%80%9C%E7%A7%BB%E5%8A%A8%E8%AE%A1%E7%AE%97%E6%AF%94%E7%A7%BB%E5%8A%A8%E6%95%B0%E6%8D%AE%E6%9B%B4%E5%88%92%E7%AE%97%E2%80%9D"> &ldquo;移动计算比移动数据更划算&rdquo; </a>
-</li>
-<li>
-<a href="#%E5%BC%82%E6%9E%84%E8%BD%AF%E7%A1%AC%E4%BB%B6%E5%B9%B3%E5%8F%B0%E9%97%B4%E7%9A%84%E5%8F%AF%E7%A7%BB%E6%A4%8D%E6%80%A7"> 异构软硬件平台间的可移植性 </a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#Namenode+%E5%92%8C+Datanode"> Namenode 和 Datanode </a>
-</li>
-<li>
-<a href="#%E6%96%87%E4%BB%B6%E7%B3%BB%E7%BB%9F%E7%9A%84%E5%90%8D%E5%AD%97%E7%A9%BA%E9%97%B4+%28namespace%29"> 文件系统的名字空间 (namespace) </a>
-</li>
-<li>
-<a href="#%E6%95%B0%E6%8D%AE%E5%A4%8D%E5%88%B6"> 数据复制 </a>
-<ul class="minitoc">
-<li>
-<a href="#%E5%89%AF%E6%9C%AC%E5%AD%98%E6%94%BE%3A+%E6%9C%80%E6%9C%80%E5%BC%80%E5%A7%8B%E7%9A%84%E4%B8%80%E6%AD%A5"> 副本存放: 最最开始的一步 </a>
-</li>
-<li>
-<a href="#%E5%89%AF%E6%9C%AC%E9%80%89%E6%8B%A9"> 副本选择 </a>
-</li>
-<li>
-<a href="#%E5%AE%89%E5%85%A8%E6%A8%A1%E5%BC%8F"> 安全模式 </a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E6%96%87%E4%BB%B6%E7%B3%BB%E7%BB%9F%E5%85%83%E6%95%B0%E6%8D%AE%E7%9A%84%E6%8C%81%E4%B9%85%E5%8C%96"> 文件系统元数据的持久化 </a>
-</li>
-<li>
-<a href="#%E9%80%9A%E8%AE%AF%E5%8D%8F%E8%AE%AE"> 通讯协议 </a>
-</li>
-<li>
-<a href="#%E5%81%A5%E5%A3%AE%E6%80%A7"> 健壮性 </a>
-<ul class="minitoc">
-<li>
-<a href="#%E7%A3%81%E7%9B%98%E6%95%B0%E6%8D%AE%E9%94%99%E8%AF%AF%EF%BC%8C%E5%BF%83%E8%B7%B3%E6%A3%80%E6%B5%8B%E5%92%8C%E9%87%8D%E6%96%B0%E5%A4%8D%E5%88%B6"> 磁盘数据错误,心跳检测和重新复制 </a>
-</li>
-<li>
-<a href="#%E9%9B%86%E7%BE%A4%E5%9D%87%E8%A1%A1"> 集群均衡 </a>
-</li>
-<li>
-<a href="#%E6%95%B0%E6%8D%AE%E5%AE%8C%E6%95%B4%E6%80%A7"> 数据完整性 </a>
-</li>
-<li>
-<a href="#%E5%85%83%E6%95%B0%E6%8D%AE%E7%A3%81%E7%9B%98%E9%94%99%E8%AF%AF"> 元数据磁盘错误 </a>
-</li>
-<li>
-<a href="#%E5%BF%AB%E7%85%A7"> 快照 </a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E6%95%B0%E6%8D%AE%E7%BB%84%E7%BB%87"> 数据组织 </a>
-<ul class="minitoc">
-<li>
-<a href="#%E6%95%B0%E6%8D%AE%E5%9D%97"> 数据块 </a>
-</li>
-<li>
-<a href="#Staging"> Staging </a>
-</li>
-<li>
-<a href="#%E6%B5%81%E6%B0%B4%E7%BA%BF%E5%A4%8D%E5%88%B6"> 流水线复制 </a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E5%8F%AF%E8%AE%BF%E9%97%AE%E6%80%A7"> 可访问性 </a>
-<ul class="minitoc">
-<li>
-<a href="#DFSShell"> DFSShell </a>
-</li>
-<li>
-<a href="#DFSAdmin"> DFSAdmin </a>
-</li>
-<li>
-<a href="#%E6%B5%8F%E8%A7%88%E5%99%A8%E6%8E%A5%E5%8F%A3"> 浏览器接口 </a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E5%AD%98%E5%82%A8%E7%A9%BA%E9%97%B4%E5%9B%9E%E6%94%B6"> 存储空间回收 </a>
-<ul class="minitoc">
-<li>
-<a href="#%E6%96%87%E4%BB%B6%E7%9A%84%E5%88%A0%E9%99%A4%E5%92%8C%E6%81%A2%E5%A4%8D"> 文件的删除和恢复 </a>
-</li>
-<li>
-<a href="#%E5%87%8F%E5%B0%91%E5%89%AF%E6%9C%AC%E7%B3%BB%E6%95%B0"> 减少副本系数 </a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99"> 参考资料 </a>
-</li>
-</ul>
-</div>
-    
-<a name="N10014"></a><a name="%E5%BC%95%E8%A8%80"></a>
-<h2 class="h3"> 引言 </h2>
-<div class="section">
-<p>
-	      Hadoop分布式文件系统(<acronym title="Hadoop分布式文件系统">HDFS</acronym>)被设计成适合运行在通用硬件(commodity hardware)上的分布式文件系统。它和现有的分布式文件系统有很多共同点。但同时,它和其他的分布式文件系统的区别也是很明显的。HDFS是一个高度容错性的系统,适合部署在廉价的机器上。HDFS能提供高吞吐量的数据访问,非常适合大规模数据集上的应用。HDFS放宽了一部分POSIX约束,来实现流式读取文件系统数据的目的。HDFS在最开始是作为Apache Nutch搜索引擎项目的基础架构而开发的。HDFS是Apache Hadoop Core项目的一部分。这个项目的地址是<a href="http://hadoop.apache.org/core/">http://hadoop.apache.org/core/</a>。
-      </p>
-</div>
-
-    
-<a name="N10026"></a><a name="%E5%89%8D%E6%8F%90%E5%92%8C%E8%AE%BE%E8%AE%A1%E7%9B%AE%E6%A0%87"></a>
-<h2 class="h3"> 前提和设计目标 </h2>
-<div class="section">
-<a name="N1002C"></a><a name="%E7%A1%AC%E4%BB%B6%E9%94%99%E8%AF%AF"></a>
-<h3 class="h4"> 硬件错误 </h3>
-<p>
-	硬件错误是常态而不是异常。HDFS可能由成百上千的服务器所构成,每个服务器上存储着文件系统的部分数据。我们面对的现实是构成系统的组件数目是巨大的,而且任一组件都有可能失效,这意味着总是有一部分HDFS的组件是不工作的。因此错误检测和快速、自动的恢复是HDFS最核心的架构目标。
-       </p>
-<a name="N10036"></a><a name="%E6%B5%81%E5%BC%8F%E6%95%B0%E6%8D%AE%E8%AE%BF%E9%97%AE"></a>
-<h3 class="h4"> 流式数据访问 </h3>
-<p>
-运行在HDFS上的应用和普通的应用不同,需要流式访问它们的数据集。HDFS的设计中更多的考虑到了数据批处理,而不是用户交互处理。比之数据访问的低延迟问题,更关键的在于数据访问的高吞吐量。POSIX标准设置的很多硬性约束对HDFS应用系统不是必需的。为了提高数据的吞吐量,在一些关键方面对POSIX的语义做了一些修改。        
-        </p>
-<a name="N10040"></a><a name="%E5%A4%A7%E8%A7%84%E6%A8%A1%E6%95%B0%E6%8D%AE%E9%9B%86"></a>
-<h3 class="h4"> 大规模数据集 </h3>
-<p>
-        运行在HDFS上的应用具有很大的数据集。HDFS上的一个典型文件大小一般都在G字节至T字节。因此,HDFS被调节以支持大文件存储。它应该能提供整体上高的数据传输带宽,能在一个集群里扩展到数百个节点。一个单一的HDFS实例应该能支撑数以千万计的文件。
-        </p>
-<a name="N1004A"></a><a name="%E7%AE%80%E5%8D%95%E7%9A%84%E4%B8%80%E8%87%B4%E6%80%A7%E6%A8%A1%E5%9E%8B"></a>
-<h3 class="h4"> 简单的一致性模型 </h3>
-<p>
-        HDFS应用需要一个&ldquo;一次写入多次读取&rdquo;的文件访问模型。一个文件经过创建、写入和关闭之后就不需要改变。这一假设简化了数据一致性问题,并且使高吞吐量的数据访问成为可能。Map/Reduce应用或者网络爬虫应用都非常适合这个模型。目前还有计划在将来扩充这个模型,使之支持文件的附加写操作。 
-        </p>
-<a name="N10058"></a><a name="%E2%80%9C%E7%A7%BB%E5%8A%A8%E8%AE%A1%E7%AE%97%E6%AF%94%E7%A7%BB%E5%8A%A8%E6%95%B0%E6%8D%AE%E6%9B%B4%E5%88%92%E7%AE%97%E2%80%9D"></a>
-<h3 class="h4"> &ldquo;移动计算比移动数据更划算&rdquo; </h3>
-<p>
-        一个应用请求的计算,离它操作的数据越近就越高效,在数据达到海量级别的时候更是如此。因为这样就能降低网络阻塞的影响,提高系统数据的吞吐量。将计算移动到数据附近,比之将数据移动到应用所在显然更好。HDFS为应用提供了将它们自己移动到数据附近的接口。 
-        </p>
-<a name="N10062"></a><a name="%E5%BC%82%E6%9E%84%E8%BD%AF%E7%A1%AC%E4%BB%B6%E5%B9%B3%E5%8F%B0%E9%97%B4%E7%9A%84%E5%8F%AF%E7%A7%BB%E6%A4%8D%E6%80%A7"></a>
-<h3 class="h4"> 异构软硬件平台间的可移植性 </h3>
-<p>
-        HDFS在设计的时候就考虑到平台的可移植性。这种特性方便了HDFS作为大规模数据应用平台的推广。
-        </p>
-</div>
-
- 
-    
-<a name="N1006D"></a><a name="Namenode+%E5%92%8C+Datanode"></a>
-<h2 class="h3"> Namenode 和 Datanode </h2>
-<div class="section">
-<p>
-      HDFS采用master/slave架构。一个HDFS集群是由一个Namenode和一定数目的Datanodes组成。Namenode是一个中心服务器,负责管理文件系统的名字空间(namespace)以及客户端对文件的访问。集群中的Datanode一般是一个节点一个,负责管理它所在节点上的存储。HDFS暴露了文件系统的名字空间,用户能够以文件的形式在上面存储数据。从内部看,一个文件其实被分成一个或多个数据块,这些块存储在一组Datanode上。Namenode执行文件系统的名字空间操作,比如打开、关闭、重命名文件或目录。它也负责确定数据块到具体Datanode节点的映射。Datanode负责处理文件系统客户端的读写请求。在Namenode的统一调度下进行数据块的创建、删除和复制。
-      </p>
-<div id="" style="text-align: center;">
-<img id="" class="figure" alt="HDFS 架构" src="images/hdfsarchitecture.gif"></div>
-<p>
-      Namenode和Datanode被设计成可以在普通的商用机器上运行。这些机器一般运行着GNU/Linux操作系统(<acronym title="操作系统">OS</acronym>)。HDFS采用Java语言开发,因此任何支持Java的机器都可以部署Namenode或Datanode。由于采用了可移植性极强的Java语言,使得HDFS可以部署到多种类型的机器上。一个典型的部署场景是一台机器上只运行一个Namenode实例,而集群中的其它机器分别运行一个Datanode实例。这种架构并不排斥在一台机器上运行多个Datanode,只不过这样的情况比较少见。
-      </p>
-<p>
-      集群中单一Namenode的结构大大简化了系统的架构。Namenode是所有HDFS元数据的仲裁者和管理者,这样,用户数据永远不会流过Namenode。
-      </p>
-</div> 
-
-    
-<a name="N10089"></a><a name="%E6%96%87%E4%BB%B6%E7%B3%BB%E7%BB%9F%E7%9A%84%E5%90%8D%E5%AD%97%E7%A9%BA%E9%97%B4+%28namespace%29"></a>
-<h2 class="h3"> 文件系统的名字空间 (namespace) </h2>
-<div class="section">
-<p>
-      HDFS支持传统的层次型文件组织结构。用户或者应用程序可以创建目录,然后将文件保存在这些目录里。文件系统名字空间的层次结构和大多数现有的文件系统类似:用户可以创建、删除、移动或重命名文件。当前,HDFS不支持用户磁盘配额和访问权限控制,也不支持硬链接和软链接。但是HDFS架构并不妨碍实现这些特性。
-      </p>
-<p>
-      Namenode负责维护文件系统的名字空间,任何对文件系统名字空间或属性的修改都将被Namenode记录下来。应用程序可以设置HDFS保存的文件的副本数目。文件副本的数目称为文件的副本系数,这个信息也是由Namenode保存的。
-      </p>
-</div>
-
-    
-<a name="N10096"></a><a name="%E6%95%B0%E6%8D%AE%E5%A4%8D%E5%88%B6"></a>
-<h2 class="h3"> 数据复制 </h2>
-<div class="section">
-<p>
-      HDFS被设计成能够在一个大集群中跨机器可靠地存储超大文件。它将每个文件存储成一系列的数据块,除了最后一个,所有的数据块都是同样大小的。为了容错,文件的所有数据块都会有副本。每个文件的数据块大小和副本系数都是可配置的。应用程序可以指定某个文件的副本数目。副本系数可以在文件创建的时候指定,也可以在之后改变。HDFS中的文件都是一次性写入的,并且严格要求在任何时候只能有一个写入者。 
-      </p>
-<p>
-      Namenode全权管理数据块的复制,它周期性地从集群中的每个Datanode接收心跳信号和块状态报告(Blockreport)。接收到心跳信号意味着该Datanode节点工作正常。块状态报告包含了一个该Datanode上所有数据块的列表。
-    </p>
-<div id="" style="text-align: center;">
-<img id="" class="figure" alt="HDFS Datanodes" src="images/hdfsdatanodes.gif"></div>
-<a name="N100A6"></a><a name="%E5%89%AF%E6%9C%AC%E5%AD%98%E6%94%BE%3A+%E6%9C%80%E6%9C%80%E5%BC%80%E5%A7%8B%E7%9A%84%E4%B8%80%E6%AD%A5"></a>
-<h3 class="h4"> 副本存放: 最最开始的一步 </h3>
-<p>
-        副本的存放是HDFS可靠性和性能的关键。优化的副本存放策略是HDFS区分于其他大部分分布式文件系统的重要特性。这种特性需要做大量的调优,并需要经验的积累。HDFS采用一种称为机架感知(rack-aware)的策略来改进数据的可靠性、可用性和网络带宽的利用率。目前实现的副本存放策略只是在这个方向上的第一步。实现这个策略的短期目标是验证它在生产环境下的有效性,观察它的行为,为实现更先进的策略打下测试和研究的基础。 
-        </p>
-<p>
-	大型HDFS实例一般运行在跨越多个机架的计算机组成的集群上,不同机架上的两台机器之间的通讯需要经过交换机。在大多数情况下,同一个机架内的两台机器间的带宽会比不同机架的两台机器间的带宽大。        
-        </p>
-<p>
-        通过一个<a href="cluster_setup.html#Hadoop%E7%9A%84%E6%9C%BA%E6%9E%B6%E6%84%9F%E7%9F%A5">机架感知</a>的过程,Namenode可以确定每个Datanode所属的机架id。一个简单但没有优化的策略就是将副本存放在不同的机架上。这样可以有效防止当整个机架失效时数据的丢失,并且允许读数据的时候充分利用多个机架的带宽。这种策略设置可以将副本均匀分布在集群中,有利于当组件失效情况下的负载均衡。但是,因为这种策略的一个写操作需要传输数据块到多个机架,这增加了写的代价。 
-        </p>
-<p>
-        在大多数情况下，副本系数是3，HDFS的存放策略是将一个副本存放在本地机架的节点上，一个副本放在同一机架的另一个节点上，最后一个副本放在不同机架的节点上。这种策略减少了机架间的数据传输，这就提高了写操作的效率。机架的错误远远比节点的错误少，所以这个策略不会影响到数据的可靠性和可用性。与此同时，因为数据块只放在两个（不是三个）不同的机架上，所以此策略减少了读取数据时需要的网络传输总带宽。在这种策略下，副本并不是均匀分布在不同的机架上。三分之一的副本在一个节点上，三分之二的副本在一个机架上，其他副本均匀分布在剩下的机架中，这一策略在不损害数据可靠性和读取性能的情况下改进了写的性能。
-        </p>
-<p>
-        当前,这里介绍的默认副本存放策略正在开发的过程中。
-        </p>
-<a name="N100C0"></a><a name="%E5%89%AF%E6%9C%AC%E9%80%89%E6%8B%A9"></a>
-<h3 class="h4"> 副本选择 </h3>
-<p>
-        为了降低整体的带宽消耗和读取延时,HDFS会尽量让读取程序读取离它最近的副本。如果在读取程序的同一个机架上有一个副本,那么就读取该副本。如果一个HDFS集群跨越多个数据中心,那么客户端也将首先读本地数据中心的副本。
-        </p>
-<a name="N100CA"></a><a name="%E5%AE%89%E5%85%A8%E6%A8%A1%E5%BC%8F"></a>
-<h3 class="h4"> 安全模式 </h3>
-<p>
-	Namenode启动后会进入一个称为安全模式的特殊状态。处于安全模式的Namenode是不会进行数据块的复制的。Namenode从所有的 Datanode接收心跳信号和块状态报告。块状态报告包括了某个Datanode所有的数据块列表。每个数据块都有一个指定的最小副本数。当Namenode检测确认某个数据块的副本数目达到这个最小值,那么该数据块就会被认为是副本安全(safely replicated)的;在一定百分比(这个参数可配置)的数据块被Namenode检测确认是安全之后(加上一个额外的30秒等待时间),Namenode将退出安全模式状态。接下来它会确定还有哪些数据块的副本没有达到指定数目,并将这些数据块复制到其他Datanode上。
-        </p>
-</div>
-
-    
-<a name="N100D5"></a><a name="%E6%96%87%E4%BB%B6%E7%B3%BB%E7%BB%9F%E5%85%83%E6%95%B0%E6%8D%AE%E7%9A%84%E6%8C%81%E4%B9%85%E5%8C%96"></a>
-<h2 class="h3"> 文件系统元数据的持久化 </h2>
-<div class="section">
-<p>
-	Namenode上保存着HDFS的名字空间。对于任何对文件系统元数据产生修改的操作,Namenode都会使用一种称为EditLog的事务日志记录下来。例如,在HDFS中创建一个文件,Namenode就会在Editlog中插入一条记录来表示;同样地,修改文件的副本系数也将往Editlog插入一条记录。Namenode在本地操作系统的文件系统中存储这个Editlog。整个文件系统的名字空间,包括数据块到文件的映射、文件的属性等,都存储在一个称为FsImage的文件中,这个文件也是放在Namenode所在的本地文件系统上。
-        </p>
-<p>
-        Namenode在内存中保存着整个文件系统的名字空间和文件数据块映射(Blockmap)的映像。这个关键的元数据结构设计得很紧凑,因而一个有4G内存的Namenode足够支撑大量的文件和目录。当Namenode启动时,它从硬盘中读取Editlog和FsImage,将所有Editlog中的事务作用在内存中的FsImage上,并将这个新版本的FsImage从内存中保存到本地磁盘上,然后删除旧的Editlog,因为这个旧的Editlog的事务都已经作用在FsImage上了。这个过程称为一个检查点(checkpoint)。在当前实现中,检查点只发生在Namenode启动时,在不久的将来将实现支持周期性的检查点。
-        </p>
-<p>
-	Datanode将HDFS数据以文件的形式存储在本地的文件系统中,它并不知道有关HDFS文件的信息。它把每个HDFS数据块存储在本地文件系统的一个单独的文件中。Datanode并不在同一个目录创建所有的文件,实际上,它用试探的方法来确定每个目录的最佳文件数目,并且在适当的时候创建子目录。在同一个目录中创建所有的本地文件并不是最优的选择,这是因为本地文件系统可能无法高效地在单个目录中支持大量的文件。当一个Datanode启动时,它会扫描本地文件系统,产生一个这些本地文件对应的所有HDFS数据块的列表,然后作为报告发送到Namenode,这个报告就是块状态报告。         
-        </p>
-</div>
-
-    
-<a name="N100E5"></a><a name="%E9%80%9A%E8%AE%AF%E5%8D%8F%E8%AE%AE"></a>
-<h2 class="h3"> 通讯协议 </h2>
-<div class="section">
-<p>
-      所有的HDFS通讯协议都是建立在TCP/IP协议之上。客户端通过一个可配置的<acronym title="Transmission Control Protocol">TCP</acronym>端口连接到Namenode,通过ClientProtocol协议与Namenode交互。而Datanode使用DatanodeProtocol协议与Namenode交互。一个远程过程调用(<acronym title="Remote Procedure Call">RPC</acronym>)模型被抽象出来封装ClientProtocol和Datanodeprotocol协议。在设计上,Namenode不会主动发起RPC,而是响应来自客户端或 Datanode 的RPC请求。 
-      </p>
-</div> 
-
-    
-<a name="N100F7"></a><a name="%E5%81%A5%E5%A3%AE%E6%80%A7"></a>
-<h2 class="h3"> 健壮性 </h2>
-<div class="section">
-<p>
-	      HDFS的主要目标就是即使在出错的情况下也要保证数据存储的可靠性。常见的三种出错情况是:Namenode出错, Datanode出错和网络割裂(network partitions)。
-      </p>
-<a name="N10100"></a><a name="%E7%A3%81%E7%9B%98%E6%95%B0%E6%8D%AE%E9%94%99%E8%AF%AF%EF%BC%8C%E5%BF%83%E8%B7%B3%E6%A3%80%E6%B5%8B%E5%92%8C%E9%87%8D%E6%96%B0%E5%A4%8D%E5%88%B6"></a>
-<h3 class="h4"> 磁盘数据错误,心跳检测和重新复制 </h3>
-<p>
-        每个Datanode节点周期性地向Namenode发送心跳信号。网络割裂可能导致一部分Datanode跟Namenode失去联系。Namenode通过心跳信号的缺失来检测这一情况,并将这些近期不再发送心跳信号Datanode标记为宕机,不会再将新的<acronym title="Input/Output">IO</acronym>请求发给它们。任何存储在宕机Datanode上的数据将不再有效。Datanode的宕机可能会引起一些数据块的副本系数低于指定值,Namenode不断地检测这些需要复制的数据块,一旦发现就启动复制操作。在下列情况下,可能需要重新复制:某个Datanode节点失效,某个副本遭到损坏,Datanode上的硬盘错误,或者文件的副本系数增大。
-        </p>
-<a name="N1010E"></a><a name="%E9%9B%86%E7%BE%A4%E5%9D%87%E8%A1%A1"></a>
-<h3 class="h4"> 集群均衡 </h3>
-<p>
-        HDFS的架构支持数据均衡策略。如果某个Datanode节点上的空闲空间低于特定的临界点,按照均衡策略系统就会自动地将数据从这个Datanode移动到其他空闲的Datanode。当对某个文件的请求突然增加,那么也可能启动一个计划创建该文件新的副本,并且同时重新平衡集群中的其他数据。这些均衡策略目前还没有实现。
-        </p>
-<a name="N10118"></a><a name="%E6%95%B0%E6%8D%AE%E5%AE%8C%E6%95%B4%E6%80%A7"></a>
-<h3 class="h4"> 数据完整性 </h3>
-<p>
-        <!-- XXX "checksum checking" sounds funny -->
-        从某个Datanode获取的数据块有可能是损坏的,损坏可能是由Datanode的存储设备错误、网络错误或者软件bug造成的。HDFS客户端软件实现了对HDFS文件内容的校验和(checksum)检查。当客户端创建一个新的HDFS文件,会计算这个文件每个数据块的校验和,并将校验和作为一个单独的隐藏文件保存在同一个HDFS名字空间下。当客户端获取文件内容后,它会检验从Datanode获取的数据跟相应的校验和文件中的校验和是否匹配,如果不匹配,客户端可以选择从其他Datanode获取该数据块的副本。
-        </p>
-<a name="N10124"></a><a name="%E5%85%83%E6%95%B0%E6%8D%AE%E7%A3%81%E7%9B%98%E9%94%99%E8%AF%AF"></a>
-<h3 class="h4"> 元数据磁盘错误 </h3>
-<p>
-        FsImage和Editlog是HDFS的核心数据结构。如果这些文件损坏了,整个HDFS实例都将失效。因而,Namenode可以配置成支持维护多个FsImage和Editlog的副本。任何对FsImage或者Editlog的修改,都将同步到它们的副本上。这种多副本的同步操作可能会降低Namenode每秒处理的名字空间事务数量。然而这个代价是可以接受的,因为即使HDFS的应用是数据密集的,它们也非元数据密集的。当Namenode重启的时候,它会选取最近的完整的FsImage和Editlog来使用。
-        </p>
-<p> 
-        Namenode是HDFS集群中的单点故障(single point of failure)所在。如果Namenode机器故障,是需要手工干预的。目前,自动重启或在另一台机器上做Namenode故障转移的功能还没实现。
-        </p>
-<a name="N10131"></a><a name="%E5%BF%AB%E7%85%A7"></a>
-<h3 class="h4"> 快照 </h3>
-<p>
-        快照支持某一特定时刻的数据的复制备份。利用快照,可以让HDFS在数据损坏时恢复到过去一个已知正确的时间点。HDFS目前还不支持快照功能,但计划在将来的版本进行支持。
-        </p>
-</div>
-    
-<a name="N1013C"></a><a name="%E6%95%B0%E6%8D%AE%E7%BB%84%E7%BB%87"></a>
-<h2 class="h3"> 数据组织 </h2>
-<div class="section">
-<a name="N10144"></a><a name="%E6%95%B0%E6%8D%AE%E5%9D%97"></a>
-<h3 class="h4"> 数据块 </h3>
-<p>
-        HDFS被设计成支持大文件,适用HDFS的是那些需要处理大规模的数据集的应用。这些应用都是只写入数据一次,但却读取一次或多次,并且读取速度应能满足流式读取的需要。HDFS支持文件的&ldquo;一次写入多次读取&rdquo;语义。一个典型的数据块大小是64MB。因而,HDFS中的文件总是按照64M被切分成不同的块,每个块尽可能地存储于不同的Datanode中。
-        </p>
-<a name="N1014E"></a><a name="Staging"></a>
-<h3 class="h4"> Staging </h3>
-<p>
-        客户端创建文件的请求其实并没有立即发送给Namenode,事实上,在刚开始阶段HDFS客户端会先将文件数据缓存到本地的一个临时文件。应用程序的写操作被透明地重定向到这个临时文件。当这个临时文件累积的数据量超过一个数据块的大小,客户端才会联系Namenode。Namenode将文件名插入文件系统的层次结构中,并且分配一个数据块给它。然后返回Datanode的标识符和目标数据块给客户端。接着客户端将这块数据从本地临时文件上传到指定的Datanode上。当文件关闭时,在临时文件中剩余的没有上传的数据也会传输到指定的Datanode上。然后客户端告诉Namenode文件已经关闭。此时Namenode才将文件创建操作提交到日志里进行存储。如果Namenode在文件关闭前宕机了,则该文件将丢失。
-        </p>
-<p>
-        上述方法是对在HDFS上运行的目标应用进行认真考虑后得到的结果。这些应用需要进行文件的流式写入。如果不采用客户端缓存，由于网络速度和网络堵塞会对吞吐量造成比较大的影响。这种方法并不是没有先例的，早期的文件系统，比如<acronym title="Andrew File System">AFS</acronym>，就用客户端缓存来提高性能。为了达到更高的数据上传效率，已经放松了POSIX标准的要求。
-        </p>
-<a name="N10161"></a><a name="%E6%B5%81%E6%B0%B4%E7%BA%BF%E5%A4%8D%E5%88%B6"></a>
-<h3 class="h4"> 流水线复制 </h3>
-<p>
-        当客户端向HDFS文件写入数据的时候,一开始是写到本地临时文件中。假设该文件的副本系数设置为3,当本地临时文件累积到一个数据块的大小时,客户端会从Namenode获取一个Datanode列表用于存放副本。然后客户端开始向第一个Datanode传输数据,第一个Datanode一小部分一小部分(4 KB)地接收数据,将每一部分写入本地仓库,并同时传输该部分到列表中第二个Datanode节点。第二个Datanode也是这样,一小部分一小部分地接收数据,写入本地仓库,并同时传给第三个Datanode。最后,第三个Datanode接收数据并存储在本地。因此,Datanode能流水线式地从前一个节点接收数据,并在同时转发给下一个节点,数据以流水线的方式从前一个Datanode复制到下一个。
-        </p>
-</div>
-
-    
-<a name="N1016C"></a><a name="%E5%8F%AF%E8%AE%BF%E9%97%AE%E6%80%A7"></a>
-<h2 class="h3"> 可访问性 </h2>
-<div class="section">
-<p>
-      HDFS给应用提供了多种访问方式。用户可以通过<a href="http://hadoop.apache.org/core/docs/current/api/">Java API</a>接口访问,也可以通过C语言的封装API访问,还可以通过浏览器的方式访问HDFS中的文件。通过<acronym title="Web-based Distributed Authoring and Versioning">WebDAV</acronym>协议访问的方式正在开发中。
-      </p>
-<a name="N10181"></a><a name="DFSShell"></a>
-<h3 class="h4"> DFSShell </h3>
-<p>
-        HDFS以文件和目录的形式组织用户数据。它提供了一个命令行的接口(DFSShell)让用户与HDFS中的数据进行交互。命令的语法和用户熟悉的其他shell(例如 bash, csh)工具类似。下面是一些动作/命令的示例:
-        </p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-          
-<tr>
-            
-<th colspan="1" rowspan="1"> 动作 </th><th colspan="1" rowspan="1"> 命令 </th>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1"> 创建一个名为 <span class="codefrag">/foodir</span> 的目录 </td> <td colspan="1" rowspan="1"> <span class="codefrag">bin/hadoop dfs -mkdir /foodir</span> </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1"> 创建一个名为 <span class="codefrag">/foodir</span> 的目录 </td> <td colspan="1" rowspan="1"> <span class="codefrag">bin/hadoop dfs -mkdir /foodir</span> </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1"> 查看名为 <span class="codefrag">/foodir/myfile.txt</span> 的文件内容 </td> <td colspan="1" rowspan="1"> <span class="codefrag">bin/hadoop dfs -cat /foodir/myfile.txt</span> </td>
-          
-</tr>
-        
-</table>
-<p>
-        DFSShell 可以用在那些通过脚本语言和文件系统进行交互的应用程序上。
-        </p>
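The shell actions in the table above also have direct programmatic equivalents. A minimal sketch using the Java FileSystem API, with the /foodir paths taken from the table (error handling omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;

    // Sketch: the same DFSShell actions from the table, done through the Java API.
    public class FooDirExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        fs.mkdirs(new Path("/foodir"));                                 // bin/hadoop dfs -mkdir /foodir
        FSDataInputStream in = fs.open(new Path("/foodir/myfile.txt")); // bin/hadoop dfs -cat /foodir/myfile.txt
        IOUtils.copyBytes(in, System.out, conf, true);
      }
    }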
-<a name="N101D6"></a><a name="DFSAdmin"></a>
-<h3 class="h4"> DFSAdmin </h3>
-<p>
-		DFSAdmin 命令用来管理HDFS集群。这些命令只有HDSF的管理员才能使用。下面是一些动作/命令的示例:
-        </p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-          
-<tr>
-            
-<th colspan="1" rowspan="1"> 动作 </th><th colspan="1" rowspan="1"> 命令 </th>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1"> 将集群置于安全模式 </td> <td colspan="1" rowspan="1"> <span class="codefrag">bin/hadoop dfsadmin -safemode enter</span> </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1"> 显示Datanode列表 </td> <td colspan="1" rowspan="1"> <span class="codefrag">bin/hadoop dfsadmin -report</span> </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1"> 使Datanode节点 <span class="codefrag">datanodename</span>退役</td><td colspan="1" rowspan="1"> <span class="codefrag">bin/hadoop dfsadmin -decommission datanodename</span> </td>
-          
-</tr>
-        
-</table>
-<a name="N10221"></a><a name="%E6%B5%8F%E8%A7%88%E5%99%A8%E6%8E%A5%E5%8F%A3"></a>
-<h3 class="h4"> 浏览器接口 </h3>
-<p>
-	一个典型的HDFS安装会在一个可配置的TCP端口开启一个Web服务器用于暴露HDFS的名字空间。用户可以用浏览器来浏览HDFS的名字空间和查看文件的内容。
-       </p>
-</div> 
-
-    
-<a name="N1022C"></a><a name="%E5%AD%98%E5%82%A8%E7%A9%BA%E9%97%B4%E5%9B%9E%E6%94%B6"></a>
-<h2 class="h3"> 存储空间回收 </h2>
-<div class="section">
-<a name="N10232"></a><a name="%E6%96%87%E4%BB%B6%E7%9A%84%E5%88%A0%E9%99%A4%E5%92%8C%E6%81%A2%E5%A4%8D"></a>
-<h3 class="h4"> 文件的删除和恢复 </h3>
-<p>
-       当用户或应用程序删除某个文件时,这个文件并没有立刻从HDFS中删除。实际上,HDFS会将这个文件重命名转移到<span class="codefrag">/trash</span>目录。只要文件还在<span class="codefrag">/trash</span>目录中,该文件就可以被迅速地恢复。文件在<span class="codefrag">/trash</span>中保存的时间是可配置的,当超过这个时间时,Namenode就会将该文件从名字空间中删除。删除文件会使得该文件相关的数据块被释放。注意,从用户删除文件到HDFS空闲空间的增加之间会有一定时间的延迟。</p>
-<p>
-只要被删除的文件还在<span class="codefrag">/trash</span>目录中,用户就可以恢复这个文件。如果用户想恢复被删除的文件,他/她可以浏览<span class="codefrag">/trash</span>目录找回该文件。<span class="codefrag">/trash</span>目录仅仅保存被删除文件的最后副本。<span class="codefrag">/trash</span>目录与其他的目录没有什么区别,除了一点:在该目录上HDFS会应用一个特殊策略来自动删除文件。目前的默认策略是删除<span class="codefrag">/trash</span>中保留时间超过6小时的文件。将来,这个策略可以通过一个被良好定义的接口配置。
-        </p>
-<a name="N10257"></a><a name="%E5%87%8F%E5%B0%91%E5%89%AF%E6%9C%AC%E7%B3%BB%E6%95%B0"></a>
-<h3 class="h4"> 减少副本系数 </h3>
-<p>
-        当一个文件的副本系数被减小后,Namenode会选择过剩的副本删除。下次心跳检测时会将该信息传递给Datanode。Datanode遂即移除相应的数据块,集群中的空闲空间加大。同样,在调用<span class="codefrag">setReplication</span> API结束和集群中空闲空间增加间会有一定的延迟。</p>
-</div>
-
-
-    
-<a name="N10265"></a><a name="%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99"></a>
-<h2 class="h3"> 参考资料 </h2>
-<div class="section">
-<p>
-      HDFS Java API: 
-      <a href="http://hadoop.apache.org/core/docs/current/api/"> 
-        http://hadoop.apache.org/core/docs/current/api/
-      </a>
-      
-</p>
-<p>
-      HDFS 源代码: 
-      <a href="http://hadoop.apache.org/core/version_control.html"> 
-        http://hadoop.apache.org/core/version_control.html
-      </a>
-      
-</p>
-</div> 
-
-  
-<p align="right">
-<font size="-2">by&nbsp;Dhruba Borthakur</font>
-</p>
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 424
docs/cn/hdfs_design.pdf


+ 0 - 504
docs/cn/hdfs_permissions_guide.html

@@ -1,504 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>
-      HDFS权限管理用户指南
-    </title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">项目</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">维基</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.18文档</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">文档</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">概述</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">快速入门</a>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">集群搭建</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS构架设计</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS使用指南</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">HDFS权限指南</div>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS配额管理指南</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">命令手册</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">FS Shell使用指南</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">DistCp使用指南</a>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Map-Reduce教程</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop本地库</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hod.html">Hadoop On Demand</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="hdfs_permissions_guide.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>
-      HDFS权限管理用户指南
-    </h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#%E6%A6%82%E8%BF%B0">概述</a>
-</li>
-<li>
-<a href="#%E7%94%A8%E6%88%B7%E8%BA%AB%E4%BB%BD">用户身份</a>
-</li>
-<li>
-<a href="#%E7%90%86%E8%A7%A3%E7%B3%BB%E7%BB%9F%E7%9A%84%E5%AE%9E%E7%8E%B0">理解系统的实现</a>
-</li>
-<li>
-<a href="#%E6%96%87%E4%BB%B6%E7%B3%BB%E7%BB%9FAPI%E5%8F%98%E6%9B%B4">文件系统API变更</a>
-</li>
-<li>
-<a href="#Shell%E5%91%BD%E4%BB%A4%E5%8F%98%E6%9B%B4">Shell命令变更</a>
-</li>
-<li>
-<a href="#%E8%B6%85%E7%BA%A7%E7%94%A8%E6%88%B7">超级用户</a>
-</li>
-<li>
-<a href="#Web%E6%9C%8D%E5%8A%A1%E5%99%A8">Web服务器</a>
-</li>
-<li>
-<a href="#%E5%9C%A8%E7%BA%BF%E5%8D%87%E7%BA%A7">在线升级</a>
-</li>
-<li>
-<a href="#%E9%85%8D%E7%BD%AE%E5%8F%82%E6%95%B0">配置参数</a>
-</li>
-</ul>
-</div>
-    
-<a name="N1000D"></a><a name="%E6%A6%82%E8%BF%B0"></a>
-<h2 class="h3">概述</h2>
-<div class="section">
-<p>
-		Hadoop分布式文件系统实现了一个和POSIX系统类似的文件和目录的权限模型。每个文件和目录有一个<em>所有者(owner)</em>和一个<em>组(group)</em>。文件或目录对其所有者、同组的其他用户以及所有其他用户分别有着不同的权限。对文件而言,当读取这个文件时需要有<em>r</em>权限,当写入或者追加到文件时需要有<em>w</em>权限。对目录而言,当列出目录内容时需要具有<em>r</em>权限,当新建或删除子文件或子目录时需要有<em>w</em>权限,当访问目录的子节点时需要有<em>x</em>权限。不同于POSIX模型,HDFS权限模型中的文件没有<em>sticky</em>,<em>setuid</em>或<em>setgid</em>位,因为这里没有可执行文件的概念。为了简单起见,这里也没有目录的<em>sticky</em>,<em>setuid</em>或<em>setgid</em>位。总的来说,文件或目录的权限就是它的<em>模式(mode)</em>。HDFS采用了Unix表示和显示模式的习惯,包括使用八进制数来表示权限。当新建一个文件或目录,它的所有者即客户进程的用户,它的所属组是父目录的组(BSD的规定)。
-	</p>
-<p>
-		每个访问HDFS的用户进程的标识分为两个部分,分别是<em>用户名</em>和<em>组名列表</em>。每次用户进程访问一个文件或目录<span class="codefrag">foo</span>,HDFS都要对其进行权限检查,
-	</p>
-<ul>
-		
-<li>
-		   如果用户即<span class="codefrag">foo</span>的所有者,则检查所有者的访问权限;
-		</li>
-		
-<li>
-		   如果<span class="codefrag">foo</span>关联的组在组名列表中出现,则检查组用户的访问权限;
-		</li>
-		
-<li>
-		   否则检查<span class="codefrag">foo</span>其他用户的访问权限。
-		</li>
-	
-</ul>
-<p>
-		如果权限检查失败,则客户的操作会失败。
-</p>
-</div>
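In short, the check described above proceeds owner first, then group, then other, and the first matching class decides. A hypothetical Java helper mirroring that rule with the public FsPermission and FsAction types (an illustration only; the real check is performed inside the name node):

    import java.util.List;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    // Illustration of the owner/group/other rule described above; not the NameNode's code.
    public class PermissionCheckSketch {
      static boolean isAllowed(String user, List<String> groups,
                               FileStatus stat, FsAction requested) {
        FsPermission mode = stat.getPermission();
        if (user.equals(stat.getOwner())) {
          return mode.getUserAction().implies(requested);    // owner class
        } else if (groups.contains(stat.getGroup())) {
          return mode.getGroupAction().implies(requested);   // group class
        } else {
          return mode.getOtherAction().implies(requested);   // other class
        }
      }
    }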
-
-
-<a name="N10065"></a><a name="%E7%94%A8%E6%88%B7%E8%BA%AB%E4%BB%BD"></a>
-<h2 class="h3">用户身份</h2>
-<div class="section">
-<p>
-在这个版本的Hadoop中,客户端用户身份是通过宿主操作系统给出。对类Unix系统来说,
-</p>
-<ul>
-
-<li>
-   用户名等于<span class="codefrag">`whoami`</span>;
-</li>
-
-<li>
-   组列表等于<span class="codefrag">`bash -c groups`</span>。
-</li>
-
-</ul>
-<p>
-将来会增加其他的方式来确定用户身份(比如Kerberos、LDAP等)。期待用上文中提到的第一种方式来防止一个用户假冒另一个用户是不现实的。这种用户身份识别机制结合权限模型允许一个协作团体以一种有组织的形式共享文件系统中的资源。
-</p>
-<p>
-不管怎样,用户身份机制对HDFS本身来说只是外部特性。HDFS并不提供创建用户身份、创建组或处理用户凭证等功能。
-</p>
-</div>
-
-
-<a name="N10083"></a><a name="%E7%90%86%E8%A7%A3%E7%B3%BB%E7%BB%9F%E7%9A%84%E5%AE%9E%E7%8E%B0"></a>
-<h2 class="h3">理解系统的实现</h2>
-<div class="section">
-<p>
-	每次文件或目录操作都传递完整的路径名给name node,每一个操作都会对此路径做权限检查。客户框架会隐式地将用户身份和与name node的连接关联起来,从而减少改变现有客户端API的需求。经常会有这种情况,当对一个文件的某一操作成功后,之后同样的操作却会失败,这是因为文件或路径上的某些目录已经不复存在了。比如,客户端首先开始读一个文件,它向name node发出一个请求以获取文件第一个数据块的位置。但接下去的获取其他数据块的第二个请求可能会失败。另一方面,删除一个文件并不会撤销客户端已经获得的对文件数据块的访问权限。而权限管理能使得客户端对一个文件的访问许可在两次请求之间被收回。重复一下,权限的改变并不会撤销当前客户端对文件数据块的访问许可。
-</p>
-<p>
-map-reduce框架通过传递字符串来指派用户身份,没有做其他特别的安全方面的考虑。文件或目录的所有者和组属性是以字符串的形式保存,而不是像传统的Unix方式转换为用户和组的数字ID。
-</p>
-<p>
-这个发行版本的权限管理特性并不需要改变data node的任何行为。Data node上的数据块上并没有任何<em>Hadoop</em>所有者或权限等关联属性。
-</p>
-</div>
-     
-
-<a name="N10096"></a><a name="%E6%96%87%E4%BB%B6%E7%B3%BB%E7%BB%9FAPI%E5%8F%98%E6%9B%B4"></a>
-<h2 class="h3">文件系统API变更</h2>
-<div class="section">
-<p>
-	如果权限检查失败,所有使用一个路径参数的方法都可能抛出<span class="codefrag">AccessControlException</span>异常。
-</p>
-<p>新增方法:</p>
-<ul>
-	
-<li>
-		
-<span class="codefrag">public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException;</span>
-	
-</li>
-	
-<li>
-		
-<span class="codefrag">public boolean mkdirs(Path f, FsPermission permission) throws IOException;</span>
-	
-</li>
-	
-<li>
-		
-<span class="codefrag">public void setPermission(Path p, FsPermission permission) throws IOException;</span>
-	
-</li>
-	
-<li>
-		
-<span class="codefrag">public void setOwner(Path p, String username, String groupname) throws IOException;</span>
-	
-</li>
-	
-<li>
-		
-<span class="codefrag">public FileStatus getFileStatus(Path f) throws IOException;</span> 也会返回路径关联的所有者、组和模式属性。
-	</li>
-
-
-</ul>
-<p>
-新建文件或目录的模式受配置参数<span class="codefrag">umask</span>的约束。当使用之前的 <span class="codefrag">create(path, &hellip;)</span> 方法(<em>没有指定</em>权限参数)时,新文件的模式是<span class="codefrag">666&thinsp;&amp;&thinsp;^umask</span>。当使用新的 <span class="codefrag">create(path, </span><em>permission</em><span class="codefrag">, &hellip;)</span> 方法(<em>指定了</em>权限参数<em>P</em>)时,新文件的模式是<span class="codefrag">P&thinsp;&amp;&thinsp;^umask&thinsp;&amp;&thinsp;666</span>。当使用先前的 <span class="codefrag">mkdirs(path)</span> 方法(<em>没有指定</em> 权限参数)新建一个目录时,新目录的模式是<span class="codefrag">777&thinsp;&amp;&thinsp;^umask</span>。当使用新的 <span class="codefrag">mkdirs(path, </span><em>permission</em> <span class="codefrag">)</span> 方法(<em>指定了</em>权限参数<em>P</em>)新建一个目录时,新目录的模式是<span class="codefrag">P&thinsp;&amp;&thinsp;^umask&thinsp;&amp;&thinsp;777</span>。
-</p>
-</div>
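A worked example of the mode arithmetic above, assuming the default umask of 022 and an illustrative caller-supplied permission P of 640 (both values are assumptions, not taken from the text):

    // Worked example of the mode rules above; umask 022 and P = 0640 are assumed values.
    public class ModeArithmetic {
      public static void main(String[] args) {
        int umask = 0022;
        int p = 0640;                                                   // illustrative P
        System.out.println(Integer.toOctalString(0666 & ~umask));      // create(path, ...)     -> 644
        System.out.println(Integer.toOctalString(0777 & ~umask));      // mkdirs(path)          -> 755
        System.out.println(Integer.toOctalString(p & ~umask & 0666));  // create(path, P, ...)  -> 640
        System.out.println(Integer.toOctalString(p & ~umask & 0777));  // mkdirs(path, P)       -> 640
      }
    }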
-
-     
-
-<a name="N10100"></a><a name="Shell%E5%91%BD%E4%BB%A4%E5%8F%98%E6%9B%B4"></a>
-<h2 class="h3">Shell命令变更</h2>
-<div class="section">
-<p>新增操作:</p>
-<dl>
-	
-<dt>
-<span class="codefrag">chmod [-R]</span> <em>mode file &hellip;</em>
-</dt>
-	
-<dd>
-		只有文件的所有者或者超级用户才有权限改变文件模式。
-	</dd>
-	
-<dt>
-<span class="codefrag">chgrp [-R]</span> <em>group file &hellip;</em>
-</dt>
-	
-<dd>
-		使用<span class="codefrag">chgrp</span>命令的用户必须属于特定的组且是文件的所有者,或者用户是超级用户。
-	</dd>
-	
-<dt>
-<span class="codefrag">chown [-R]</span> <em>[owner][:[group]] file &hellip;</em>
-</dt>
-	
-<dd>
-		文件的所有者只能被超级用户更改。
-	</dd>
-	
-<dt>
-<span class="codefrag">ls </span> <em>file &hellip;</em>
-</dt>
-<dd></dd>
-	
-<dt>
-<span class="codefrag">lsr </span> <em>file &hellip;</em>
-</dt>
-	
-<dd>
-		输出格式做了调整以显示所有者、组和模式。
-	</dd>
-
-</dl>
-</div>
-
-     
-
-<a name="N1013F"></a><a name="%E8%B6%85%E7%BA%A7%E7%94%A8%E6%88%B7"></a>
-<h2 class="h3">超级用户</h2>
-<div class="section">
-<p>
-超级用户即运行name node进程的用户。宽泛的讲,如果你启动了name node,你就是超级用户。超级用户干任何事情,因为超级用户能够通过所有的权限检查。没有永久记号保留谁<em>过去</em>是超级用户;当name node开始运行时,进程自动判断谁<em>现在</em>是超级用户。HDFS的超级用户不一定非得是name node主机上的超级用户,也不需要所有的集群的超级用户都是一个。同样的,在个人工作站上运行HDFS的实验者,不需任何配置就已方便的成为了他的部署实例的超级用户。
-	</p>
-<p>
-	另外,管理员可以用配置参数指定一组特定的用户,如果做了设定,这个组的成员也会是超级用户。
-</p>
-</div>
-
-
-<a name="N10152"></a><a name="Web%E6%9C%8D%E5%8A%A1%E5%99%A8"></a>
-<h2 class="h3">Web服务器</h2>
-<div class="section">
-<p>
-Web服务器的身份是一个可配置参数。Name node并没有<em>真实</em>用户的概念,但是Web服务器表现地就像它具有管理员选定的用户的身份(用户名和组)一样。除非这个选定的身份是超级用户,否则会有名字空间中的一部分对Web服务器来说不可见。
-</p>
-</div>
-
-
-<a name="N1015F"></a><a name="%E5%9C%A8%E7%BA%BF%E5%8D%87%E7%BA%A7"></a>
-<h2 class="h3">在线升级</h2>
-<div class="section">
-<p>
-如果集群在0.15版本的数据集(<span class="codefrag">fsimage</span>)上启动,所有的文件和目录都有所有者<em>O</em>,组<em>G</em>,和模式<em>M</em>,这里 <em>O</em> 和 <em>G</em> 分别是超级用户的用户标识和组名,<em>M</em>是一个配置参数。</p>
-</div>
-
-
-<a name="N1017E"></a><a name="%E9%85%8D%E7%BD%AE%E5%8F%82%E6%95%B0"></a>
-<h2 class="h3">配置参数</h2>
-<div class="section">
-<dl>
-	
-<dt>
-<span class="codefrag">dfs.permissions = true </span>
-</dt>
-	
-<dd>
-		如果是 <span class="codefrag">true</span>,则打开前文所述的权限系统。如果是 <span class="codefrag">false</span>,权限<em>检查</em> 就是关闭的,但是其他的行为没有改变。这个配置参数的改变并不改变文件或目录的模式、所有者和组等信息。
-		<p>
-		
-</p>
-		不管权限模式是开还是关,<span class="codefrag">chmod</span>,<span class="codefrag">chgrp</span> 和 <span class="codefrag">chown</span> <em>总是</em> 会检查权限。这些命令只有在权限检查背景下才有用,所以不会有兼容性问题。这样,这就能让管理员在打开常规的权限检查之前可以可靠地设置文件的所有者和权限。
-	</dd>
-	
-<dt>
-<span class="codefrag">dfs.web.ugi = webuser,webgroup</span>
-</dt>
-	
-<dd>
-	Web服务器使用的用户名。如果将这个参数设置为超级用户的名称,则所有Web客户就可以看到所有的信息。如果将这个参数设置为一个不使用的用户,则Web客户就只能访问到&ldquo;other&rdquo;权限可访问的资源了。额外的组可以加在后面,形成一个用逗号分隔的列表。
-	</dd>
-	
-<dt>
-<span class="codefrag">dfs.permissions.supergroup = supergroup</span>
-</dt>
-	
-<dd>
-	超级用户的组名。
-	</dd>
-	
-<dt>
-<span class="codefrag">dfs.upgrade.permission = 777</span>
-</dt>
-	
-<dd>
-	升级时的初始模式。文件<em>永不会</em>被设置<em>x</em>权限。在配置文件中,可以使用十进制数<em>511<sub>10</sub></em>。
-	</dd>
-	
-<dt>
-<span class="codefrag">dfs.umask = 022</span>
-</dt>
-	
-<dd>
-		
-<span class="codefrag">umask</span>参数在创建文件和目录时使用。在配置文件中,可以使用十进制数<em>18<sub>10</sub></em>。
-	</dd>
-
-</dl>
-</div>
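As a sanity check on the decimal forms mentioned above: octal 777 is 7*64 + 7*8 + 7 = 511, and octal 022 is 2*8 + 2 = 18, which are exactly the decimal values 511 and 18 that the text suggests may be written for dfs.upgrade.permission and dfs.umask in a configuration file.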
-
-     
-  
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 129
docs/cn/hdfs_permissions_guide.pdf


+ 0 - 277
docs/cn/hdfs_quota_admin_guide.html

@@ -1,277 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>
-      名字空间配额管理指南
-    </title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">项目</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">维基</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.18文档</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">文档</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">概述</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">快速入门</a>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">集群搭建</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS构架设计</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS使用指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS权限指南</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">HDFS配额管理指南</div>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">命令手册</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">FS Shell使用指南</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">DistCp使用指南</a>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Map-Reduce教程</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop本地库</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hod.html">Hadoop On Demand</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="hdfs_quota_admin_guide.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>
-      名字空间配额管理指南
-    </h1>
-      
-<p>
-      Hadoop分布式文件系统(HDFS)允许管理员为每个目录设置配额。
-      新建立的目录没有配额。
-      最大的配额是<span class="codefrag">Long.Max_Value</span>。配额为1可以强制目录保持为空。
-      </p>
-
-      
-<p>
-      目录配额是对目录树上该目录下的名字数量做硬性限制。如果创建文件或目录时超过了配额,该操作会失败。重命名不会改变该目录的配额;如果重命名操作会导致违反配额限制,该操作将会失败。如果尝试设置一个配额而现有文件数量已经超出了这个新配额,则设置失败。
-      </p>
-
-      
-<p>
-      配额和fsimage保持一致。当启动时,如果fsimage违反了某个配额限制(也许fsimage被偷偷改变了),则启动失败并生成错误报告。设置或删除一个配额会创建相应的日志记录。
-      </p> 
-
-      
-<p>
-      下面的新命令或新选项是用于支持配额的。
-      前两个是管理员命令。
-      </p>
-
-      
-<ul>
-      
-<li>
-      
-<span class="codefrag">dfsadmin -setquota &lt;N&gt; &lt;directory&gt;...&lt;directory&gt;</span> 
-      
-<br> 
-      把每个目录配额设为<span class="codefrag">N</span>。这个命令会在每个目录上尝试,
-      如果<span class="codefrag">N</span>不是一个正的长整型数,目录不存在或是文件名,
-      或者目录超过配额,则会产生错误报告。
-      </li>
-  
-      
-<li>
-      
-<span class="codefrag">dfsadmin -clrquota &lt;directory&gt;...&lt;director&gt;</span>
-<br> 
-      为每个目录删除配额。这个命令会在每个目录上尝试,如果目录不存在或者是文件,则会产生错误报告。如果目录原来没有设置配额不会报错。
-      </li>
-  
-      
-<li>
-      
-<span class="codefrag">fs -count -q &lt;directory&gt;...&lt;directory&gt;</span>
-<br>
-      使用<span class="codefrag">-q</span>选项,会报告每个目录设置的配额,以及剩余配额。
-      如果目录没有设置配额,会报告<span class="codefrag">none</span>和<span class="codefrag">inf</span>。
-      </li>
-      
-</ul>
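For example (the path is illustrative), dfsadmin -setquota 100 /user/alice caps the tree rooted at /user/alice at 100 names in total, counting files and directories including /user/alice itself; fs -count -q /user/alice then reports the quota of 100 together with the remaining headroom, and dfsadmin -clrquota /user/alice removes the limit again.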
-   
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 47
docs/cn/hdfs_quota_admin_guide.pdf


+ 0 - 860
docs/cn/hdfs_shell.html

@@ -1,860 +0,0 @@
-<h1>Hadoop Shell Commands</h1>
-<a name="N1000F"></a><a name="FS+Shell"></a>
-<h2 class="h3"> FS Shell </h2>
-<div class="section">
-<p>
-      The File System (FS) shell is invoked by <span class="codefrag">bin/hadoop fs &lt;args&gt;</span>.
-      All FS shell commands take path URIs as arguments. The URI format is <em>scheme://authority/path</em>. For HDFS the scheme is <em>hdfs</em>, and for the local file system the scheme is <em>file</em>. The scheme and authority are optional; if unspecified, the default scheme given in the configuration is used. An HDFS file or directory such as <em>/parent/child</em> can be specified as <em>hdfs://namenode:namenodeport/parent/child</em> or simply as <em>/parent/child</em> (given that your configuration points to <em>namenode:namenodeport</em>). Most FS shell commands behave like their corresponding Unix commands; differences are noted where each command is described below. Error information is sent to <em>stderr</em> and other output is sent to <em>stdout</em>.
-  </p>
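For instance, assuming the configured default file system points at a NameNode reachable as nn.example.com:9000 (a hypothetical address), the following two invocations would be equivalent:

    hadoop fs -cat hdfs://nn.example.com:9000/parent/child
    hadoop fs -cat /parent/child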
-<h3 class="h4"> cat </h3>
-<p><span class="codefrag">Usage: hadoop fs -cat URI [URI &hellip;]</span></p>
-<p>Copies the specified source paths to <em>stdout</em>.</p>
-<p>Example:</p>
-<ul>
-<li><span class="codefrag">hadoop fs -cat hdfs://host1:port1/file1 hdfs://host2:port2/file2</span></li>
-<li><span class="codefrag">hadoop fs -cat file:///file3 /user/hadoop/file4</span></li>
-</ul>
-<p>Exit code:<br>
-<span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4"> chgrp </h3>
-<p><span class="codefrag">Usage: hadoop fs -chgrp [-R] GROUP URI [URI &hellip;]</span></p>
-<p>Changes the group association of files. With <span class="codefrag">-R</span>, the change is made recursively through the directory structure. The user must be the owner of the file or a super-user. Additional information is in the <a href="hdfs_permissions_guide.html">HDFS Permissions User Guide</a>.</p>
-<h3 class="h4"> chmod </h3>
-<p><span class="codefrag">Usage: hadoop fs -chmod [-R] &lt;MODE[,MODE]... | OCTALMODE&gt; URI [URI &hellip;]</span></p>
-<p>Changes the permissions of files. With <span class="codefrag">-R</span>, the change is made recursively through the directory structure. The user must be the owner of the file or a super-user. Additional information is in the <a href="hdfs_permissions_guide.html">HDFS Permissions User Guide</a>.</p>
-<h3 class="h4"> chown </h3>
-<p><span class="codefrag">Usage: hadoop fs -chown [-R] [OWNER][:[GROUP]] URI [URI &hellip;]</span></p>
-<p>Changes the owner of files. With <span class="codefrag">-R</span>, the change is made recursively through the directory structure. The user must be a super-user. Additional information is in the <a href="hdfs_permissions_guide.html">HDFS Permissions User Guide</a>. A combined example is sketched below.</p>
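A minimal combined sketch of the three permission commands, assuming a hypothetical directory /user/alice/logs and a group named analysts (chown additionally requires super-user privileges):

    hadoop fs -chmod -R 750 /user/alice/logs
    hadoop fs -chgrp -R analysts /user/alice/logs
    hadoop fs -chown -R alice:analysts /user/alice/logs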
-<h3 class="h4">copyFromLocal</h3>
-<p><span class="codefrag">Usage: hadoop fs -copyFromLocal &lt;localsrc&gt; URI</span></p>
-<p>Similar to the <a href="#putlink"><strong>put</strong></a> command, except that the source is restricted to a local file reference.</p>
-<h3 class="h4"> copyToLocal</h3>
-<p><span class="codefrag">Usage: hadoop fs -copyToLocal [-ignorecrc] [-crc] URI &lt;localdst&gt;</span></p>
-<p>Similar to the <a href="#getlink"><strong>get</strong></a> command, except that the destination is restricted to a local file reference.</p>
-<h3 class="h4"> cp </h3>
-<p><span class="codefrag">Usage: hadoop fs -cp URI [URI &hellip;] &lt;dest&gt;</span></p>
-<p>Copies files from source to destination. This command allows multiple sources, in which case the destination must be a directory.<br>
-Example:</p>
-<ul>
-<li><span class="codefrag">hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2</span></li>
-<li><span class="codefrag">hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir</span></li>
-</ul>
-<p>Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4">du</h3>
-<p><span class="codefrag">Usage: hadoop fs -du URI [URI &hellip;]</span></p>
-<p>Displays the sizes of the files contained in a directory or, when a single file is specified, the size of that file.<br>
-Example: <span class="codefrag">hadoop fs -du /user/hadoop/dir1 /user/hadoop/file1 hdfs://host:port/user/hadoop/dir1</span><br>
-Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4"> dus </h3>
-<p><span class="codefrag">Usage: hadoop fs -dus &lt;args&gt;</span></p>
-<p>Displays a summary of file sizes.</p>
-<h3 class="h4"> expunge </h3>
-<p><span class="codefrag">Usage: hadoop fs -expunge</span></p>
-<p>Empties the trash. Refer to the <a href="hdfs_design.html">HDFS design</a> document for more information on the trash feature.</p>
-<h3 class="h4"> get </h3>
-<p><span class="codefrag">Usage: hadoop fs -get [-ignorecrc] [-crc] &lt;src&gt; &lt;localdst&gt;</span></p>
-<p>Copies files to the local file system. Files that fail the CRC check may be copied with the <span class="codefrag">-ignorecrc</span> option. Files and their CRC information can be copied with the <span class="codefrag">-crc</span> option.</p>
-<p>Example:</p>
-<ul>
-<li><span class="codefrag">hadoop fs -get /user/hadoop/file localfile</span></li>
-<li><span class="codefrag">hadoop fs -get hdfs://host:port/user/hadoop/file localfile</span></li>
-</ul>
-<p>Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4"> getmerge </h3>
-<p><span class="codefrag">Usage: hadoop fs -getmerge &lt;src&gt; &lt;localdst&gt; [addnl]</span></p>
-<p>Takes a source directory and a destination file as input, and concatenates the files in the source directory into the local destination file. The optional <span class="codefrag">addnl</span> argument adds a newline character at the end of each file. A sketch of typical usage follows.</p>
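As a minimal sketch (the source directory /user/alice/output and the local file merged.txt are hypothetical names), merging a job's part files into one local file, with a newline appended after each part, might look like:

    hadoop fs -getmerge /user/alice/output merged.txt addnl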
-<h3 class="h4"> ls </h3>
-<p><span class="codefrag">Usage: hadoop fs -ls &lt;args&gt;</span></p>
-<p>For a file, returns stats on the file in the following format:<br>
-<span class="codefrag">filename &lt;number of replicas&gt; filesize modification_date modification_time permissions userid groupid</span><br>
-For a directory, returns the list of its direct children, as in Unix. A directory is listed as:<br>
-<span class="codefrag">dirname &lt;dir&gt; modification_date modification_time permissions userid groupid</span><br>
-Example: <span class="codefrag">hadoop fs -ls /user/hadoop/file1 /user/hadoop/file2 hdfs://host:port/user/hadoop/dir1 /nonexistentfile</span><br>
-Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4">lsr</h3>
-<p><span class="codefrag">Usage: hadoop fs -lsr &lt;args&gt;</span><br>
-Recursive version of <span class="codefrag">ls</span>. Similar to Unix <span class="codefrag">ls -R</span>.</p>
-<h3 class="h4"> mkdir </h3>
-<p><span class="codefrag">Usage: hadoop fs -mkdir &lt;paths&gt;</span></p>
-<p>Takes path URIs as arguments and creates the directories. The behavior is much like Unix mkdir -p, creating parent directories along the path as needed.</p>
-<p>Example:</p>
-<ul>
-<li><span class="codefrag">hadoop fs -mkdir /user/hadoop/dir1 /user/hadoop/dir2</span></li>
-<li><span class="codefrag">hadoop fs -mkdir hdfs://host1:port1/user/hadoop/dir hdfs://host2:port2/user/hadoop/dir</span></li>
-</ul>
-<p>Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4"> moveFromLocal </h3>
-<p><span class="codefrag">Usage: dfs -moveFromLocal &lt;src&gt; &lt;dst&gt;</span></p>
-<p>Displays a &quot;not implemented&quot; message.</p>
-<h3 class="h4"> mv </h3>
-<p><span class="codefrag">Usage: hadoop fs -mv URI [URI &hellip;] &lt;dest&gt;</span></p>
-<p>Moves files from source to destination. This command allows multiple sources, in which case the destination must be a directory. Moving files across file systems is not permitted.<br>
-Example:</p>
-<ul>
-<li><span class="codefrag">hadoop fs -mv /user/hadoop/file1 /user/hadoop/file2</span></li>
-<li><span class="codefrag">hadoop fs -mv hdfs://host:port/file1 hdfs://host:port/file2 hdfs://host:port/file3 hdfs://host:port/dir1</span></li>
-</ul>
-<p>Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4"> put </h3>
-<p><span class="codefrag">Usage: hadoop fs -put &lt;localsrc&gt; ... &lt;dst&gt;</span></p>
-<p>Copies single or multiple sources from the local file system to the destination file system. Also reads input from stdin and writes it to the destination file system.</p>
-<ul>
-<li><span class="codefrag">hadoop fs -put localfile /user/hadoop/hadoopfile</span></li>
-<li><span class="codefrag">hadoop fs -put localfile1 localfile2 /user/hadoop/hadoopdir</span></li>
-<li><span class="codefrag">hadoop fs -put localfile hdfs://host:port/hadoop/hadoopfile</span></li>
-<li><span class="codefrag">hadoop fs -put - hdfs://host:port/hadoop/hadoopfile</span><br>Reads the input from stdin.</li>
-</ul>
-<p>Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4"> rm </h3>
-<p><span class="codefrag">Usage: hadoop fs -rm URI [URI &hellip;]</span></p>
-<p>Deletes the specified files. Only deletes files and empty directories. Refer to the rmr command for recursive deletes.<br>
-Example:</p>
-<ul>
-<li><span class="codefrag">hadoop fs -rm hdfs://host:port/file /user/hadoop/emptydir</span></li>
-</ul>
-<p>Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4"> rmr </h3>
-<p><span class="codefrag">Usage: hadoop fs -rmr URI [URI &hellip;]</span></p>
-<p>Recursive version of delete.<br>
-Example:</p>
-<ul>
-<li><span class="codefrag">hadoop fs -rmr /user/hadoop/dir</span></li>
-<li><span class="codefrag">hadoop fs -rmr hdfs://host:port/user/hadoop/dir</span></li>
-</ul>
-<p>Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4"> setrep </h3>
-<p><span class="codefrag">Usage: hadoop fs -setrep [-R] &lt;path&gt;</span></p>
-<p>Changes the replication factor of a file. The -R option recursively changes the replication factor of all files under a directory.</p>
-<p>Example:</p>
-<ul>
-<li><span class="codefrag">hadoop fs -setrep -w 3 -R /user/hadoop/dir1</span></li>
-</ul>
-<p>Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4"> stat </h3>
-<p><span class="codefrag">Usage: hadoop fs -stat URI [URI &hellip;]</span></p>
-<p>Returns stat information on the given path.</p>
-<p>Example:</p>
-<ul>
-<li><span class="codefrag">hadoop fs -stat path</span></li>
-</ul>
-<p>Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4"> tail </h3>
-<p><span class="codefrag">Usage: hadoop fs -tail [-f] URI</span></p>
-<p>Displays the last kilobyte of the file to stdout. The -f option behaves as in Unix.</p>
-<p>Example:</p>
-<ul>
-<li><span class="codefrag">hadoop fs -tail pathname</span></li>
-</ul>
-<p>Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-<h3 class="h4"> test </h3>
-<p><span class="codefrag">Usage: hadoop fs -test -[ezd] URI</span></p>
-<p>Options:<br>
--e checks whether the file exists, returning 0 if true.<br>
--z checks whether the file is zero length, returning 0 if true.<br>
--d returns 1 if the path is a directory, otherwise returns 0.<br>
-</p>
-<p>Example (a scripted check is sketched below):</p>
-<ul>
-<li><span class="codefrag">hadoop fs -test -e filename</span></li>
-</ul>
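A minimal sketch of using the exit status of test from a shell script (the marker path /user/alice/_SUCCESS is hypothetical):

    if hadoop fs -test -e /user/alice/_SUCCESS; then
        echo "marker file exists"
    fi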
-<h3 class="h4"> text </h3>
-<p><span class="codefrag">Usage: hadoop fs -text &lt;src&gt;</span></p>
-<p>Takes a source file and outputs the file in text format. The allowed formats are zip and TextRecordInputStream.</p>
-<h3 class="h4"> touchz </h3>
-<p><span class="codefrag">Usage: hadoop fs -touchz URI [URI &hellip;]</span></p>
-<p>Creates a file of zero length.</p>
-<p>Example:</p>
-<ul>
-<li><span class="codefrag">hadoop fs -touchz pathname</span></li>
-</ul>
-<p>Exit code: <span class="codefrag">Returns 0 on success and -1 on error.</span></p>
-</div>
-	
-</div>

File diff suppressed because it is too large
+ 0 - 347
docs/cn/hdfs_shell.pdf


+ 0 - 718
docs/cn/hdfs_user_guide.html

@@ -1,718 +0,0 @@
-<h1>
-      Hadoop Distributed File System User Guide
-    </h1>
-<h2 class="h3">Purpose</h2>
-<div class="section">
-<p>
-	This document is a starting point for users working with the Hadoop Distributed File System (HDFS), either as part of a <a href="http://hadoop.apache.org/">Hadoop</a> cluster or as a stand-alone general-purpose distributed file system. While HDFS is designed to work correctly in many environments, a working knowledge of HDFS helps greatly with configuration improvements and diagnostics on a specific cluster.
-      </p>
-</div>
-<h2 class="h3"> Overview </h2>
-<div class="section">
-<p>
-HDFS is the primary distributed storage used by Hadoop applications. An HDFS cluster primarily consists of a NameNode, which manages the file system metadata, and many DataNodes, which store the actual data. The architecture of HDFS is described in detail <a href="hdfs_design.html">here</a>. This document primarily deals with how users and administrators interact with HDFS. The <a href="images/hdfsarchitecture.gif">diagram</a> in the <a href="hdfs_design.html">HDFS architecture</a> document depicts the basic interactions among the NameNode, the DataNodes, and the clients. Essentially, clients contact the NameNode for file metadata or file modifications, and perform actual file I/O directly with the DataNodes.
-      </p>
-<p>
-      The following are some of the salient features that could be of interest to many users.
-      </p>
-<ul>
-<li>
-    Hadoop, including HDFS, is well suited for distributed storage and computation on commodity hardware: it is fault tolerant, scalable, and extremely simple to expand. The <a href="mapred_tutorial.html">Map-Reduce</a> framework, known for its simplicity and applicability to large distributed applications, is an integral part of Hadoop.
-    </li>
-<li>
-    	HDFS is highly configurable, and its default configuration suits many installations. Most of the time, these parameters need to be tuned only for very large clusters.
-    </li>
-<li>
-    	Hadoop is written in Java and is supported on all major platforms.
-    </li>
-<li>
-    	Hadoop supports shell-like commands for interacting with HDFS directly.
-    </li>
-<li>
-    	The NameNode and DataNodes have built-in web servers that make it easy to check the current status of the cluster.
-    </li>
-<li>
-	New features and improvements are regularly implemented in HDFS. The following is a subset of frequently used features:
-      <ul>
-<li>
-    		File permissions and authentication.
-    	</li>
-<li>
-    		Rack awareness: taking a node's physical location into account when scheduling tasks and allocating storage.
-    	</li>
-<li>
-    		Safemode: an administrative mode for maintenance.
-    	</li>
-<li>
-    		fsck: a utility to diagnose the health of the file system and to find missing files or blocks.
-    	</li>
-<li>
-    		Rebalancer: a tool to balance the data load across the cluster when data is unevenly distributed among DataNodes.
-    	</li>
-<li>
-    		Upgrade and rollback: after a software upgrade, it is possible to roll back to the state of HDFS before the upgrade in case something unexpected happens.
-    	</li>
-<li>
-		Secondary NameNode: performs periodic checkpoints of the file system namespace and keeps the size of the HDFS modification log (the edits file) on the NameNode within certain limits.
-    	</li>
-</ul>
-</li>
-</ul>
-</div>
-<h2 class="h3"> Prerequisites </h2>
-<div class="section">
-<p>
-    The following documents describe installation and setup of a Hadoop cluster:
-    </p>
-<ul>
-<li>
-<a href="quickstart.html">Hadoop Quickstart</a> for first-time users.
- 	</li>
-<li>
-<a href="cluster_setup.html">Hadoop Cluster Setup</a> for large, distributed clusters.
- 	</li>
-</ul>
-<p>
-    The rest of this document assumes the user has installed and is running HDFS with at least one DataNode. For the purpose of this document, the NameNode and a DataNode may run on the same physical machine.
-    </p>
-</div>
-<h2 class="h3"> Web Interface </h2>
-<div class="section">
-<p>
- 	The NameNode and DataNode each run an internal web server that displays basic information about the current status of the cluster. With the default configuration, the NameNode front page is at <span class="codefrag">http://namenode-name:50070/</span>. It lists all the DataNodes in the cluster and basic statistics of the cluster. The web interface can also be used to browse the file system (using the "Browse the file system" link on the NameNode front page).
- </p>
-</div>
-<h2 class="h3">Shell Commands</h2>
-<div class="section">
-<p>Hadoop includes various shell-like commands that directly interact with HDFS and the other file systems that Hadoop supports. The command <span class="codefrag">bin/hadoop fs -help</span> lists the commands supported by the Hadoop shell, and <span class="codefrag">bin/hadoop fs -help command-name</span> displays more detailed information about a command. The commands support most of the normal file system operations, such as copying files and changing file permissions, as well as HDFS-specific operations such as changing the replication factor of a file.
-     </p>
-<h3 class="h4"> DFSAdmin Command </h3>
-<p>
-The <span class="codefrag">'bin/hadoop dfsadmin'</span> command supports a few HDFS administration operations. <span class="codefrag">bin/hadoop dfsadmin -help</span> lists all the commands currently supported. For example (a short sketch follows the list):
-   </p>
-<ul>
-<li>
-<span class="codefrag">-report</span>: reports basic statistics of HDFS. Some of this information is also available on the NameNode front page.
-   	</li>
-<li>
-<span class="codefrag">-safemode</span>: though usually not required, an administrator can manually enter or leave safemode.
-   	</li>
-<li>
-<span class="codefrag">-finalizeUpgrade</span>: removes the backup of the cluster made during the previous upgrade.
-   	</li>
-</ul>
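A minimal sketch of these administrator commands, run by the HDFS super-user (the -safemode subcommand shown, get, is assumed here as the usual way to query the current state; the exact report output varies by cluster):

    bin/hadoop dfsadmin -report
    bin/hadoop dfsadmin -safemode get
    bin/hadoop dfsadmin -finalizeUpgrade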
-</div> 
-<h2 class="h3"> Secondary NameNode </h2>
-<div class="section">
-<p>The NameNode stores modifications to the file system as a log appended to a native file system file (<span class="codefrag">edits</span>). When a NameNode starts up, it reads the HDFS state from an image file (<span class="codefrag">fsimage</span>), applies the edits from the edits log file, writes the new HDFS state back to the <span class="codefrag">fsimage</span>, and starts normal operation with an empty edits file. Because the NameNode merges <span class="codefrag">fsimage</span> and <span class="codefrag">edits</span> only during startup, the edits log file can grow very large over time, especially on large clusters. Another side effect of a large edits file is that the next NameNode restart takes a long time.
-   </p>
-<p>
-     The Secondary NameNode merges the fsimage and the edits log periodically, keeping the edits log size within a limit. Because its memory requirements are on the same order as the NameNode's, it is usually run on a different machine than the NameNode. The Secondary NameNode is started by <span class="codefrag">bin/start-dfs.sh</span> on the nodes specified in <span class="codefrag">conf/masters</span>.
-   </p>
-<p>
-The start of the checkpoint process on the Secondary NameNode is controlled by two configuration parameters:
-</p>
-<ul>
-<li>
-<span class="codefrag">fs.checkpoint.period</span>: the maximum delay between two consecutive checkpoints; the default is one hour.
-      </li>
-<li>
-<span class="codefrag">fs.checkpoint.size</span>: the size of the edits log file that forces a checkpoint even if the maximum checkpoint delay has not been reached; the default is 64 MB.
-      </li>
-</ul>
-<p>
-     The Secondary NameNode stores the latest checkpoint in a directory structured the same way as the NameNode's directory, so the checkpoint image is always ready to be read by the NameNode when needed.
-   </p>
-<p>
-     If all other copies of the image and the edits files are lost, the NameNode can import the latest checkpoint. To do so:
-   </p>
-<ul>
-<li>
-        Create an empty directory at the location specified by the <span class="codefrag">dfs.name.dir</span> configuration variable;
-      </li>
-<li>
-        Set the configuration variable <span class="codefrag">fs.checkpoint.dir</span> to the location of the checkpoint directory;
-      </li>
-<li>
-        Start the NameNode with the <span class="codefrag">-importCheckpoint</span> option.
-      </li>
-</ul>
-<p>
-     The NameNode reads the checkpoint from the <span class="codefrag">fs.checkpoint.dir</span> directory and saves it to the NameNode directory given by <span class="codefrag">dfs.name.dir</span>. The NameNode fails to start if <span class="codefrag">dfs.name.dir</span> already contains a legal image. The NameNode verifies that the image in <span class="codefrag">fs.checkpoint.dir</span> is consistent, but does not modify it.
-   </p>
-<p>
-     For command usage, see the <a href="commands_manual.html#secondarynamenode"><span class="codefrag">secondarynamenode</span> command</a>. A sketch of the import procedure is given below.
-   </p>
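A minimal sketch of the import procedure above, with hypothetical paths standing in for the values of dfs.name.dir and fs.checkpoint.dir in the configuration:

    mkdir -p /data/dfs/name              # empty directory named by dfs.name.dir
    # fs.checkpoint.dir is set to the Secondary NameNode's checkpoint directory
    bin/hadoop namenode -importCheckpoint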
-</div> 
-<h2 class="h3"> Rebalancer </h2>
-<div class="section">
-<p>
-      HDFS data might not always be placed uniformly across DataNodes. One common reason is the addition of new DataNodes to an existing cluster. While placing new blocks (the data for a file is stored as a series of blocks), the NameNode considers various parameters before choosing the DataNodes that receive them. Some of these considerations are:
-    </p>
-<ul>
-<li>
-	Keeping one replica of a block on the node that is writing the block.
-      </li>
-<li>
-        Spreading different replicas of a block across racks, so the cluster can survive the complete loss of one rack.
-      </li>
-<li>
-        Keeping one replica on a node in the same rack as the writing node, to reduce cross-rack network I/O.
-      </li>
-<li>
-        Spreading HDFS data as uniformly as possible across the DataNodes in the cluster.
-      </li>
-</ul>
-<p>
-Because these considerations compete with each other, data might not be uniformly placed across DataNodes. HDFS provides a tool for administrators that analyzes block placement and rebalances data across the DataNodes. A brief administrator's guide for the rebalancer is available as a <a href="http://issues.apache.org/jira/secure/attachment/12368261/RebalanceDesign6.pdf">PDF</a> attached to <a href="http://issues.apache.org/jira/browse/HADOOP-1652">HADOOP-1652</a>.
-    </p>
-<p>
-     For command usage, see the <a href="commands_manual.html#balancer">balancer command</a>; a short sketch follows.
-   </p>
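A minimal sketch of invoking the rebalancer from an administrator node (the -threshold option and the value 10 are assumptions about typical usage, not a recommendation from this guide):

    bin/hadoop balancer -threshold 10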
-</div> 
-<h2 class="h3"> Rack Awareness </h2>
-<div class="section">
-<p>
-      Typically, large Hadoop clusters are arranged in racks, and network traffic between nodes within the same rack is much more desirable than traffic across racks. In addition, the NameNode tries to place replicas of a block on multiple racks for improved fault tolerance. Hadoop lets cluster administrators decide which rack a node belongs to through the <span class="codefrag">dfs.network.script</span> configuration variable. When this script is configured, each node runs it to determine its rack id. A default installation assumes all nodes belong to the same rack. This feature and its configuration parameters are described in more detail in the <a href="http://issues.apache.org/jira/secure/attachment/12345251/Rack_aware_HDFS_proposal.pdf">PDF</a> attached to <a href="http://issues.apache.org/jira/browse/HADOOP-692">HADOOP-692</a>.
-    </p>
-</div> 
-<h2 class="h3"> Safemode </h2>
-<div class="section">
-<p>
-     During startup, the NameNode loads the file system state from the fsimage and edits log files. It then waits for the DataNodes to report their blocks, so that it does not prematurely start replicating blocks even though enough replicas already exist in the cluster. During this time, the NameNode stays in safemode. Safemode for the NameNode is essentially a read-only mode for the HDFS cluster, in which no modifications to the file system or blocks are allowed. Normally, the NameNode leaves safemode automatically at the end of the startup phase. If required, HDFS can be placed in safemode explicitly with the <span class="codefrag">'bin/hadoop dfsadmin -safemode'</span> command. The NameNode front page shows whether safemode is on or off. A more detailed description and configuration is maintained in the JavaDoc for <a href="http://hadoop.apache.org/core/docs/current/api/org/apache/hadoop/dfs/NameNode.html#setSafeMode(org.apache.hadoop.dfs.FSConstants.SafeModeAction)"><span class="codefrag">setSafeMode()</span></a>.
-    </p>
-</div> 
-<h2 class="h3"> fsck </h2>
-<div class="section">
-<p>
-      HDFS supports the <span class="codefrag">fsck</span> command to check for various inconsistencies. It is designed to report problems with various files, for example missing blocks for a file or under-replicated blocks. Unlike a traditional fsck utility for native file systems, this command does not correct the errors it detects. Normally, the NameNode automatically corrects most of the recoverable failures. HDFS fsck is not a Hadoop shell command; it is run as '<span class="codefrag">bin/hadoop fsck</span>'. For command usage, see the <a href="commands_manual.html#fsck"><span class="codefrag">fsck</span> command</a>. <span class="codefrag">fsck</span> can be run on the whole file system or on a subset of files; a short sketch follows.
-     </p>
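A minimal sketch of checking a single directory (the path /user/alice is hypothetical; the -files and -blocks reporting options are assumed from common fsck usage):

    bin/hadoop fsck /user/alice -files -blocks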
-</div> 
-<h2 class="h3"> Upgrade and Rollback </h2>
-<div class="section">
-<p>When Hadoop is upgraded on an existing cluster, as with any software upgrade, new bugs or incompatible changes may affect existing applications in ways that were not discovered earlier. In any non-trivial HDFS installation, losing data is not an option, let alone restarting HDFS from scratch. HDFS allows administrators to go back to the earlier version of Hadoop and roll the cluster back to the state it was in before the upgrade. More details on HDFS upgrades are available on the <a href="http://wiki.apache.org/hadoop/Hadoop%20Upgrade">upgrade wiki</a>. HDFS can keep one such backup at a time. Before upgrading, administrators need to remove any existing backup with the <span class="codefrag">bin/hadoop dfsadmin -finalizeUpgrade</span> command. The following briefly describes a typical upgrade procedure (a condensed sketch follows the list):
-     </p>
-<ul>
-<li>Before upgrading the Hadoop software, check whether a backup already exists and, if so, finalize the previous upgrade to remove it. <span class="codefrag">dfsadmin -upgradeProgress status</span> tells whether the cluster needs to be finalized.</li>
-<li>Stop the cluster and deploy the new version of Hadoop.</li>
-<li>Run the new version with the <span class="codefrag">-upgrade</span> option (<span class="codefrag">bin/start-dfs.sh -upgrade</span>).
-      </li>
-<li>Most of the time, the cluster works just fine. Once the new HDFS is considered to be working well (perhaps after a few days of operation), finalize the upgrade. Note that until the cluster is finalized, deleting files that existed before the upgrade does not free up real disk space on the DataNodes.</li>
-<li>If there is a need to move back to the old version,
-	<ul>
-<li>stop the cluster and deploy the earlier version of Hadoop;</li>
-<li>start the cluster with the rollback option (<span class="codefrag">bin/start-dfs.sh -rollback</span>).</li>
-</ul>
-</li>
-</ul>
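A condensed sketch of the commands named in the steps above, run by the HDFS administrator (cluster stop/start and software deployment steps are elided):

    bin/hadoop dfsadmin -upgradeProgress status   # does a previous upgrade still need finalizing?
    bin/hadoop dfsadmin -finalizeUpgrade          # remove the existing backup, if any
    bin/start-dfs.sh -upgrade                     # bring up the new version with an upgrade
    bin/start-dfs.sh -rollback                    # or roll back to the previously deployed version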
-</div> 
-<h2 class="h3"> File Permissions and Security </h2>
-<div class="section">
-<p>
-      The file permissions are designed to be similar to file permissions on other familiar platforms such as Linux. Currently, security is limited to simple file permissions. The user that starts the NameNode is treated as the HDFS super-user. Future versions of HDFS will support network authentication protocols such as Kerberos for user authentication and encryption of data transfers. The details are discussed in the <a href="hdfs_permissions_guide.html">Permissions User and Administrator Guide</a>.
-     </p>
-</div>
-<h2 class="h3"> Scalability </h2>
-<div class="section">
-<p>
-      Hadoop currently runs on clusters with thousands of nodes. The <a href="http://wiki.apache.org/hadoop/PoweredBy">Powered By Hadoop</a> page lists some of the organizations that deploy Hadoop on large clusters. An HDFS cluster has a single NameNode, and currently the total memory available on the NameNode is the primary scalability limitation. On very large clusters, increasing the average size of the files stored in HDFS helps increase cluster size without increasing memory requirements on the NameNode. The default configuration may not suit very large clusters. The <a href="http://wiki.apache.org/hadoop/FAQ">Hadoop FAQ</a> page lists configuration improvements for large Hadoop clusters.</p>
-</div>
-<h2 class="h3"> Related Documentation </h2>
-<div class="section">
-<p>
-      This user guide is a starting point for learning and working with HDFS. While this document continues to improve, there is a large wealth of documentation about Hadoop and HDFS. The following list is a starting point for further exploration:
-      </p>
-<ul>
-<li>
-<a href="http://hadoop.apache.org/">Hadoop Home Page</a>: the start page for everything Hadoop.
-      </li>
-<li>
-<a href="http://wiki.apache.org/hadoop/FrontPage">Hadoop Wiki</a>: the front page of the Hadoop Wiki documentation. Unlike this guide, which is part of the Hadoop source tree, the Hadoop Wiki is regularly edited by the Hadoop community.
-      </li>
-<li>The <a href="http://wiki.apache.org/hadoop/FAQ">FAQ</a> on the Hadoop Wiki.
-      </li>
-<li>The Hadoop <a href="http://hadoop.apache.org/core/docs/current/api/">JavaDoc API</a>.</li>
-<li>The Hadoop user mailing list: <a href="mailto:core-user@hadoop.apache.org">core-user[at]hadoop.apache.org</a>.</li>
-<li>Explore the <span class="codefrag">conf/hadoop-default.xml</span> file. It includes brief descriptions of most of the available configuration variables.</li>
-<li>
-<a href="commands_manual.html">Commands Manual</a>: command usage.
-      </li>
-</ul>
-</div>
-     
-  
-</div>

File diff suppressed because it is too large
+ 0 - 195
docs/cn/hdfs_user_guide.pdf


+ 0 - 257
docs/cn/hod.html

@@ -1,257 +0,0 @@
-<h1> 
-      Hadoop On Demand
-    </h1>
-  
-<h2 class="h3">Introduction</h2>
-<div class="section">
-<p>Hadoop On Demand (HOD) is a system for provisioning virtual Hadoop clusters over a large physical cluster. It uses the Torque resource manager to allocate nodes. On the allocated nodes, it can start Hadoop Map/Reduce and HDFS daemons. It automatically generates the appropriate configuration files (hadoop-site.xml) for the Hadoop daemons and clients. HOD also has the capability to distribute Hadoop to the nodes in the virtual cluster it allocates. In short, HOD makes it easy for administrators and users to quickly set up and use Hadoop. It is also a very useful tool for Hadoop developers and testers who need to share a physical cluster for testing their own Hadoop versions.</p>
-</div>
-<h2 class="h3">Documentation</h2>
-<div class="section">
-<p>You can learn more about using HOD by reading the following documents:</p>
-<ul>
-<li>
-<a href="hod_admin_guide.html">HOD Administrator Guide</a>: this guide gives an overview of the architecture of HOD, the Torque resource manager, and various other supporting tools, and shows you how to install, configure, and run HOD.</li>
-<li>
-<a href="hod_config_guide.html">HOD Configuration Guide</a>: this guide discusses the HOD configuration sections and shows you how to use the most important and commonly used configuration options.</li>
-<li>
-<a href="hod_user_guide.html">HOD User Guide</a>: this guide shows you how to get started with HOD, its various features and command-line options, and also gives detailed troubleshooting help.</li>
-</ul>
-</div>
-  
-</div>

File diff suppressed because it is too large
+ 0 - 144
docs/cn/hod.pdf


+ 0 - 557
docs/cn/hod_admin_guide.html

@@ -1,557 +0,0 @@
-<h1> 
-      Hadoop On Demand
-    </h1>
-<a name="N1000C"></a><a name="%E6%A6%82%E8%BF%B0"></a>
-<h2 class="h3">概述</h2>
-<div class="section">
-<p>Hadoop On Demand (HOD)是一个能在一个共享集群上供应和管理相互独立的Hadoop Map/Reduce和Hadoop分布式文件系统(HDFS)实例的系统。它能让管理员和用户轻松地快速搭建和使用hadoop。HOD对Hadoop的开发人员和测试人员也非常有用,他们可以通过HOD共享一个物理集群来测试各自不同的Hadoop版本。</p>
-<p>HOD依赖资源管理器(RM)来分配节点,这些节点被用来在之上运行hadoop实例。目前,HOD采用的是<a href="http://www.clusterresources.com/pages/products/torque-resource-manager.php">Torque资源管理器</a>。
-</p>
-<p>
-基本的HOD系统架构包含的下列组件:</p>
-<ul>
-  
-<li>一个资源管理器(可能同时附带一个调度程序)</li>
-  
-<li>各种HOD的组件 </li>
-  
-<li>Hadoop Map/Reduce和HDFS守护进程</li>
-
-</ul>
-<p>
-通过与以上组件交互,HOD在给定的集群上供应和维护Hadoop Map/Reduce实例,或者HDFS实例。集群中的节点可看作由两组节点构成:</p>
-<ul>
-  
-<li>提交节点(Submit nodes):用户通过HOD客户端在这些节点上申请集群,之后通过Hadoop客户端提交Hadoop作业。</li>
-  
-<li>计算节点(Compute nodes):利用资源管理器,HOD组件在这些节点上运行以供应Hadoop守护进程。之后,Hadoop作业在这些节点上运行。</li>
-
-</ul>
-<p>
-下面是对申请集群及在之上运行作业所需操作步骤的简要描述。
-</p>
-<ul>
-  
-<li>用户在提交节点上用HOD客户端分配所需数目节点的集群,在上面供应Hadoop。</li>
-  
-<li>HOD客户端利用资源管理器接口(在Torque中是qsub)提交一个被称为RingMaster的HOD进程作为一个资源管理器作业,申请理想数目的节点。这个作业被提交到资源管理器的中央服务器上(在Torque中叫pbs_server)。</li>
-  
-<li>在计算节点上,资源管理器的从(slave)守护程序(Torque中的pbs_moms)接受并处理中央服务器(Torque中的pbs_server)分配的作业。RingMaster进程在其中一个计算节点(Torque中的mother superior)上开始运行。</li>
-  
-<li>之后,Ringmaster通过资源管理器的另外一个接口(在Torque中是pbsdsh)在所有分配到的计算节点上运行第二个HOD组件HodRing,即分布式任务。</li>
-  
-<li>HodRing初始化之后会与RingMaster通信获取Hadoop指令,并遵照执行。一旦Hadoop的命令开始启动,它们会向RingMaster登记,提供关于守护进程的信息。</li>
-  
-<li>Hadoop实例所需的配置文件全部由HOD自己生成,有一些来自于用户在配置文件设置的选项。</li>
-  
-<li>HOD客户端保持和RingMaster的通信,找出JobTracker和HDFS守护进程的位置所在。</li>
-
-</ul>
-<p>之后的文档会讲述如何在一个物理集群的节点上安装HOD。</p>
-</div>
-
-
-<a name="N10056"></a><a name="%E5%85%88%E5%86%B3%E6%9D%A1%E4%BB%B6"></a>
-<h2 class="h3">先决条件</h2>
-<div class="section">
-<p>要使用HOD,你的系统应包含下列的硬件和软件</p>
-<p>操作系统: HOD目前在RHEL4上测试通过。<br>
-节点:HOD至少需要3个由资源管理器配置的节点。<br>
-</p>
-<p>软件</p>
-<p>在使用HOD之前,以下组件必须被安装到所有节点上:</p>
-<ul>
- 
-<li>Torque:资源管理器</li>
- 
-<li>
-<a href="http://www.python.org">Python</a>:HOD要求Python 2.5.1</li>
-
-</ul>
-<p>下列组件是可选的,你可以安装以获取HOD更好的功能:</p>
-<ul>
- 
-<li>
-<a href="http://twistedmatrix.com/trac/">Twisted Python</a>:这个可以用来提升HOD的可扩展性。如果检测到这个模块已安装,HOD就用它,否则就使用默认的模块。</li>
-
- 
-<li>
-<a href="http://hadoop.apache.org/core/">Hadoop</a>:HOD能自动将Hadoop分发到集群的所有节点上。不过,如果Hadoop在所有节点上已经可用,HOD也可以使用已经安装好的Hadoop。HOD目前支持Hadoop 0.15和其后续版本。</li>
-
-</ul>
-<p>注释: HOD的配置需要以上这些组件的安装位置在集群所有节点上保持一致。如果在提交节点上的安装位置也相同,配置起来会更简单。</p>
-</div>
-
-
-<a name="N1008D"></a><a name="%E8%B5%84%E6%BA%90%E7%AE%A1%E7%90%86%E5%99%A8"></a>
-<h2 class="h3">资源管理器</h2>
-<div class="section">
-<p>目前,HOD使用Torque资源管理器来分配节点和提交作业。Torque是一个开源的资源管理器,来自于<a href="http://www.clusterresources.com">Cluster Resources</a>,是一个社区基于PBS项目努力的结晶。它提供对批处理作业和分散的计算节点(Compute nodes)的控制。你可以自由地从<a href="http://www.clusterresources.com/downloads/torque/">此处</a>下载Torque。</p>
-<p>所有torque相关的文档可以在<a href="http://www.clusterresources.com/pages/resources/documentation.php">这儿</a>的TORQUE Resource Manager一节找到。在<a href="http://www.clusterresources.com/wiki/doku.php?id=torque:torque_wiki">这里</a>可以看到wiki文档。如果想订阅TORQUE的邮件列表或查看问题存档,访问<a href="http://www.clusterresources.com/pages/resources/mailing-lists.php">这里</a>。</p>
-<p>To use HOD with Torque (the commands from these steps are consolidated into an illustrative sketch after the list):</p>
-<ul>
-
-<li>安装Torque组件:在一个节点上(head node)安装pbs_server,所有计算节点上安装pbs_mom,所有计算节点和提交节点上安装PBS客户端。至少做最基本的配置,使Torque系统跑起来,也就是,使pbs_server能知道该和哪些机器通话。查看<a href="http://www.clusterresources.com/wiki/doku.php?id=torque:1.2_basic_configuration">这里</a>可以了解基本配置。要了解高级配置,请查看<a href="http://www.clusterresources.com/wiki/doku.php?id=torque:1.3_advanced_configuration">这里</a>。</li>
- 
-<li>在pbs_server上创建一个作业提交队列。队列的名字和HOD的配置参数resource-manager.queue相同。Hod客户端利用此队列提交RingMaster进程作为Torque作业。</li>
- 
-<li>在集群的所有节点上指定一个cluster name作为property。这可以用qmgr命令做到。比如:<span class="codefrag">qmgr -c "set node node properties=cluster-name"</span>。集群名字和HOD的配置参数hod.cluster是相同的。</li>
- 
-<li>确保作业可以提交到节点上去。这可以通过使用qsub命令做到。比如:<span class="codefrag">echo "sleep 30" | qsub -l nodes=3</span>
-</li>
-
-</ul>
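A consolidated, illustrative sketch of the steps above. The queue name "hod" and the cluster name "mycluster" are placeholders (use whatever resource-manager.queue and hod.cluster are set to in your hodrc), and the qmgr syntax should be checked against your Torque version's documentation:

    # On the pbs_server host, as the Torque administrator:
    qmgr -c "create queue hod queue_type=execution"      # job submission queue used by HOD
    qmgr -c "set queue hod enabled=true"
    qmgr -c "set queue hod started=true"

    # Tag each compute node with the cluster name (repeat for every compute node):
    qmgr -c "set node compute-node-01 properties=mycluster"

    # From a submit node, verify that jobs can be submitted and run:
    echo "sleep 30" | qsub -q hod -l nodes=3
    qstat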
-</div>
-
-
-<a name="N100CD"></a><a name="%E5%AE%89%E8%A3%85HOD"></a>
-<h2 class="h3">安装HOD</h2>
-<div class="section">
-<p>Now that the resource manager is set up, we can obtain and install HOD.</p>
-<ul>
-<li>If you are getting HOD from the Hadoop tarball, it is available under the 'contrib' section, in the root directory 'hod'.</li>
-<li>If you are building from source, run ant tar from the Hadoop root directory to generate the Hadoop tarball, and then pick up HOD from it as described above.</li>
-<li>Distribute all the files in this directory to all the nodes of the cluster. Note that the location the files are copied to should be the same on all the nodes.</li>
-<li>Note that building hadoop also builds HOD and correctly sets the permissions of all the script files that HOD requires.</li>
-</ul>
-</div>
-
-
-<a name="N100E6"></a><a name="%E9%85%8D%E7%BD%AEHOD"></a>
-<h2 class="h3">配置HOD</h2>
-<div class="section">
-<p>安装HOD后你就可以配置它。为了运行HOD需要做的最小配置会在下面讲述,更多高级的配置会在HOD配置指南里面讲解。</p>
-<a name="N100EF"></a><a name="%E6%9C%80%E5%B0%8F%E9%85%8D%E7%BD%AE"></a>
-<h3 class="h4">最小配置</h3>
-<p>The following minimal configuration is required to get HOD running (an illustrative hodrc sketch follows this list):</p>
-<ul>
- 
-<li>在你想要运行hod的节点上,编辑&lt;install dir&gt;/conf目录下的hodrc文件。这个文件包含了运行hod所必需的最少量的设置。</li>
- 
-<li>
-
-<p>For the variables defined in this configuration file, specify values appropriate to your environment. Note that some of the variables appear more than once in the file.</p>
-
-  
-<ul>
-   
-<li>${JAVA_HOME}:Hadoop的Java的安装位置。Hadoop支持Sun JDK 1.5.x及以上版本。</li>
-   
-<li>${CLUSTER_NAME}:集群名称,由'node property'指定,在资源管理器配置中曾提到过。</li>
-   
-<li>${HADOOP_HOME}:Hadoop在计算节点和提交节点上的安装位置。</li>
-   
-<li>${RM_QUEUE}:在资源管理器配置中设置的作业提交队列。</li>
-   
-<li>${RM_HOME}:资源管理器在计算节点和提交节点的安装位置。</li>
-    
-</ul>
-
-</li>
-
-
-<li>
-
-<p>以下环境变量可能需要设置,取决于你的系统环境。在你运行HOD客户端的地方这些变量必须被定义,也必须在HOD配置文件中通过设定resource_manager.env-vars的值指定。多个变量可指定为用逗号分隔的key=value对组成的列表。</p>
-
-<ul>
-   
-<li>HOD_PYTHON_HOME:如果python安装在计算节点或提交节点的非默认位置,那么这个值必须设定为python的可执行文件的实际位置。</li>
-
-</ul>
-
-</li>
-
-</ul>
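An illustrative minimal hodrc sketch based on the variables above. The section and option names are taken from the HOD configuration guide in this documentation set; all paths, the queue name and the cluster name are placeholder assumptions, the list of options is not exhaustive, and the template shipped under &lt;install dir&gt;/conf/hodrc remains the authoritative starting point:

    # Sketch only -- adapt every value to your site before use.
    cat > /path/to/hod/conf/hodrc <<'EOF'
    [hod]
    # ${JAVA_HOME}: Java used by Hadoop
    java-home = /usr/java/jdk1.5
    # ${CLUSTER_NAME}: the Torque 'node property' set during resource manager configuration
    cluster = mycluster

    [resource_manager]
    # ${RM_QUEUE}: job submission queue created in the resource manager
    queue = hod
    # ${RM_HOME}: resource manager installation on compute and submit nodes
    batch-home = /usr/local/torque
    # Needed only if python is not in a default location on the nodes
    env-vars = HOD_PYTHON_HOME=/usr/local/bin/python

    [gridservice-mapred]
    # ${HADOOP_HOME}: Hadoop installation on compute and submit nodes
    pkgs = /usr/local/hadoop

    [gridservice-hdfs]
    pkgs = /usr/local/hadoop
    EOF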
-<a name="N10123"></a><a name="%E9%AB%98%E7%BA%A7%E9%85%8D%E7%BD%AE"></a>
-<h3 class="h4">高级配置</h3>
-<p>你可以检查和修改其它配置选项来满足你的特定需要。关于HOD配置的更多信息,请参考<a href="hod_config_guide.html">配置指南</a>。</p>
-</div>
-  
-<a name="N10132"></a><a name="%E8%BF%90%E8%A1%8CHOD"></a>
-<h2 class="h3">运行HOD</h2>
-<div class="section">
-<p>当HOD配置好后,你就可以运行它了。更多信息请参考<a href="hod_user_guide.html">HOD用户指南</a>。</p>
-</div>
-
-  
-<a name="N10140"></a><a name="%E6%94%AF%E6%8C%81%E5%B7%A5%E5%85%B7%E5%92%8C%E5%AE%9E%E7%94%A8%E7%A8%8B%E5%BA%8F"></a>
-<h2 class="h3">支持工具和实用程序</h2>
-<div class="section">
-<p>此节描述一些可用于管理HOD部署的支持工具和应用程序。</p>
-<a name="N10149"></a><a name="logcondense.py+-+%E7%AE%A1%E7%90%86%E6%97%A5%E5%BF%97%E6%96%87%E4%BB%B6"></a>
-<h3 class="h4">logcondense.py - 管理日志文件</h3>
-<p>在<a href="hod_user_guide.html#%E6%94%B6%E9%9B%86%E5%92%8C%E6%9F%A5%E7%9C%8BHadoop%E6%97%A5%E5%BF%97">HOD用户指南</a>有提到,HOD可配置成将Hadoop日志上传到一个配置好的静态HDFS上。随着时间增加,日志数量会不断增长。logcondense.py可以帮助管理员清理上传到HDFS的日志文件。</p>
-<a name="N10156"></a><a name="%E8%BF%90%E8%A1%8Clogcondense.py"></a>
-<h4>运行logcondense.py</h4>
-<p>logcondense.py在hod_install_location/support文件夹下。你可以使用python去运行它,比如<em>python logcondense.py</em>,或者授以执行权限,直接运行<em>logcondense.py</em>。如果启用了权限,logcondense.py需要被有足够权限,能删除HDFS上上传目录下日志文件的用户运行。比如,在<a href="hod_config_guide.html#3.7+hodring%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9">配置指南</a>中提及过,用户可以配置将日志放在HDFS上的其主目录下。在这种情况下,你需要具有超级用户权限,才能运行logcondense.py删除所有用户主目录下的日志文件。</p>
-<a name="N1016A"></a><a name="logcondense.py%E7%9A%84%E5%91%BD%E4%BB%A4%E8%A1%8C%E9%80%89%E9%A1%B9"></a>
-<h4>logcondense.py的命令行选项</h4>
-<p>logcondense.py支持以下命令行选项</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-            
-<tr>
-              
-<td colspan="1" rowspan="1">短选项</td>
-              <td colspan="1" rowspan="1">长选项</td>
-              <td colspan="1" rowspan="1">含义</td>
-              <td colspan="1" rowspan="1">例子</td>
-            
-</tr>
-            
-<tr>
-              
-<td colspan="1" rowspan="1">-p</td>
-              <td colspan="1" rowspan="1">--package</td>
-              <td colspan="1" rowspan="1">hadoop脚本的全路径。Hadoop的版本必须和运行HDFS的版本一致。</td>
-              <td colspan="1" rowspan="1">/usr/bin/hadoop</td>
-            
-</tr>
-            
-<tr>
-              
-<td colspan="1" rowspan="1">-d</td>
-              <td colspan="1" rowspan="1">--days</td>
-              <td colspan="1" rowspan="1">删除超过指定天数的日志文件</td>
-              <td colspan="1" rowspan="1">7</td>
-            
-</tr>
-            
-<tr>
-              
-<td colspan="1" rowspan="1">-c</td>
-              <td colspan="1" rowspan="1">--config</td>
-              <td colspan="1" rowspan="1">Hadoop配置目录的路径,hadoop-site.xml存在于此目录中。hadoop-site.xml中须指明待删除日志存放的HDFS的NameNode。</td>
-              <td colspan="1" rowspan="1">/home/foo/hadoop/conf</td>
-            
-</tr>
-            
-<tr>
-              
-<td colspan="1" rowspan="1">-l</td>
-              <td colspan="1" rowspan="1">--logs</td>
-              <td colspan="1" rowspan="1">一个HDFS路径,须和log-destination-uri指定的是同一个HDFS路径,不带hdfs:// URI串,这点在<a href="hod_config_guide.html#3.7+hodring%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9">配置指南</a>中提到过。</td>
-              <td colspan="1" rowspan="1">/user</td>
-            
-</tr>
-            
-<tr>
-              
-<td colspan="1" rowspan="1">-n</td>
-              <td colspan="1" rowspan="1">--dynamicdfs</td>
-            <td colspan="1" rowspan="1">如果为true,logcondense.py除要删除Map/Reduce日志之外还需删除HDFS日志。否则,它只删除Map/Reduce日志,这也是不指定这个选项时的默认行为。这个选项对下面的情况非常有用:一个动态的HDFS由HOD供应,一个静态的HDFS用来收集日志文件 - 也许这是测试集群中一个非常普遍的使用场景。</td>
-              <td colspan="1" rowspan="1">false</td>
-            
-</tr>
-          
-</table>
-<p>比如,假如要删除所有7天之前的日志文件,hadoop-site.xml存放在~/hadoop-conf下,hadoop安装于~/hadoop-0.17.0,你可以这样:</p>
-<p>
-<em>python logcondense.py -p ~/hadoop-0.17.0/bin/hadoop -d 7 -c ~/hadoop-conf -l /user</em>
-</p>
-<a name="N1020D"></a><a name="checklimits.sh+-+%E7%9B%91%E8%A7%86%E8%B5%84%E6%BA%90%E9%99%90%E5%88%B6"></a>
-<h3 class="h4">checklimits.sh - 监视资源限制</h3>
-<p>checklimits.sh是一个针对Torque/Maui环境的HOD工具(<a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">Maui集群调度器</a> 是一个用于集群和超级计算机的开源作业调度器,来自clusterresourcces)。当新提交的作业违反或超过用户在Maui调度器里设置的限制时,checklimits.sh脚本更新torque的comment字段。它使用qstat在torque的job-list中做一次遍历确定作业是在队列中还是已完成,运行Maui工具checkjob检查每一个作业是否违反用户限制设定,之后运行torque的qalter工具更新作业的'comment'的属性。当前,它把那些违反限制的作业的comment的值更新为<em>User-limits exceeded. Requested:([0-9]*) Used:([0-9]*) MaxLimit:([0-9]*)</em>。之后,HOD根据这个注释内容做出相应处理。
-      </p>
-<a name="N1021D"></a><a name="%E8%BF%90%E8%A1%8Cchecklimits.sh"></a>
-<h4>运行checklimits.sh</h4>
-<p>checklimits.sh can be found in the hod_install_location/support directory. After it has been given execute permission, the shell script can be run directly as <em>sh checklimits.sh </em>or <em>./checklimits.sh</em>. The Torque and Maui binaries must be installed on the machine where the tool runs and must be on the path of the shell script's process. To update the comment attribute of jobs belonging to different users, the tool must be run with Torque administrative privileges. It must also be run repeatedly at some interval, for example via cron, so that the job constraints stay up to date. Note that the resource manager and scheduler commands it invokes can be expensive to run, so it is best not to run it in a tight loop without sleeping.
-        </p>
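As a hedged example of the periodic invocation suggested above, a cron entry along the following lines could be installed for a user with Torque administrative privileges. The ten-minute interval and the installation path are assumptions, not recommendations from this guide:

    # run checklimits.sh every ten minutes (crontab entry)
    */10 * * * * /path/to/hod_install_location/support/checklimits.sh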
-<a name="N1022E"></a><a name="verify-account+-+%E7%94%A8%E4%BA%8E%E6%A0%B8%E5%AE%9E%E7%94%A8%E6%88%B7%E6%8F%90%E4%BA%A4%E4%BD%9C%E4%B8%9A%E6%89%80%E4%BD%BF%E7%94%A8%E7%9A%84%E5%B8%90%E5%8F%B7%E7%9A%84%E8%84%9A%E6%9C%AC"></a>
-<h3 class="h4">verify-account - 用于核实用户提交作业所使用的帐号的脚本</h3>
-<p>生产系统一般使用帐号系统来对使用共享资源的用户收费。HOD支持一个叫<em>resource_manager.pbs-account</em>的参数,用户可以通过这个参数来指定提交作业时使用的帐号。核实这个帐户在帐号管理系统中的有效性是有必要的。脚本<em>hod-install-dir/bin/verify-account</em>提供了一种机制让用户插入自定义脚本来实现这个核实过程。</p>
-<a name="N1023D"></a><a name="%E5%9C%A8HOD%E4%B8%AD%E9%9B%86%E6%88%90verify-account"></a>
-<h4>在HOD中集成verify-account</h4>
-<p>在分配集群之前,HOD运行<em>verify-account</em>脚本,将<em>resource_manager.pbs-account</em>的值作为参数传递给用户自定义脚本来完成用户的确认。系统还可以通过这种方式来取代它本身的帐号系统。若该用户脚本中的返回值非0,就会导致HOD分配集群失败。并且在发生错误时,HOD还会将脚本中产生的错误信息打印出来。通过这种方式,任何描述性的错误信息都可以从用户脚本中返回给用户。
-      </p>
-<p>在HOD中自带的默认脚本是不做任何的用户核实,并返回0。</p>
-<p>如果HOD没有找到上面提到的verify-account脚本,HOD就会认为该用户核实的功能被关闭,然后继续自己以后的分配工作。</p>
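A minimal sketch of what a site-specific verify-account script might look like. The interface described above (the account name arrives as the first argument, a non-zero exit status aborts the allocation, and anything printed is relayed back to the user) comes from this section; the allowed-accounts file and its location are purely hypothetical:

    #!/bin/sh
    # hod-install-dir/bin/verify-account (illustrative sketch)
    # $1 is the value of resource_manager.pbs-account passed in by HOD.
    ACCOUNT="$1"
    ALLOWED=/etc/hod/allowed-accounts       # hypothetical site-specific list, one account per line
    if grep -qx "$ACCOUNT" "$ALLOWED"; then
        exit 0                              # account is valid; allocation proceeds
    else
        echo "Account '$ACCOUNT' is not authorized for HOD allocations."
        exit 1                              # non-zero exit makes HOD fail the allocation
    fi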
-</div>
-

File diff suppressed because it is too large
+ 0 - 162
docs/cn/hod_admin_guide.pdf


+ 0 - 422
docs/cn/hod_config_guide.html

@@ -1,422 +0,0 @@
-<h1> 
-      Hadoop On Demand:配置指南
-    </h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#1.+%E7%AE%80%E4%BB%8B">1. 简介</a>
-</li>
-<li>
-<a href="#2.+%E6%AE%B5">2. 段</a>
-</li>
-<li>
-<a href="#3.+HOD%E9%85%8D%E7%BD%AE%E9%A1%B9">3. HOD配置项</a>
-<ul class="minitoc">
-<li>
-<a href="#3.1+%E4%B8%80%E8%88%AC%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9">3.1 一般的配置项</a>
-</li>
-<li>
-<a href="#3.2+hod%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9">3.2 hod的配置项</a>
-</li>
-<li>
-<a href="#3.3+resouce_manager%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9">3.3 resouce_manager的配置项</a>
-</li>
-<li>
-<a href="#3.4+ringmaster%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9">3.4 ringmaster的配置项</a>
-</li>
-<li>
-<a href="#3.5+gridservice-hdfs%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9">3.5 gridservice-hdfs的配置项</a>
-</li>
-<li>
-<a href="#3.6+gridservice-mapred%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9">3.6 gridservice-mapred的配置项</a>
-</li>
-<li>
-<a href="#3.7+hodring%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9">3.7 hodring的配置项</a>
-</li>
-</ul>
-</li>
-</ul>
-</div>
-    
-<a name="N1000C"></a><a name="1.+%E7%AE%80%E4%BB%8B"></a>
-<h2 class="h3">1. 简介</h2>
-<div class="section">
-<p>
-      这个文档讲述了一些最重要和常用的Hadoop On Demand(HOD)的配置项。
-      这些配置项可通过两种方式指定:INI风格的配置文件,通过--section.option[=value]格式指定的HOD shell的命令行选项。如果两个地方都指定了同一个选项,命令行中的值覆盖配置文件中的值。
-	</p>
-<p>
-	你可以通过以下命令获得所有配置项的简要描述:
-      </p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod --verbose-help</span></td>
-</tr>
-</table>
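For example, a configuration file option could be overridden on the command line along these lines. The option names come from the sections below; the cluster directory, node count and values shown are placeholders:

    $ hod allocate -d ~/hod-clusters/test -n 5 --hod.debug=4 --resource_manager.queue=myqueue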
-</div>
-    
-<a name="N10021"></a><a name="2.+%E6%AE%B5"></a>
-<h2 class="h3">2. 段</h2>
-<div class="section">
-<p>HOD配置文件分成以下几个配置段:</p>
-<ul>
-        
-<li>  hod:                 HOD客户端的配置项</li>
-        
-<li>  resource_manager:    指定要使用的资源管理器的配置项,以及使用该资源管理器时需要的一些其他参数。</li>
-        
-<li>  ringmaster:          RingMaster进程的配置项</li>
-        
-<li>  hodring:             HodRing进程的配置项</li>
-        
-<li>  gridservice-mapred:  Map/Reduce守护进程的配置项</li>
-        
-<li>  gridservice-hdfs:    HDFS守护进程的配置项</li>
-      
-</ul>
-</div>
-     
-<a name="N10040"></a><a name="3.+HOD%E9%85%8D%E7%BD%AE%E9%A1%B9"></a>
-<h2 class="h3">3. HOD配置项</h2>
-<div class="section">
-<p>
-      接下来的一节会先描述大部分HOD配置段中通用的一些配置项,再描述各配置段特有的配置项。
-      </p>
-<a name="N10049"></a><a name="3.1+%E4%B8%80%E8%88%AC%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9"></a>
-<h3 class="h4">3.1 一般的配置项</h3>
-<p>某些配置项会在HOD配置中的多个段定义。在一个段中定义的配置项,会被该段所适用的所有进程使用。这些配置项意义相同,但在不同的段中可以有不同的取值。</p>
-<ul>
-          
-<li>temp-dir: the temporary directory used by HOD processes. Make sure the user running hod has permission to create subdirectories under the specified directory. If you want each allocation to use a different temporary directory, you can use an environment variable that the resource manager makes available to HOD processes. For example, in a Torque setup, specifying --ringmaster.temp-dir=/tmp/hod-temp-dir.$PBS_JOBID makes the ringmaster use a different temporary directory for every allocation; Torque expands this environment variable before the ringmaster starts.
-          </li>
-          
-<li>debug:数值类型,取值范围是1-4。4会产生最多的log信息。</li>
-          
-<li>log-dir: the directory in which log files are stored. The default is &lt;install-location&gt;/logs/. The restrictions and notes given for temp-dir apply here as well.
-          </li>
-          
-<li>xrs-port-range:端口范围,会在这之中挑选一个可用端口用于运行XML-RPC服务。</li>
-          
-<li>http-port-range:端口范围,会在这之中挑选一个可用端口用于运行HTTP服务。</li>
-          
-<li>java-home:给Hadoop使用的Java的位置。</li>
-          
-<li>syslog-address:syslog守护进程要绑定的地址。格式为host:port。如果配置了这个选项,HOD日志信息会被记录到这个位置的syslog。</li>
-        
-</ul>
-<a name="N1006B"></a><a name="3.2+hod%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9"></a>
-<h3 class="h4">3.2 hod的配置项</h3>
-<ul>
-          
-<li>cluster:集群的描述性名称。对于Torque,这个值被指定为集群中所有节点的'Node property'。HOD使用这个值计算可用节点的个数。</li>
-          
-<li>client-params:逗号分割的hadoop配置参数列表,其中的每一项都是一个key-value对。在提交节点上会据此产生一个hadoop-site.xml,用于运行Map/Reduce作业。</li>
-          
-          
-<li>job-feasibility-attr: 正则表达式,用于指定是否和如何检查作业的可行性 - 资源管理器限制或调度限制。目前是通过torque作业的'comment'属性实现的,缺省情况下没有开启这个功能。设置了这个配置项后,HOD会使用它来确定哪些种类的限制是启用的,以及请求超出限制或者累积超出限制时是回收机群还是留在排队状态。torque comment属性可以被某个外部机制周期性地更新。比如,comment属性被hod/support目录下的<a href="hod_admin_guide.html#checklimits.sh+-+%E8%B5%84%E6%BA%90%E9%99%90%E5%88%B6%E7%9B%91%E8%A7%86%E5%99%A8">checklimits.sh</a>更新,这样设置job-feasibility-attr的值等于TORQUE_USER_LIMITS_COMMENT_FIELD, "User-limits exceeded. Requested:([0-9]*) Used:([0-9]*) MaxLimit:([0-9]*)"会使HOD产生相应的行为。</li>
-         
-</ul>
-<a name="N10082"></a><a name="3.3+resouce_manager%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9"></a>
-<h3 class="h4">3.3 resouce_manager的配置项</h3>
-<ul>
-          
-<li>queue:资源管理器中配置的队列名,作业会被提交到这里。</li>
-          
-          
-<li>batch-home: the installation directory under whose 'bin' subdirectory the resource manager's executables can be found.</li>
-          
-<li>env-vars: a comma-separated list of key=value pairs that will be passed to jobs running on the compute nodes. For example, if python is not installed in the usual location, a user can set the environment variable 'HOD_PYTHON_HOME' to the actual path of the python executable; HOD processes running on the compute nodes can then use this variable.</li>
-        
-</ul>
-<a name="N10095"></a><a name="3.4+ringmaster%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9"></a>
-<h3 class="h4">3.4 ringmaster的配置项</h3>
-<ul>
-          
-<li>work-dirs:这是一个由逗号分隔的路径列表,这些路径将作为HOD产生和传递给Hadoop,用于存放DFS和Map/Reduce数据的目录的根目录。例如,这是DFS数据块存放的路径。一般情况下,有多少块磁盘就指定多少路径,以确保所有的磁盘都被利用到。temp-dir变量的限制和注意事项在这儿同样适用。</li>
-          
-<li>max-master-failures: the number of times a hadoop master daemon may fail to launch before HOD fails the cluster allocation. In a HOD cluster there can sometimes be one or more 'bad' nodes, for example machines without java installed, without Hadoop, or with the wrong Hadoop version. When this option is set to a positive integer, the RingMaster returns an error to the client only after the hadoop master (JobTracker or NameNode) has failed to start on such bad nodes, for the reasons above, more than the configured number of times. If the number of attempts has not yet crossed that value, the same hadoop master is assigned again when the next HodRing asks to run a command. In this way HOD tries its best to make the allocation succeed even when some nodes in the cluster are bad.
-                       </li>
- 
-        
-</ul>
-<a name="N100A5"></a><a name="3.5+gridservice-hdfs%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9"></a>
-<h3 class="h4">3.5 gridservice-hdfs的配置项</h3>
-<ul>
-          
-<li>external:如果被置为false,HOD必须在通过allocate命令分配的节点上自己创建HDFS集群。注意,在这种情况下,如果集群被回收,HDFS集群会停止,所有数据会丢失。如果被置为true,它会尝试链接外部的已配置的HDFS系统。通常,因为在作业运行之前作业的输入需要被放置在HDFS上,并且作业的输出需要持久保留,在生产环境中一个内部的HDFS集群意义不大。</li>
-          
-          
-<li>host:外部配置好的NameNode的主机名。</li>
-          
-          
-<li>fs_port:NameNode RPC服务绑定的端口。</li>
-          
-          
-<li>info_port:NameNode web UI服务绑定的端口。</li>
-          
-          
-<li>pkgs:安装目录,其下有bin/hadoop可执行文件。可用来使用集群上预先安装的Hadoop版本。</li>
-          
-          
-<li>server-params:一个逗号分割的hadoop配置参数列表,每一项为key-value对形式。这些将用于产生被NameNode和DataNode使用到的hadoop-site.xml文件。</li>
-          
-          
-<li>final-server-params:除会被标记为final外和上面相同。</li>
-        
-</ul>
-<a name="N100C4"></a><a name="3.6+gridservice-mapred%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9"></a>
-<h3 class="h4">3.6 gridservice-mapred的配置项</h3>
-<ul>
-          
-<li>external:如果被置为false,HOD必须在通过allocate命令分配的节点上自己创建Map/Reduce集群。如果被置为true,它会尝试链接外部的已配置的Map/Reduce系统。</li>
-          
-<li>host:外部配置好的JobTracker的主机名。</li>
-          
-          
-<li>tracker_port:JobTracker RPC服务绑定的端口。</li>
-          
-          
-<li>info_port:JobTracker web UI服务绑定的端口。</li>
-          
-          
-<li>pkgs:安装目录,其下有bin/hadoop可执行文件。</li>
-          
-          
-<li>server-params:一个逗号分割的hadoop配置参数列表,每一项为key-value对形式。这些将用于产生被JobTracker和TaskTracker使用到的hadoop-site.xml文件。</li>
-          
-<li>final-server-params:除会被标记为final外和上面相同。</li>
-        
-</ul>
-<a name="N100E3"></a><a name="3.7+hodring%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9"></a>
-<h3 class="h4">3.7 hodring的配置项</h3>
-<ul>
-          
-<li>mapred-system-dir-root:DFS上的目录,HOD会在这个目录下创建子目录并把全路径作为参数'mapred.system.dir'的值传递给Hadoop守护进程。全路径的格式为value-of-this-option/userid/mapredsystem/cluster-id。注意,如果HDFS启用了权限,这里指定的路径下应允许所有用户创建子目录。设置此配置项的值为/user会使HOD使用用户的home目录来产生mapred.system.dir的值。</li>
-          
-<li>log-destination-uri: a URL describing a path either on an external, statically configured DFS or on the local file system of a cluster node. HOD uploads the Hadoop logs to this path when the cluster is deallocated. Use the 'hdfs://path' format to specify a DFS path and the 'file://path' format to specify a local file system path on a cluster node. Normally, hadoop logs are deleted as part of HOD's cleanup when it deallocates a cluster; this option lets you keep those logs persistently. The resulting path has the form value-of-this-option/userid/hod-logs/cluster-id. Note that all users should be able to create subdirectories under the directory specified here. Setting this value to hdfs://user moves the logs to the user's home directory on the DFS.</li>
-          
-<li>pkgs:安装目录,其下有bin/hadoop可执行文件。如果给log-destination-uri指定了一个HDFS URL,HOD上传日志时会用到这个配置项。注意,当用户使用了和外部静态HDFS不同版本的tarball时,这个配置项会派上用场。</li>
-                                      
-        
-</ul>
-</div>
-  
-</div>

File diff suppressed because it is too large
+ 0 - 140
docs/cn/hod_config_guide.pdf


+ 0 - 1251
docs/cn/hod_user_guide.html

@@ -1,1251 +0,0 @@
-<h1>
-      Hadoop On Demand用户指南
-    </h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#%E7%AE%80%E4%BB%8B">简介</a>
-</li>
-<li>
-<a href="#HOD%E4%BD%BF%E7%94%A8%E5%85%A5%E9%97%A8">HOD使用入门</a>
-<ul class="minitoc">
-<li>
-<a href="#%E4%B8%80%E4%B8%AA%E5%85%B8%E5%9E%8BHOD%E4%BC%9A%E8%AF%9D">一个典型HOD会话</a>
-</li>
-<li>
-<a href="#%E4%BD%BF%E7%94%A8HOD%E8%BF%90%E8%A1%8CHadoop%E8%84%9A%E6%9C%AC">使用HOD运行Hadoop脚本</a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#HOD%E7%9A%84%E5%8A%9F%E8%83%BD">HOD的功能</a>
-<ul class="minitoc">
-<li>
-<a href="#%E4%BE%9B%E5%BA%94%E4%B8%8E%E7%AE%A1%E7%90%86Hadoop%E9%9B%86%E7%BE%A4">供应与管理Hadoop集群</a>
-</li>
-<li>
-<a href="#%E4%BD%BF%E7%94%A8tarball%E5%88%86%E5%8F%91Hadoop">使用tarball分发Hadoop</a>
-</li>
-<li>
-<a href="#%E4%BD%BF%E7%94%A8%E5%A4%96%E9%83%A8HDFS">使用外部HDFS</a>
-</li>
-<li>
-<a href="#%E9%85%8D%E7%BD%AEHadoop%E7%9A%84%E9%80%89%E9%A1%B9">配置Hadoop的选项</a>
-</li>
-<li>
-<a href="#%E6%9F%A5%E7%9C%8BHadoop%E7%9A%84Web-UI">查看Hadoop的Web-UI</a>
-</li>
-<li>
-<a href="#%E6%94%B6%E9%9B%86%E5%92%8C%E6%9F%A5%E7%9C%8BHadoop%E6%97%A5%E5%BF%97">收集和查看Hadoop日志</a>
-</li>
-<li>
-<a href="#%E9%97%B2%E7%BD%AE%E9%9B%86%E7%BE%A4%E7%9A%84%E8%87%AA%E5%8A%A8%E5%9B%9E%E6%94%B6">闲置集群的自动回收</a>
-</li>
-<li>
-<a href="#%E6%8C%87%E5%AE%9A%E9%A2%9D%E5%A4%96%E7%9A%84%E4%BD%9C%E4%B8%9A%E5%B1%9E%E6%80%A7">指定额外的作业属性</a>
-</li>
-<li>
-<a href="#%E6%8D%95%E8%8E%B7HOD%E5%9C%A8Torque%E4%B8%AD%E7%9A%84%E9%80%80%E5%87%BA%E7%A0%81">捕获HOD在Torque中的退出码</a>
-</li>
-<li>
-<a href="#%E5%91%BD%E4%BB%A4%E8%A1%8C">命令行</a>
-</li>
-<li>
-<a href="#HOD%E9%85%8D%E7%BD%AE%E9%80%89%E9%A1%B9"> HOD配置选项</a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E6%95%85%E9%9A%9C%E6%8E%92%E9%99%A4">故障排除</a>
-<ul class="minitoc">
-<li>
-<a href="#%E5%88%86%E9%85%8D%E6%93%8D%E4%BD%9C%E6%97%B6">分配操作时hod挂起</a>
-</li>
-<li>
-<a href="#%E5%9B%9E%E6%94%B6%E6%93%8D%E4%BD%9C%E6%97%B6">回收操作时hod挂起</a>
-</li>
-<li>
-<a href="#%E5%A4%B1%E8%B4%A5%E6%97%B6%E7%9A%84%E9%94%99%E8%AF%AF%E4%BB%A3%E7%A0%81%E5%92%8C%E9%94%99%E8%AF%AF%E4%BF%A1%E6%81%AF">hod失败时的错误代码和错误信息</a>
-</li>
-<li>
-<a href="#Hadoop+DFSClient%E8%AD%A6%E5%91%8ANotReplicatedYetException%E4%BF%A1%E6%81%AF">Hadoop DFSClient警告NotReplicatedYetException信息</a>
-</li>
-<li>
-<a href="#%E6%88%90%E5%8A%9F%E5%88%86%E9%85%8D%E7%9A%84%E9%9B%86%E7%BE%A4%E4%B8%8A%E6%97%A0%E6%B3%95%E8%BF%90%E8%A1%8CHadoop%E4%BD%9C%E4%B8%9A">成功分配的集群上无法运行Hadoop作业</a>
-</li>
-<li>
-<a href="#%E6%88%91%E7%9A%84Hadoop%E4%BD%9C%E4%B8%9A%E8%A2%AB%E4%B8%AD%E6%AD%A2%E4%BA%86">我的Hadoop作业被中止了</a>
-</li>
-<li>
-<a href="#Hadoop%E4%BD%9C%E4%B8%9A%E5%A4%B1%E8%B4%A5%E5%B9%B6%E8%BF%94%E5%9B%9E%E6%B6%88%E6%81%AF%EF%BC%9A%E2%80%98Job+tracker+still+initializing%E2%80%99">Hadoop作业失败并返回消息:&lsquo;Job tracker still initializing&rsquo;</a>
-</li>
-<li>
-<a href="#Torque%E7%9A%84%E9%80%80%E5%87%BA%E4%BB%A3%E7%A0%81%E6%B2%A1%E6%9C%89%E5%8C%85%E5%90%ABHOD%E7%9A%84">Torque的退出代码没有包含HOD的</a>
-</li>
-<li>
-<a href="#Hadoop%E6%97%A5%E5%BF%97%E6%9C%AA%E8%A2%AB%E4%B8%8A%E4%BC%A0%E5%88%B0DFS">Hadoop日志未被上传到DFS</a>
-</li>
-<li>
-<a href="#%E5%AE%9A%E4%BD%8DRingmaster%E6%97%A5%E5%BF%97">定位Ringmaster日志</a>
-</li>
-<li>
-<a href="#%E5%AE%9A%E4%BD%8DHodring%E6%97%A5%E5%BF%97">定位Hodring日志</a>
-</li>
-</ul>
-</li>
-</ul>
-</div>
-  
-<a name="N1000C"></a><a name="%E7%AE%80%E4%BB%8B"></a>
-<h2 class="h3">简介</h2>
-<div class="section">
-<a name="Introduction" id="Introduction"></a>
-<p>Hadoop On Demand (HOD)是一个能在大规模物理集群上供应虚拟Hadoop集群的系统。它使用Torque资源管理器进行节点分配。在所分配的节点上,它能启动Hadoop Map/Reduce以及HDFS守护进程。它能自动为Hadoop守护进程及客户端生成合适的配置文件(Hadoop-site.xml)。HOD还能够将Hadoop分发到它分配的虚拟集群节点上。总之,HOD方便管理者和用户快速安装与使用Hadoop。它也是需要在同一物理集群上测试各自版本的Hadoop开发者和测试者的实用工具。</p>
-<p>HOD支持Hadoop 0.15及其后续版本。</p>
-<p>后面的文档包括一个快速入门指南能让你快速上手HOD,一个所有HOD特性的详细手册,命令行选项,一些已知问题和故障排除的信息。</p>
-</div>
-  
-<a name="N1001E"></a><a name="HOD%E4%BD%BF%E7%94%A8%E5%85%A5%E9%97%A8"></a>
-<h2 class="h3">HOD使用入门</h2>
-<div class="section">
-<a name="Getting_Started_Using_HOD_0_4" id="Getting_Started_Using_HOD_0_4"></a>
-<p>在这部分,我们将会逐步骤地介绍使用HOD涉及到的最基本的操作。在开始遵循这些步骤之前,我们假定HOD及其依赖的软硬件均已被正确安装和配置。这步通常由集群的系统管理员负责。</p>
-<p>The HOD user interface is a command line tool called <span class="codefrag">hod</span>. It is driven by a configuration file that is typically set up for users by the system administrator. Users can override this configuration when invoking <span class="codefrag">hod</span>, as described later in this document. The configuration file can be specified to <span class="codefrag">hod</span> in either of the following two ways:</p>
-<ul>
-    
-<li>在命令行中指定,使用 -c 选项。例如<span class="codefrag">hod &lt;operation&gt; &lt;required-args&gt; -c path-to-the-configuration-file [ohter-options]</span>
-</li>
-    
-<li>在运行<span class="codefrag">hod</span>的地方设置环境变量<em>HOD_CONF_DIR</em>。这个变量应指向指向一个本地目录,其中有名为<em>hodrc</em>的文件。这与Hadoop中的<em>HADOOP_CONF_DIR</em>与<em>hadoop-site.xml</em>文件是类似的。如果命令行中未指定配置文件,<span class="codefrag">hod</span>会查找<em>HOD_CONF_DIR</em>环境变量指定目录下的<em>hodrc</em>文件。</li>
-    
-</ul>
-<p>下面的例子中,我们将不会明确指出这个配置选项,假定其已正确指定。</p>
-<a name="N1005B"></a><a name="%E4%B8%80%E4%B8%AA%E5%85%B8%E5%9E%8BHOD%E4%BC%9A%E8%AF%9D"></a>
-<h3 class="h4">一个典型HOD会话</h3>
-<a name="HOD_Session" id="HOD_Session"></a>
-<p>一个典型HOD会话至少包括三个步骤:分配,执行Hadoop作业,回收。为此,执行如下步骤。</p>
-<p>
-<strong>创建一个集群目录</strong>
-</p>
-<a name="Create_a_Cluster_Directory" id="Create_a_Cluster_Directory"></a>
-<p>
-<em>集群目录</em>是本地文件系统上的一个目录,<span class="codefrag">hod</span>会为它分配的集群产生对应的Hadoop配置<em>hadoop-site.xml</em>,放在这个目录下。这个目录可以按下文所述方式传递给<span class="codefrag">hod</span>操作。如果这个目录不存在,HOD会自动创建这个目录。一但分配好了集群,用户可通过Hadoop --config选项指定集群目录,在之上运行Hadoop作业。</p>
-<p>
-<strong><em>allocate</em>操作</strong>
-</p>
-<a name="Operation_allocate" id="Operation_allocate"></a>
-<p>
-<em>allocate</em>操作用来分配一组节点并在之上安装和提供Hadoop。它的语法如下。注意它要求指定参数集群目录(-d, --hod.clusterdir)和节点个数(-n, --hod.nodecount):</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-      
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d cluster_dir -n number_of_nodes [OPTIONS]</span></td>
-        
-</tr>
-      
-    
-</table>
-<p>如果命令成功执行,<span class="codefrag">cluster_dir/hadoop-site.xml</span>会被生成,文件中包含了分配出的集群的信息。它也会打印出关于Hadoop的web UI的信息。</p>
-<p>试运行这个命令会产生如下输出。注意在这个例子中集群目录是<span class="codefrag">~/hod-clusters/test</span>,我们要分配5个节点:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-    
-<tr>
-      
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d ~/hod-clusters/test -n 5</span>
-<br>
-      
-<span class="codefrag">INFO - HDFS UI on http://foo1.bar.com:53422</span>
-<br>
-      
-<span class="codefrag">INFO - Mapred UI on http://foo2.bar.com:55380</span>
-<br>
-</td>
-      
-</tr>
-   
-</table>
-<p>
-<strong>在分配的集群上执行Hadoop作业</strong>
-</p>
-<a name="Running_Hadoop_jobs_using_the_al" id="Running_Hadoop_jobs_using_the_al"></a>
-<p>现在,可以用一般的方式在分配的集群上执行Hadoop作业了。这是假定像<em>JAVA_HOME</em>,指向Hadoop安装的路径已被正确地设置了:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-      
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hadoop --config cluster_dir hadoop_command hadoop_command_args</span></td>
-        
-</tr>
-      
-    
-</table>
-<p>或者</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-      
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ export HADOOP_CONF_DIR=cluster_dir</span> 
-<br>
-              
-<span class="codefrag">$ hadoop hadoop_command hadoop_command_args</span></td>
-        
-</tr>
-      
-    
-</table>
-<p>继续我们的例子,下面的命令会在分配的集群上运行wordcount的例子:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ hadoop --config ~/hod-clusters/test jar /path/to/hadoop/hadoop-examples.jar wordcount /path/to/input /path/to/output</span></td>
-</tr>
-</table>
-<p>或者</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-    
-<td colspan="1" rowspan="1"><span class="codefrag">$ export HADOOP_CONF_DIR=~/hod-clusters/test</span>
-<br>
-    
-<span class="codefrag">$ hadoop jar /path/to/hadoop/hadoop-examples.jar wordcount /path/to/input /path/to/output</span></td>
-    
-</tr>
-  
-</table>
-<p>
-<strong> <em>deallocate</em>操作</strong>
-</p>
-<a name="Operation_deallocate" id="Operation_deallocate"></a>
-<p>
-<em>deallocate</em>操作用来回收分配到的集群。当完成集群使用之后,必须执行回收操作使这些节点可以为其他用户所用。<em>deallocate</em>操作的语法如下。注意它需要集群目录(-d, --hod.clusterdir)作为参数:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-      
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod deallocate -d cluster_dir</span></td>
-        
-</tr>
-      
-    
-</table>
-<p>继续我们的例子,如下命令会回收集群:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod deallocate -d ~/hod-clusters/test</span></td>
-</tr>
-</table>
-<p>如你所见,HOD允许用户分配一个集群,随意的使用它来运行Hadoop作业。例如,通过从多个shell中启动使用同一个配置的hadoop,用户可以做到在同一个集群上并发运行多个作业。</p>
-<a name="N10128"></a><a name="%E4%BD%BF%E7%94%A8HOD%E8%BF%90%E8%A1%8CHadoop%E8%84%9A%E6%9C%AC"></a>
-<h3 class="h4">使用HOD运行Hadoop脚本</h3>
-<a name="HOD_Script_Mode" id="HOD_Script_Mode"></a>
-<p>HOD的<em>script操作</em>能将集群的分配,使用和回收组织在一起。这对那些想运行Hadoop作业脚本,期望HOD能在脚本结束后自动完成清理操作的用户特别管用。用<span class="codefrag">hod</span>执行Hadoop脚本,需要这么做:</p>
-<p>
-<strong>创建脚本文件</strong>
-</p>
-<a name="Create_a_script_file" id="Create_a_script_file"></a>
-<p>这是一个普通的shell脚本,通常里面会包含hadoop命令,如:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ hadoop jar jar_file options</span></td>
-  
-</tr>
-</table>
-<p>当然,用户可以向脚本中添加任何有效的命令。HOD会在执行这个脚本时自动地设置<em>HADOOP_CONF_DIR</em>指向分配的集群。用户不必对此担心。不过,像分配操作时一样,用户需要指定一个集群目录。</p>
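A slightly fuller, illustrative sketch of such a script file. The jar and input/output paths are placeholders; the key point is that the script returns only after the hadoop commands have finished:

    #!/bin/sh
    # HOD sets HADOOP_CONF_DIR to the allocated cluster's directory before running this script.
    hadoop jar /path/to/hadoop/hadoop-examples.jar wordcount /path/to/input /path/to/output
    # 'hadoop jar' blocks until the job completes, so the cluster is not
    # deallocated before the job has finished.
    hadoop dfs -ls /path/to/output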
-<p>
-<strong>运行脚本</strong>
-</p>
-<a name="Running_the_script" id="Running_the_script"></a>
-<p>
-<em>脚本操作</em>的语法如下。注意它需要集群目录(-d, --hod.clusterdir),节点个数(-n, --hod.nodecount)以及脚本文件(-s, --hod.script)作为参数:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-      
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod script -d cluster_directory -n number_of_nodes -s script_file</span></td>
-        
-</tr>
-      
-    
-</table>
-<p>注意一但脚本执行完毕,HOD就会回收集群,这意味着脚本必须要做到等hadoop作业完成后脚本才结束。用户写脚本时必须注意这点。</p>
-</div>
-  
-<a name="N1016C"></a><a name="HOD%E7%9A%84%E5%8A%9F%E8%83%BD"></a>
-<h2 class="h3">HOD的功能</h2>
-<div class="section">
-<a name="HOD_0_4_Features" id="HOD_0_4_Features"></a><a name="N10174"></a><a name="%E4%BE%9B%E5%BA%94%E4%B8%8E%E7%AE%A1%E7%90%86Hadoop%E9%9B%86%E7%BE%A4"></a>
-<h3 class="h4">供应与管理Hadoop集群</h3>
-<a name="Provisioning_and_Managing_Hadoop" id="Provisioning_and_Managing_Hadoop"></a>
-<p>HOD主要功能是供应Hadoop的Map/Reduce和HDFS集群。这些在见入门一节已经做过描述。 此外,要是还有节点可用,并且组织上也批准,一个用户可以在同一时间内使用HOD分配多个Map/Reduce集群。对于分配到的不同集群,用户需要为上面提到的<span class="codefrag">cluster_dir</span>参数指定不同的路径。HOD提供<em>list</em>和<em>info</em>操作可以管理多个集群。</p>
-<p>
-<strong><em>list</em>操作</strong>
-</p>
-<a name="Operation_list" id="Operation_list"></a>
-<p>list操作能列举到目前为止用户所创建的所有集群。存放hadoop-site.xml的集群目录,与JobTracker和/或HDFS的连接及状态也会被显示出来。list操作的使用语法如下:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-      
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod list</span></td>
-        
-</tr>
-      
-    
-</table>
-<p>
-<strong><em>info</em>操作</strong>
-</p>
-<a name="Operation_info" id="Operation_info"></a>
-<p>info操作会显示指定集群相关的信息。这些信息包括Torque作业id,HOD Ringmaster进程,Hadoop的JobTracker和NameNode守护进程等重要守护进程的位置。info操作的语法如下。注意它需要集群目录(-d, --hod.clusterdir)作为参数:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-      
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod info -d cluster_dir</span></td>
-        
-</tr>
-      
-    
-</table>
-<p>
-<span class="codefrag">cluster_dir</span>应为前面<em>allocate</em>操作中指定的有效集群目录。</p>
-<a name="N101BE"></a><a name="%E4%BD%BF%E7%94%A8tarball%E5%88%86%E5%8F%91Hadoop"></a>
-<h3 class="h4">使用tarball分发Hadoop</h3>
-<a name="Using_a_tarball_to_distribute_Ha" id="Using_a_tarball_to_distribute_Ha"></a>
-<p>供应Hadoop时,HOD可以使用集群节点上已经安装好的Hadoop,也可以将hadoop的tarball作为供应操作的一部分在节点上进行分发和安装。如果使用tarball选项,就不必非得使用预装的Hadoop了,也不要求集群节点上必须有一个预装的版本。这对开发/QE环境下在一个共享集群上测试不同版本hadoop的开发者尤其有用。</p>
-<p>要使用预装的Hadoop,你必须在hodrc中的<span class="codefrag">gridservice-hdfs</span>部分和<span class="codefrag">gridservice-mapred</span>部分指定<span class="codefrag">pkgs</span>选项。它必须指向集群中所有节点上Hadoop的安装路径。</p>
-<p>指定Tarball的语法如下:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d cluster_dir -n number_of_nodes -t hadoop_tarball_location</span></td>
-        
-</tr>
-    
-</table>
-<p>例如,下面的命令根据tarball<span class="codefrag">~/share/hadoop.tar.gz</span>分配Hadoop:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d ~/hadoop-cluster -n 10 -t ~/share/hadoop.tar.gz</span></td>
-</tr>
-</table>
-<p>类似地,使用hod脚本的语法如下:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod script -d cluster_directory -s scritp_file -n number_of_nodes -t hadoop_tarball_location</span></td>
-        
-</tr>
-    
-</table>
-<p>上面语法中指定的hadoop_tarball_location应指向从所有计算节点都可以访问的共享文件系统的路径。当前,HOD只支持挂载的NFS。</p>
-<p>
-<em>注意:</em>
-</p>
-<ul>
-    
-<li>为了获得更好分发性能,建议Hadoop tarball只包含库与二进制文件,不包含源代码或文档。</li>
-    
-<li>当你希望在用tarball方式分配的集群上执行作业,你必须使用兼容的Hadoop版本提交你的作业。最好的方式是解压,使用Tarball中的版本。</li>
-    
-<li>你需要确保在tar分发包的conf目录下没有Hadoop配置文件hadoop-env.sh和hadoop-site.xml。如果这些文件存在并包含错误的值,集群分配可能会失败。
-</li>
-  
-</ul>
-<a name="N10214"></a><a name="%E4%BD%BF%E7%94%A8%E5%A4%96%E9%83%A8HDFS"></a>
-<h3 class="h4">使用外部HDFS</h3>
-<a name="Using_an_external_HDFS" id="Using_an_external_HDFS"></a>
-<p>在典型的由HOD提供的Hadoop集群中,HDFS已经被静态地(未使用HOD)设置好。这能使数据在HOD提供的集群被回收后还可以持久保存在HDFS中。为使用静态配置的HDFS,你的hodrc必须指向一个外部HDFS。具体就是,在hodrc的<span class="codefrag">gridservice-hdfs</span>部分将下面选项设置为正确的值:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1">external = true</td>
-</tr>
-<tr>
-<td colspan="1" rowspan="1">host = HDFS NameNode主机名</td>
-</tr>
-<tr>
-<td colspan="1" rowspan="1">fs_port = HDFS NameNode端口</td>
-</tr>
-<tr>
-<td colspan="1" rowspan="1">info_port = HDFS NameNode web UI的端口</td>
-</tr>
-</table>
-<p>
-<em>注意:</em>你也可以从命令行开启这个选项。即,你这样去使用一个静态HDFS:<br>
-    
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d cluster_dir -n number_of_nodes --gridservice-hdfs.external</span></td>
-        
-</tr>
-    
-</table>
-<p>If required, HOD can also provision an HDFS cluster along with the Map/Reduce cluster. To do so, set the following option in the <span class="codefrag">gridservice-hdfs</span> section of the hodrc:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1">external = false</td>
-</tr>
-</table>
-<a name="N10258"></a><a name="%E9%85%8D%E7%BD%AEHadoop%E7%9A%84%E9%80%89%E9%A1%B9"></a>
-<h3 class="h4">配置Hadoop的选项</h3>
-<a name="Options_for_Configuring_Hadoop" id="Options_for_Configuring_Hadoop"></a>
-<p>HOD提供一个非常方便的机制能配置它提供的Hadoop守护进程和它在客户端生成的hadoop-site.xml。通过在HOD配置文件中指定配置参数,或在分配集群时在命令行指定都可做到这点。</p>
-<p>
-<strong>配置Hadoop守护进程</strong>
-</p>
-<a name="Configuring_Hadoop_Daemons" id="Configuring_Hadoop_Daemons"></a>
-<p>要配置Hadoop守护进程,你可以这么做:</p>
-<p>对于Map/Reduce,指定<span class="codefrag">gridservice-mapred</span>部分的<span class="codefrag">server-params</span>项的指为一个以逗号分割的key-value对列表。同配置动态HDFS集群一样,设置<span class="codefrag">gridservice-hdfs</span>部分的<span class="codefrag">server-params</span>项。如果这些参数应被标记成<em>final</em>,将这些参数包含到相应部分的<span class="codefrag">final-server-params</span>项中。</p>
-<p>例如:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">server-params = mapred.reduce.parallel.copies=20,io.sort.factor=100,io.sort.mb=128,io.file.buffer.size=131072</span></td>
-</tr>
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">final-server-params = mapred.child.java.opts=-Xmx512m,dfs.block.size=134217728,fs.inmemory.size.mb=128</span></td>
-  
-</tr>
-</table>
-<p>要从命令行指定选项,你可以用如下语法:</p>
-<p>配置Map/Reduce守护进程:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d cluster_dir -n number_of_nodes -Mmapred.reduce.parallel.copies=20 -Mio.sort.factor=100</span></td>
-        
-</tr>
-    
-</table>
-<p>在上述例子中,<em>mapred.reduce.parallel.copies</em>参数和<em>io.sort.factor</em>参数将会被添加到<span class="codefrag">server-params</span>中,如果已经在<span class="codefrag">server-params</span>中存在,则它们会被覆盖。要将这些参数指定成<em>final</em>类型,你可以:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d cluster_dir -n number_of_nodes -Fmapred.reduce.parallel.copies=20 -Fio.sort.factor=100</span></td>
-        
-</tr>
-    
-</table>
-<p>Note, however, that final parameters cannot be overridden from the command line; they can only be appended if they were not already specified.</p>
-<p>Options for configuring dynamically provisioned HDFS daemons are specified in a similar way: simply use -H in place of -M and -S in place of -F.</p>
-<p>
-<strong>配置Hadoop的作业提交(客户端)程序</strong>
-</p>
-<a name="Configuring_Hadoop_Job_Submissio" id="Configuring_Hadoop_Job_Submissio"></a>
-<p>如上所述,当allocate操作成功后,<span class="codefrag">cluster_dir/hadoop-site.xml</span>将会生成,其中会包含分配的集群的JobTracker和NameNode的信息。这个配置用于向集群提交作业。HOD提供选项可将其它的hadoop配置参数添加到该文件,其语法如下:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d cluster_dir -n number_of_nodes -Cmapred.userlog.limit.kb=200 -Cmapred.child.java.opts=-Xmx512m</span></td>
-        
-</tr>
-    
-</table>
-<p>上例中,<em>mapred.userlog.limit.kb</em>和<em>mapred.child.java.opts</em>会被添加到hod产生的hadoop-site.xml中。</p>
-<a name="N102EA"></a><a name="%E6%9F%A5%E7%9C%8BHadoop%E7%9A%84Web-UI"></a>
-<h3 class="h4">查看Hadoop的Web-UI</h3>
-<a name="Viewing_Hadoop_Web_UIs" id="Viewing_Hadoop_Web_UIs"></a>
-<p>HOD的allocate操作会打印出JobTracker和NameNode的Web UI的URL。例如:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d ~/hadoop-cluster -n 10 -c ~/hod-conf-dir/hodrc</span>
-<br>
-    
-<span class="codefrag">INFO - HDFS UI on http://host242.foo.com:55391</span>
-<br>
-    
-<span class="codefrag">INFO - Mapred UI on http://host521.foo.com:54874</span>
-    </td>
-</tr>
-</table>
-<p>上面提到的<em>info</em>操作可以给你同样的信息。</p>
-<a name="N1030C"></a><a name="%E6%94%B6%E9%9B%86%E5%92%8C%E6%9F%A5%E7%9C%8BHadoop%E6%97%A5%E5%BF%97"></a>
-<h3 class="h4">收集和查看Hadoop日志</h3>
-<a name="Collecting_and_Viewing_Hadoop_Lo" id="Collecting_and_Viewing_Hadoop_Lo"></a>
-<p>To get the Hadoop logs of the daemons running on one of the allocated nodes (a consolidated sketch follows this list):</p>
-<ul>
-    
-<li>登录感兴趣的节点。如果你想查看JobTracker或者NameNode的日志,<em>list</em>和<em>info</em>操作能告诉你这些进程在那些节点上运行。</li>
-    
-<li>获取感兴趣的守护进程的进程信息(例如,<span class="codefrag">ps ux | grep TaskTracker</span>)</li>
-    
-<li>在这些进程信息中,查找变量<span class="codefrag">-Dhadoop.log.dir</span>的值。通常是hod配置文件里<span class="codefrag">hodring.temp-dir</span>目录的一个子目录 。</li>
-    
-<li>切换到<span class="codefrag">hadoop.log.dir</span>目录以查看守护进程日志和用户日志。</li>
-  
-</ul>
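A consolidated sketch of the steps above. The node name, the daemon being searched for, and the assumption that you can ssh to compute nodes are all illustrative:

    # node reported by 'hod list' / 'hod info' for the daemon of interest
    ssh compute-node-01
    # find the daemon's process and note the -Dhadoop.log.dir=... value among its arguments
    ps ux | grep TaskTracker
    # change to that directory to view daemon logs and user logs
    cd /path/found/in/hadoop.log.dir
    ls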
-<p>HOD也提供了一个机制,能让你在集群回收后将日志收集存放到文件系统,或者一个在外部配置的HDFS中。这样的话,在作业完成,节点回收后你还可以看这些日志。要做到这点,像下面一样为log-destination-uri指定一个URI:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">log-destination-uri= hdfs://host123:45678/user/hod/logs</span>或者</td>
-</tr>
-    
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">log-destination-uri= file://path/to/store/log/files</span></td>
-</tr>
-    
-</table>
-<p>Under the root directory specified above, HOD creates a path user_name/torque_jobid and stores gzip-compressed log files from every node that was part of the job.</p>
-<p>注意要在HDFS上存储这些文件,你得将<span class="codefrag">hodring.pkgs</span>项配置为和刚才提到的HDFS兼容的版本。否则,HOD会尝试使用它供应Hadoop集群时用到的Hadoop版本。</p>
-<a name="N10355"></a><a name="%E9%97%B2%E7%BD%AE%E9%9B%86%E7%BE%A4%E7%9A%84%E8%87%AA%E5%8A%A8%E5%9B%9E%E6%94%B6"></a>
-<h3 class="h4">闲置集群的自动回收</h3>
-<a name="Auto_deallocation_of_Idle_Cluste" id="Auto_deallocation_of_Idle_Cluste"></a>
-<p>HOD会自动回收在一段时间内没有运行Hadoop作业的集群。每次的HOD分配会带有一个监控设施不停地检查Hadoop作业的执行。如果侦测到在一定时间内没Hadoop作业在执行,它就回收这个集群,释放那些未被有效利用的节点。</p>
-<p>
-<em>注意:</em>当集群被回收时,<em>集群目录</em>没有被自动清空。用户须通过一个正式的<em>deallcocate</em>操作清理它。</p>
-<a name="N1036B"></a><a name="%E6%8C%87%E5%AE%9A%E9%A2%9D%E5%A4%96%E7%9A%84%E4%BD%9C%E4%B8%9A%E5%B1%9E%E6%80%A7"></a>
-<h3 class="h4">指定额外的作业属性</h3>
-<a name="Specifying_Additional_Job_Attrib" id="Specifying_Additional_Job_Attrib"></a>
-<p>HOD允许用户为一个Torque作业指定一个时钟时间和一个名称(或者标题)。 </p>
-<p>时钟时间是对Torque作业有效时间的一个估计。这个时间过期后,Torque将自动删除这个作业,释放其节点。指定这个时钟时间还能帮助作业调度程序更好的安排作业,提高对集群资源的使用率。</p>
-<p>指定时钟时间的语法如下:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d cluster_dir -n number_of_nodes -l time_in_seconds</span></td>
-        
-</tr>
-    
-</table>
-<p>Torque作业的名称或标题能给用户以友好的作业标识。每次展示Torque作业的属性的时候,这个字符串就会出现,包括<span class="codefrag">qstat</span>命令。</p>
-<p>指定名称或标题的语法如下:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-        
-<tr>
-          
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d cluster_dir -n number_of_nodes -N name_of_job</span></td>
-        
-</tr>
-    
-</table>
-<p>
-<em>注意:</em>由于底层Torque资源管理器的限制,不以字母开头或者包含空格的名字将导致作业失败。失败信息会表明问题存在于指定的作业名称中。</p>
-<a name="N103A2"></a><a name="%E6%8D%95%E8%8E%B7HOD%E5%9C%A8Torque%E4%B8%AD%E7%9A%84%E9%80%80%E5%87%BA%E7%A0%81"></a>
-<h3 class="h4">捕获HOD在Torque中的退出码</h3>
-<a name="Capturing_HOD_exit_codes_in_Torq" id="Capturing_HOD_exit_codes_in_Torq"></a>
-<p>HOD退出码出现在Torque的exit_status字段中。这有助于使用者和系统管理员区分成功的HOD执行和失败的HOD执行。如果分配成功且所有Hadoop作业在所分配的集群上正确的执行,退出码为0。如果分配失败或者部分hadoop作业在分配集群上运行失败,退出码非0。下表列出了可能出现的退出码。<em>注意:只有所使用的Hadoop版本是0.16或以上时,Hadoop作业状态才可以被捕获。</em>
-</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-    
-      
-<tr>
-        
-<td colspan="1" rowspan="1">退出码</td>
-        <td colspan="1" rowspan="1">含义</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 6 </td>
-        <td colspan="1" rowspan="1">Ringmaster故障</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 7 </td>
-        <td colspan="1" rowspan="1"> DFS故障</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 8 </td>
-        <td colspan="1" rowspan="1"> Job tracker故障</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 10 </td>
-        <td colspan="1" rowspan="1"> 集群死亡</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 12 </td>
-        <td colspan="1" rowspan="1"> 集群已分配 </td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 13 </td>
-        <td colspan="1" rowspan="1"> HDFS死亡</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 14 </td>
-        <td colspan="1" rowspan="1"> Mapred死亡</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 16 </td>
-        <td colspan="1" rowspan="1">集群中所有的Map/Reduce作业失败。查看hadoop日志了解更多细节。</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 17 </td>
-        <td colspan="1" rowspan="1">集群中部分的Map/Reduce作业失败。查看hadoop日志了解更多细节。</td>
-      
-</tr>
-  
-</table>
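As a hedged illustration of how these exit codes might be inspected after the fact: on Torque installations that retain completed-job information, something along the following lines can work. Whether qstat still shows the job, and whether tracejob is available, depend on your Torque configuration and version:

    # 123 is a placeholder for the Torque job id reported by 'hod list' / 'hod info'
    qstat -f 123 | grep exit_status
    # or reconstruct the job record from the Torque server logs:
    tracejob 123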
-<a name="N10434"></a><a name="%E5%91%BD%E4%BB%A4%E8%A1%8C"></a>
-<h3 class="h4">命令行</h3>
-<a name="Command_Line" id="Command_Line"></a>
-<p>HOD命令行的通用的语法如下:<br>
-      
-<em>hod &lt;operation&gt; [ARGS] [OPTIONS]<br>
-</em>
-      允许的操作有&lsquo;allocate&rsquo;,&lsquo;deallocate&rsquo;,&lsquo;info&rsquo;,&lsquo;list&rsquo;,&lsquo;script&rsquo;以及&lsquo;help&rsquo;。要获取某特定操作的帮助,你可以执行:<span class="codefrag">hod help &lt;operation&gt;</span>。要查看所有可用的选项,你可以执行<span class="codefrag">hod help options</span>。</p>
-<p>
-<em>allocate</em>
-<br>
-      
-<em>用法:hod allocate -d cluster_dir -n number_of_nodes [OPTIONS]</em>
-<br>
-      分配一个指定节点数目的集群,把分配信息存放在cluster_dir方便后续<span class="codefrag">hadoop</span>命令使用。注意<span class="codefrag">cluster_dir</span>必须在运行该命令前已经存在。</p>
-<p>
-<em>list</em>
-<br>
-      
-<em>用法:hod list [OPTIONS]</em>
-<br>
-      列举出用户分配的所有集群。提供的信息包括集群对应的Torque作业标识、存储分配信息的集群目录,以及Map/Reduce守护进程是否存活。</p>
-<p>
-<em>info</em>
-<br>
-      
-<em>用法:hod info -d cluster_dir [OPTIONS]</em>
-<br>
-      列举集群分配信息存放于某指定集群目录的集群信息。</p>
-<p>
-<em>deallocate</em>
-<br>
-      
-<em>用法:hod deallocate -d cluster_dir [OPTIONS]</em>
-<br>
-      回收集群分配信息存放于某指定集群目录的集群。</p>
-<p>
-<em>script</em>
-<br>
-      
-<em>用法:hod script -s script_file -d cluster_directory -n number_of_node [OPTIONS]</em>
-<br>
-      用HOD<em>script</em>操作执行一个hadoop脚本。在给定数目的节点上提供Hadoop,在提交的节点执行这个脚本,并在脚本执行结束后回收集群。</p>
-<p>
-<em>help</em>
-<br>
-      
-<em>用法:hod help [operation | 'options']</em>
-<br>
-      未指定参数时,<span class="codefrag">hod help</span>给出用法以及基本选项,等同于<span class="codefrag">hod --help</span> (见下文)。当指定参数&lsquo;options&rsquo;时,显示hod的基本选项。当指定operation时,它会显示出该特定operation的用法和相应的描述。例如,希望了解allocate操作,你可以执行<span class="codefrag">hod help allocate</span>
-</p>
-<p>除上面的操作外,HOD还能接受下列命令行选项。</p>
-<p>
-<em>--help</em>
-<br>
-      打印出用法和基本选项的帮助信息。</p>
-<p>
-<em>--verbose-help</em>
-<br>
-      hodrc文件中所有的配置项均可通过命令行传递,使用语法<span class="codefrag">--section_name.option_name[=value]</span>。这种方式下,命令行传递的参数会覆盖hodrc中的配置项。verbose-help命令会列出hodrc文件中全部可用项。这也是一个了解配置选项含义的好方法。</p>
-<p>
-<a href="#Options_Configuring_HOD">下一部分</a>描述了大多数重要的hod配置项。基本选项可以通过<span class="codefrag">hod help options</span>了解,hod配置中所有可能的选项可以参看<span class="codefrag">hod --verbose-help</span>的输出。要了解所有选项的详细描述,请参看<a href="hod_config_guide.html">配置指南</a>。</p>
-<a name="N104BB"></a><a name="HOD%E9%85%8D%E7%BD%AE%E9%80%89%E9%A1%B9"></a>
-<h3 class="h4"> HOD配置选项</h3>
-<a name="Options_Configuring_HOD" id="Options_Configuring_HOD"></a>
-<p> 如上所述,HOD的配置是由系统管理员设置配置文件来完成的。这是一个INI风格的配置文件,文件分成多个段,每个段包含一些配置项。这些段分别和HOD的进程:client,ringmaster,hodring,mapreduce或hdfs相关。每一个配置项由选项名和值构成。</p>
-<p>有两种方式可让用户覆盖默认配置文件里的设定:</p>
-<ul>
-    
-<li>在每条命令前,用户可以向HOD提供自己的配置文件,使用<span class="codefrag">-c</span>选项。</li>
-    
-<li>用户可以在命令行指定HOD的配置选项覆盖正使用的配置文件中提供的值。</li>
-  
-</ul>
-<p>这一节介绍一些最常用的配置项。为了便于指定,这些常用选项通常会有一个<em>短</em>选项名。所有其它选项均可用随后介绍的<em>长</em>选项格式指定。</p>
-<p>
-<em>-c config_file</em>
-<br>
-  提供要使用的配置文件。可与其他任何的HOD选项一起使用。此外,可定义<span class="codefrag">HOD_CONF_DIR</span>环境变量为一个包含<span class="codefrag">hodrc</span>文件的目录,避免每条HOD命令都要指定配置文件。</p>
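-<p>例如,可以先设置<span class="codefrag">HOD_CONF_DIR</span>,之后的命令就不必再使用<span class="codefrag">-c</span>指定配置文件(示例:其中的路径和节点数仅为演示):</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ export HOD_CONF_DIR=/path/to/hod/conf</span></td>
-</tr>
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d cluster_dir -n 4</span></td>
-</tr>
-</table>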
-<p>
-<em>-d cluster_dir</em>
-<br>
-  大多数hod操作都要求这个选项。如<a href="#Create_a_Cluster_Directory">此处</a>所描述的,<em>集群目录</em>是本地文件系统上的一个目录,<span class="codefrag">hod</span>会把为所分配集群生成的相应Hadoop配置(即<em>hadoop-site.xml</em>)存放在这个目录里。使用-d或者--hod.clusterdir将这个参数传递给<span class="codefrag">hod</span>操作,如果目录不存在,HOD会自动创建该目录。集群分配好后,用户可以通过将hadoop --config指定为集群目录,在这个集群上执行Hadoop作业。</p>
-<p>
-<em>-n number_of_nodes</em>
-<br>
-  hod allocation操作和script操作要求这个选项。表示要分配的节点数。</p>
-<p>
-<em>-s script-file</em>
-<br>
-  脚本操作时需要,用于指定要执行的脚本文件。</p>
-<p>
-<em>-b 1|2|3|4</em>
-<br>
-  启用给定的调试级别。能与其他HOD选项一起使用。级别4最为详尽。</p>
-<p>
-<em>-t hadoop_tarball</em>
-<br>
-  从指定tar.gz文件提供Hadoop分发。此选项值只适用于<em>allocate</em>操作。为获得更好的分发性能,强烈推荐创建Hadoop tarball<em>前</em>删除其中的源代码或文档。</p>
-<p>
-<em>-N job-name</em>
-<br>
-  内部使用的资源管理作业名。比如,对于Torque作为资源管理器的情况,会被解释成<span class="codefrag">qsub -N</span>选项,使用<span class="codefrag">qstat</span>命令时可以看到这个作业名。</p>
-<p>
-<em>-l wall-clock-time</em>
-<br>
-  用户希望所分配的集群能够运行作业的时间总量。这个值会被传递给HOD底层的资源管理器,用于更有效地调度和利用集群。注意对于Torque的情形,这个时间到期后,集群会被自动回收。</p>
-<p>
-<em>-j java-home</em>
-<br>
-  JAVA_HOME环境变量里指定的路径。在<em>script</em>操作中使用。HOD将JAVA_HOME环境变量设置为这个值,并在此环境下启动用户脚本。</p>
-<p>
-<em>-A account-string</em>
-<br>
-  传递给后台资源管理器的记账(accounting)信息。</p>
-<p>
-<em>-Q queue-name</em>
-<br>
-  接受作业提交的后台资源管理器中队列的名称。</p>
-<p>
-<em>-Mkey1=value1 -Mkey2=value2</em>
-<br>
-  为供应的Map/Reduce守护进程(JobTracker以及TaskTracker)提供配置参数。在集群节点上,会根据这些值产生一个hadoop-site.xml。 <br>
-  
-<em>注意:</em>值中的下列字符:空格,逗号,等号,分号需要使用&lsquo;\&rsquo;转义, 且放置在引号中。你也可以使用&lsquo;\&rsquo;来转义&lsquo;\&rsquo;。</p>
-<p>
-<em>-Hkey1=value1 -Hkey2=value2</em>
-<br>
-  为供应的HDFS守护进程(NameNode以及DataNode)提供配置参数。在集群节点上,会根据这些值产生一个hadoop-site.xml。 <br>
-  
-<em>注意:</em>值中的下列字符:空格,逗号,等号,分号需要使用&lsquo;\&rsquo;转义, 且放置在引号中。你也可以使用&lsquo;\&rsquo;来转义&lsquo;\&rsquo;。</p>
-<p>
-<em>-Ckey1=value1 -Ckey2=value2</em>
-<br>
-  为提交作业的客户端提供配置参数。在提交节点上,会根据这些值产生一个hadoop-site.xml。<br>
-  
-<em>注意:</em>值中的下列字符:空格,逗号,等号,分号需要使用&lsquo;\&rsquo;转义,且放置在引号中。你也可以使用&lsquo;\&rsquo;来转义&lsquo;\&rsquo;。</p>
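-<p>作为示例(下面的配置参数名取自Hadoop默认配置,节点数与取值仅为演示),可以在分配集群时同时传入Map/Reduce端和客户端的配置参数;第二条命令按照上面的规则演示了值中空格的转义(具体转义行为以所用HOD版本为准):</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d cluster_dir -n 10 -Mmapred.reduce.parallel.copies=20 -Mio.sort.mb=200 -Cmapred.userlog.retain.hours=12</span></td>
-</tr>
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d cluster_dir -n 10 -Mmapred.child.java.opts="-Xmx512m\ -verbose:gc"</span></td>
-</tr>
-</table>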
-<p>
-<em>--section-name.option-name=value</em>
-<br>
-  这是用<em>长</em>格式提供配置选项的方法。比如,你可以指定<em>--hod.script-wait-time=20</em>
-</p>
-</div>
-
-<a name="N10572"></a><a name="%E6%95%85%E9%9A%9C%E6%8E%92%E9%99%A4"></a>
-<h2 class="h3">故障排除</h2>
-<div class="section">
-<a name="Troubleshooting" id="Troubleshooting"></a>
-<p>下面各节列出了用户使用HOD时可能碰到的一些常见错误情况,以及解决问题的方法。</p>
-<a name="N1057D"></a><a name="%E5%88%86%E9%85%8D%E6%93%8D%E4%BD%9C%E6%97%B6"></a>
-<h3 class="h4">分配操作时hod挂起</h3>
-<a name="_hod_Hangs_During_Allocation" id="_hod_Hangs_During_Allocation"></a><a name="hod_Hangs_During_Allocation" id="hod_Hangs_During_Allocation"></a>
-<p>
-<em>可能原因:</em>HOD或Hadoop的一个组件启动失败。这种情况下,<span class="codefrag">hod</span>命令会在一段时间(通常是2-3分钟)后返回,退出码是错误代码部分定义的错误码7或8。参考该部分以获得更多细节。 </p>
-<p>
-<em>可能原因:</em>使用tarball模式申请了大规模的集群。有时由于网络负载,或者是分配节点上的负载,tarball分发过程可能会明显变慢,需要几分钟才能完成。请等待命令完成。还可以检查一下tarball,确认其中没有包含Hadoop源码或文档。</p>
-<p>
-<em>可能原因:</em>Torque相关的问题。如果原因与Torque相关,<span class="codefrag">hod</span>命令5分钟内是不会返回的。在调试模式下运行<span class="codefrag">hod</span>你会发现<span class="codefrag">qstat</span>命令被重复执行。在另一个shell中执行<span class="codefrag">qstat</span>命令你会发现作业处于<span class="codefrag">Q</span>(排队)状态。这通常说明Torque出现了问题。可能原因有个别节点宕机,或者增加了新节点但Torque不知。通常,需要系统管理员帮助解决此问题。</p>
-<a name="N105AB"></a><a name="%E5%9B%9E%E6%94%B6%E6%93%8D%E4%BD%9C%E6%97%B6"></a>
-<h3 class="h4">回收操作时hod挂起</h3>
-<a name="_hod_Hangs_During_Deallocation" id="_hod_Hangs_During_Deallocation"></a><a name="hod_Hangs_During_Deallocation" id="hod_Hangs_During_Deallocation"></a>
-<p>
-<em>可能原因:</em>Torque相关的问题,通常是Torque server上的负载较大,或者是分配的集群非常大。一般来说,你唯一能做的是等待命令执行完成。</p>
-<a name="N105BD"></a><a name="%E5%A4%B1%E8%B4%A5%E6%97%B6%E7%9A%84%E9%94%99%E8%AF%AF%E4%BB%A3%E7%A0%81%E5%92%8C%E9%94%99%E8%AF%AF%E4%BF%A1%E6%81%AF"></a>
-<h3 class="h4">hod失败时的错误代码和错误信息</h3>
-<a name="hod_Fails_With_an_error_code_and" id="hod_Fails_With_an_error_code_and"></a><a name="_hod_Fails_With_an_error_code_an" id="_hod_Fails_With_an_error_code_an"></a>
-<p>如果<span class="codefrag">hod</span>命令的退出码不是<span class="codefrag">0</span>,参考下面的退出代码表确定此情况发生的原因和相应的调试方法。</p>
-<p>
-<strong>错误代码</strong>
-</p>
-<a name="Error_Codes" id="Error_Codes"></a>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-    
-      
-<tr>
-        
-<th colspan="1" rowspan="1">错误代码</th>
-        <th colspan="1" rowspan="1">含义</th>
-        <th colspan="1" rowspan="1">可能原因及补救方法</th>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 1 </td>
-        <td colspan="1" rowspan="1">配置错误 </td>
-        <td colspan="1" rowspan="1">hodrc中的参数错误,或者其他与HOD配置相关的错误。此类情况下,错误信息已经足够帮你发现和解决问题。</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 2 </td>
-        <td colspan="1" rowspan="1">无效操作</td>
-        <td colspan="1" rowspan="1">执行<span class="codefrag">hod help</span>查看有效的操作列表。</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 3 </td>
-        <td colspan="1" rowspan="1">无效操作参数</td>
-        <td colspan="1" rowspan="1">执行<span class="codefrag">hod help operation</span>查看特定操作的用法。</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 4 </td>
-        <td colspan="1" rowspan="1">调度失败</td>
-        <td colspan="1" rowspan="1"> 1. 请求分配了过多的资源。执行<span class="codefrag">checknodes cluster_name</span>查看是否有足够多的可用节点。<br>
-             2. 请求的资源超出了资源管理器的限制。<br>
-             3. Torque配置错误,Torque可执行文件路径配置错误,或者其它Torque相关问题。联系系统管理员。</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 5 </td>
-        <td colspan="1" rowspan="1">执行作业失败</td>
-        <td colspan="1" rowspan="1"> 1. Torque作业被外部删除。执行Torque <span class="codefrag">qstat</span>命令查看是否有作业处于<span class="codefrag">R</span>(运行)状态。如果没有,尝试重新运行HOD。<br>
-          2. Torque的问题诸如服务器暂时性宕机,或者无响应。联系系统管理员。 <br>
-          3. 系统管理员可能配置了帐号核实,并且一个非法的帐号被指定。请联系系统管理员。 </td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 6 </td>
-        <td colspan="1" rowspan="1">Ringmaster故障</td>
-        <td colspan="1" rowspan="1"> HOD会打印信息"Cluster could not be allocated because of the following errors on the ringmaster host &lt;hostname&gt;"。实际的错误信息可能指示下列情形中的一种:<br>
-          1. 运行ringmaster的节点配置不合法,错误信息中的hostname会指明具体的机器。<br>
-          2. <span class="codefrag">ringmaster</span>段的配置无效,<br>
-          3. <span class="codefrag">gridservice-mapred或者gridservice-hdfs</span>段中<span class="codefrag">pkgs</span>项的配置无效,<br>
-          4. 无效的hadoop tarball,或者tarball中conf目录下存在无效的配置文件,<br>
-          5. Hadoop中的MapReduce与外部HDFS版本不匹配。<br>
-          Torque <span class="codefrag">qstat</span>命令很可能会显示一个处于<span class="codefrag">C</span>(Completed,已完成)状态的作业。<br>
-          你可以登录到HOD失败信息中给出的ringmaster主机,根据错误信息的提示解决问题。如果错误信息没有给出完整的信息,ringmaster日志也可能帮助找到问题的根源。参考下面<em>定位Ringmaster日志</em>一节了解更多信息。</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 7 </td>
-        <td colspan="1" rowspan="1"> DFS故障</td>
-        <td colspan="1" rowspan="1"> 当HOD由于DFS故障(或者Job tracker失败,错误码8,下文有介绍)分配失败时,它会打印错误信息 "Hodring at &lt;hostname&gt; failed with following errors:",并给出真正的错误信息,这个信息可能表明下列情形中的一种:<br>
-	  1. 启动Hadoop集群时出现问题。通常错误信息会表明之前提到的主机出现错误的真正原因。你也要检查HOD配置文件中与Hadoop相关的配置。按上面<em>收集和查看Hadoop日志</em>一节中介绍的方法查看Hadoop的日志。<br>
-          2. 运行hodring的节点上的配置无效,错误信息中的hostname会指明机器<br>
-          3. hodrc中<span class="codefrag">hodring</span>段的配置无效。<span class="codefrag">ssh</span>到错误信息中提到的节点,在hodring日志中grep <span class="codefrag">ERROR</span>或<span class="codefrag">CRITICAL</span>。参考下面<em>定位Hodring日志</em>部分获取更多信息。<br>
-	  4. 指定了无效的tarball,可能未正确打包。<br>
-          5. 无法与外部配置的HDFS通信。<br>
-          当DFS或Job tracker出现故障时,你可以登录到HOD失败信息中提到的主机上,进行debug。解决问题的时候,你也应通过查看ringmaster日志中的其它日志信息,来检查其他机器是否在启动jobtracker/namenode时也出现了问题,而不只是检查错误信息中提到的主机。其他机器也可能发生问题是因为HOD会按照配置项<a href="hod_config_guide.html#3.4+ringmaster%E7%9A%84%E9%85%8D%E7%BD%AE%E9%A1%B9">ringmaster.max-master-failures</a>的设置在多个机器上连续尝试和启动hadoop守护进程。更多关于ringmaster日志的信息请参考下文<em>定位Ringmaster日志</em>。
-</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 8 </td>
-        <td colspan="1" rowspan="1">Job tracker故障</td>
-        <td colspan="1" rowspan="1">与<em>DFS故障</em>情形中的原因类似。</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 10 </td>
-        <td colspan="1" rowspan="1">集群死亡</td>
-        <td colspan="1" rowspan="1">1. 集群因为较长时间空闲被自动回收。<br>
-          2. 集群因系统管理员或者用户指定的时钟时间到期被自动回收。<br>
-          3. 无法与成功分配的JobTracker以及HDFS的NameNode通信。回收集群,重新分配。</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 12 </td>
-        <td colspan="1" rowspan="1">集群已分配</td>
-<td colspan="1" rowspan="1">指定的集群目录已被用于先前的分配操作,且尚未回收。请指定另外一个目录,或者先回收先前分配的集群。</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 13 </td>
-        <td colspan="1" rowspan="1">HDFS死亡</td>
-        <td colspan="1" rowspan="1">无法与HDFS的NameNode通信。HDFS的NameNode停掉了。</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 14 </td>
-        <td colspan="1" rowspan="1">Mapred死亡</td>
-        <td colspan="1" rowspan="1"> 1. 集群因为长时间闲置被自动回收。 <br>
-          2. 集群因系统管理员或用户指定的时钟时间到期被自动回收。<br>
-	  3. 无法与Map/Reduce的JobTracker通信。JobTracker节点宕机。 <br>
-          
-</td>
-      
-</tr>
-      
-<tr>
-        
-<td colspan="1" rowspan="1"> 15 </td>
-        <td colspan="1" rowspan="1">集群未分配</td>
-<td colspan="1" rowspan="1">一个需要已分配集群的操作被指定了一个没有状态信息的集群目录。</td>
-      
-</tr>
-   
-      
-<tr>
-        
-<td colspan="1" rowspan="1">任意非0退出代码</td>
-        <td colspan="1" rowspan="1">HOD脚本错误</td>
-<td colspan="1" rowspan="1">如果使用了hod的脚本选项,很可能这个退出代码是脚本自身的退出码。不幸的是,它可能会与hod自己的退出码冲突。为帮助用户区分两者,如果脚本返回了退出码,hod会将其写到集群目录下的script.exitcode文件。你可以cat这个文件以确定脚本的退出码。如果该文件不存在,则退出代码就是hod命令的退出码。</td>
-      
-</tr>
-  
-</table>
-<a name="N10752"></a><a name="Hadoop+DFSClient%E8%AD%A6%E5%91%8ANotReplicatedYetException%E4%BF%A1%E6%81%AF"></a>
-<h3 class="h4">Hadoop DFSClient警告NotReplicatedYetException信息</h3>
-<p>有时,当你申请到一个HOD集群后马上尝试上传文件到HDFS时,DFSClient会警告NotReplicatedYetException。通常会有一个这样的信息 - </p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">WARN
-hdfs.DFSClient: NotReplicatedYetException sleeping &lt;filename&gt; retries
-left 3</span></td>
-</tr>
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">08/01/25 16:31:40 INFO hdfs.DFSClient:
-org.apache.hadoop.ipc.RemoteException: java.io.IOException: File
-&lt;filename&gt; could only be replicated to 0 nodes, instead of
-1</span></td>
-</tr>
-</table>
-<p> 当集群中的DataNode还在和NameNode建立联络时就向集群上传文件,就会发生这种现象。在上传新文件到HDFS之前多等待一段时间就可以解决这个问题,因为这样可以让足够多的DataNode启动并联络上NameNode。</p>
-<a name="N1076A"></a><a name="%E6%88%90%E5%8A%9F%E5%88%86%E9%85%8D%E7%9A%84%E9%9B%86%E7%BE%A4%E4%B8%8A%E6%97%A0%E6%B3%95%E8%BF%90%E8%A1%8CHadoop%E4%BD%9C%E4%B8%9A"></a>
-<h3 class="h4">成功分配的集群上无法运行Hadoop作业</h3>
-<a name="Hadoop_Jobs_Not_Running_on_a_Suc" id="Hadoop_Jobs_Not_Running_on_a_Suc"></a>
-<p>这种情况通常发生在:一个集群分配之后在一段时间内处于不活跃状态,之后有hadoop作业试图在这个集群上运行。此时Hadoop作业会失败,产生如下异常信息:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">08/01/25 16:31:40 INFO ipc.Client: Retrying connect to server: foo.bar.com/1.1.1.1:53567. Already tried 1 time(s).</span></td>
-</tr>
-</table>
-<p>
-<em>可能原因:</em>相当长的时间内无hadoop作业运行,集群会如<em>闲置集群的自动回收</em>一节介绍的那样被自动回收。回收该集群,然后重新分配。</p>
-<p>
-<em>可能原因:</em>从分配开始算起,Torque管理员指定的或<em>指定额外的作业属性</em>一节中定义的<span class="codefrag">-l</span>选项指定的时间上限过期。这种情况下集群可能已被释放。回收集群,然后重新分配。</p>
-<p>
-<em>可能原因:</em>提交作业所用的hadoop客户端版本与供应集群的Hadoop版本(通常通过tarball选项指定)不兼容。请确保使用相互兼容的版本。</p>
-<p>
-<em>可能原因:</em> 你使用了<span class="codefrag">-M</span>或<span class="codefrag">-H</span>选项指定Hadoop配置,其中有未正确转义的字符,比如空格或逗号。参考<em>HOD配置选项</em>一节以了解如何正确指定这些选项。</p>
-<a name="N107A5"></a><a name="%E6%88%91%E7%9A%84Hadoop%E4%BD%9C%E4%B8%9A%E8%A2%AB%E4%B8%AD%E6%AD%A2%E4%BA%86"></a>
-<h3 class="h4">我的Hadoop作业被中止了</h3>
-<a name="My_Hadoop_Job_Got_Killed" id="My_Hadoop_Job_Got_Killed"></a>
-<p>
-<em>可能原因:</em>从分配开始算起,Torque管理员指定的或<em>指定额外的作业属性</em>一节中定义的<span class="codefrag">-l</span>选项指定的时间上限过期。这种情况下集群可能已被释放。回收集群,然后重新分配,这次要指定一个大一点的时钟时间。</p>
-<p>
-<em>可能原因:</em> JobTracker节点出现问题。参考<em>收集和查看Hadoop日志</em>一节以获取更多信息。</p>
-<a name="N107C0"></a><a name="Hadoop%E4%BD%9C%E4%B8%9A%E5%A4%B1%E8%B4%A5%E5%B9%B6%E8%BF%94%E5%9B%9E%E6%B6%88%E6%81%AF%EF%BC%9A%E2%80%98Job+tracker+still+initializing%E2%80%99"></a>
-<h3 class="h4">Hadoop作业失败并返回消息:&lsquo;Job tracker still initializing&rsquo;</h3>
-<a name="Hadoop_Job_Fails_with_Message_Jo" id="Hadoop_Job_Fails_with_Message_Jo"></a>
-<p>
-<em>可能原因:</em>hadoop作业是作为HOD脚本的一部分运行的,它在JobTracker完全就绪前就开始了执行。分配集群时为配置项<span class="codefrag">--hod.script-wait-time</span>设定一个大一点的值。通常取120即可,尽管一般没必要这么大。</p>
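-<p>例如,可以在执行脚本操作时通过长选项格式加大等待时间(示例:其中的节点数和脚本文件名仅为演示):</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod script -d cluster_dir -n 3 -s my_script.sh --hod.script-wait-time=120</span></td>
-</tr>
-</table>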
-<a name="N107D0"></a><a name="Torque%E7%9A%84%E9%80%80%E5%87%BA%E4%BB%A3%E7%A0%81%E6%B2%A1%E6%9C%89%E5%8C%85%E5%90%ABHOD%E7%9A%84"></a>
-<h3 class="h4">Torque的退出代码没有包含HOD的</h3>
-<a name="The_Exit_Codes_For_HOD_Are_Not_G" id="The_Exit_Codes_For_HOD_Are_Not_G"></a>
-<p>
-<em>可能原因:</em>此功能需要Hadoop 0.16。所用的Hadoop版本不满足这个条件。请使用合适的Hadoop版本。</p>
-<p>
-<em>可能原因:</em>没有使用<span class="codefrag">hod</span>命令回收集群;例如直接使用<span class="codefrag">qdel</span>。当使用这种方式回收集群时,HOD进程被信号中止。这会导致退出码是基于signal number的,而不是程序的退出码。</p>
-<a name="N107E8"></a><a name="Hadoop%E6%97%A5%E5%BF%97%E6%9C%AA%E8%A2%AB%E4%B8%8A%E4%BC%A0%E5%88%B0DFS"></a>
-<h3 class="h4">Hadoop日志未被上传到DFS</h3>
-<a name="The_Hadoop_Logs_are_Not_Uploaded" id="The_Hadoop_Logs_are_Not_Uploaded"></a>
-<p>
-<em>可能原因:</em>上传日志所使用的hadoop与外部HDFS的版本不兼容。确保<span class="codefrag">hodring.pkgs</span>选项指定了正确的版本。</p>
-<a name="N107F8"></a><a name="%E5%AE%9A%E4%BD%8DRingmaster%E6%97%A5%E5%BF%97"></a>
-<h3 class="h4">定位Ringmaster日志</h3>
-<a name="Locating_Ringmaster_Logs" id="Locating_Ringmaster_Logs"></a>
-<p>遵循以下步骤定位ringmaster日志:</p>
-<ul>
-    
-<li>用-b选项在调试模式执行hod。这会打印出当前运行的Torque作业的标识。</li>
-    
-<li>执行<span class="codefrag">qstat -f torque_job_id</span>,在输出中查找<span class="codefrag">exec_host</span>参数的值。列表中的第一个主机就是ringmaster节点。</li>
-    
-<li>登陆该节点。</li>
-  
-<li>ringmaster日志的位置由hodrc中的<span class="codefrag">ringmaster.log-dir</span>项指定。日志文件的名字会是<span class="codefrag">username.torque_job_id/ringmaster-main.log</span>。</li>
-    
-<li>如果你没有获取到足够的信息,你可以将ringmaster的调试级别设为4。这可通过向hod命令行传递<span class="codefrag">--ringmaster.debug 4</span>做到。</li>
-  
-</ul>
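-<p>例如,上述步骤大致对应下面的命令序列(仅作演示:其中的作业号、主机名、节点数和日志目录均为占位符,需替换为实际值):</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ hod allocate -d cluster_dir -n 4 -b 4</span></td>
-</tr>
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ qstat -f torque_job_id | grep exec_host</span></td>
-</tr>
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ ssh ringmaster_host</span></td>
-</tr>
-<tr>
-<td colspan="1" rowspan="1"><span class="codefrag">$ less ringmaster_log_dir/username.torque_job_id/ringmaster-main.log</span></td>
-</tr>
-</table>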
-<a name="N10824"></a><a name="%E5%AE%9A%E4%BD%8DHodring%E6%97%A5%E5%BF%97"></a>
-<h3 class="h4">定位Hodring日志</h3>
-<a name="Locating_Hodring_Logs" id="Locating_Hodring_Logs"></a>
-<p>遵循以下步骤定位hodring日志:</p>
-<ul>
-    
-<li>用-b选项在调试模式下运行hod。这将打印当前运行的Torque作业的标识。</li>
-    
-<li>执行<span class="codefrag">qstat -f torque_job_id</span>,查看输出中<span class="codefrag">exec_host</span>参数的值。列表中的的所有节点上都有一个hodring。</li>
-    
-<li>登陆到任何一个节点。</li>
-    
-<li>hodring日志的位置由hodrc中的<span class="codefrag">hodring.log-dir</span>项指定。日志文件的名字会是<span class="codefrag">username.torque_job_id/hodring-main.log</span>。</li>
-    
-<li>如果你没有获得足够的信息,你或许想将hodring的调试等级更改为4。这可以向hod命令行传递<span class="codefrag">--hodring.debug 4</span> 来做到。</li>
-  
-</ul>
-</div>	
-
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 358
docs/cn/hod_user_guide.pdf


BIN
docs/cn/images/built-with-forrest-button.png


BIN
docs/cn/images/core-logo.gif


BIN
docs/cn/images/favicon.ico


BIN
docs/cn/images/hadoop-logo.jpg


BIN
docs/cn/images/hdfsarchitecture.gif


BIN
docs/cn/images/hdfsdatanodes.gif


BIN
docs/cn/images/instruction_arrow.png


+ 0 - 268
docs/cn/index.html

@@ -1,268 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>Hadoop文档</title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">项目</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">维基</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.18文档</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">文档</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menupage">
-<div class="menupagetitle">概述</div>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">快速入门</a>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">集群搭建</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS构架设计</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS使用指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS权限指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS配额管理指南</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">命令手册</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">FS Shell使用指南</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">DistCp使用指南</a>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Map-Reduce教程</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop本地库</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hod.html">Hadoop On Demand</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>
-</div>
-</div>
-<div id="credit">
-<hr>
-<a href="http://forrest.apache.org/"><img border="0" title="Built with Apache Forrest" alt="Built with Apache Forrest - logo" src="images/built-with-forrest-button.png" style="width: 88px;height: 31px;"></a>
-</div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="index.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>Hadoop文档</h1>
-    
-<p>
-	下面的文档是一些概念介绍和操作教程,可帮助你开始使用Hadoop。如果遇到了问题,你可以向<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>求助或者浏览一下存档邮件。
-    </p>
-    
-<ul>
-      
-<li>
-<a href="quickstart.html">Hadoop快速入门</a>
-</li>
-      
-<li>
-<a href="cluster_setup.html">Hadoop集群搭建</a>
-</li>
-      
-<li>
-<a href="hdfs_design.html">Hadoop分布式文件系统</a>
-</li>
-      
-<li>
-<a href="mapred_tutorial.html">Hadoop Map-Reduce教程</a>
-</li>
-      
-<li>
-<a href="native_libraries.html">Hadoop本地库</a>
-</li>
-      
-<li>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</li>
-      
-<li>
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</li>
-      
-<li>
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</li>
-    
-</ul>
-    
-<p>
-    
-</p>
-
-  
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<div id="logos"></div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 160
docs/cn/index.pdf


+ 0 - 380
docs/cn/linkmap.html

@@ -1,380 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>Site Linkmap Table of Contents</title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">项目</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">维基</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.18文档</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_1.1', 'skin/')" id="menu_1.1Title" class="menutitle">文档</div>
-<div id="menu_1.1" class="menuitemgroup">
-<div class="menuitem">
-<a href="index.html">概述</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">快速入门</a>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">集群搭建</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS构架设计</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS使用指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS权限指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS配额管理指南</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">命令手册</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">FS Shell使用指南</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">DistCp使用指南</a>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Map-Reduce教程</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop本地库</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hod.html">Hadoop On Demand</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="linkmap.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>Site Linkmap Table of Contents</h1>
-<p>
-          This is a map of the complete site and its structure.
-        </p>
-<ul>
-<li>
-<a>Hadoop</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>site</em>
-</li>
-<ul>
-
-  
-<ul>
-<li>
-<a>文档</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>docs</em>
-</li>
-<ul> 
-    
-<ul>
-<li>
-<a href="index.html">概述</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>overview</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="quickstart.html">快速入门</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>quickstart</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="cluster_setup.html">集群搭建</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>setup</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="hdfs_design.html">HDFS构架设计</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>hdfs</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="hdfs_user_guide.html">HDFS使用指南</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>hdfs</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="hdfs_permissions_guide.html">HDFS权限指南</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>hdfs</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="hdfs_quota_admin_guide.html">HDFS配额管理指南</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>hdfs</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="commands_manual.html">命令手册</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>commands</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="hdfs_shell.html">FS Shell使用指南</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>fs</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="distcp.html">DistCp使用指南</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>distcp</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="mapred_tutorial.html">Map-Reduce教程</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>mapred</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="native_libraries.html">Hadoop本地库</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>mapred</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="streaming.html">Streaming</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>streaming</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="hadoop_archives.html">Hadoop Archives</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>archives</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="hod.html">Hadoop On Demand</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>hod</em>
-</li>
-<ul>
-      
-      
-      
-    
-</ul>
-</ul>
-    
-<ul>
-<li>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>api</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>jdiff</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="http://wiki.apache.org/hadoop/">维基</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>wiki</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>faq</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>lists</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>relnotes</em>
-</li>
-</ul>
-    
-<ul>
-<li>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>changes</em>
-</li>
-</ul>
-  
-</ul>
-</ul>
-
- 
- 
-
-</ul>
-</ul>
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 62
docs/cn/linkmap.pdf


+ 0 - 667
docs/cn/mapred-default.html

@@ -1,667 +0,0 @@
-<html>
-<body>
-<table border="1">
-<tr>
-<td>name</td><td>value</td><td>description</td>
-</tr>
-<tr>
-<td><a name="hadoop.job.history.location">hadoop.job.history.location</a></td><td></td><td> If job tracker is static the history files are stored 
-  in this single well known place. If No value is set here, by default,
-  it is in the local file system at ${hadoop.log.dir}/history.
-  </td>
-</tr>
-<tr>
-<td><a name="hadoop.job.history.user.location">hadoop.job.history.user.location</a></td><td></td><td> User can specify a location to store the history files of 
-  a particular job. If nothing is specified, the logs are stored in 
-  output directory. The files are stored in "_logs/history/" in the directory.
-  User can stop logging by giving the value "none". 
-  </td>
-</tr>
-<tr>
-<td><a name="io.sort.factor">io.sort.factor</a></td><td>10</td><td>The number of streams to merge at once while sorting
-  files.  This determines the number of open file handles.</td>
-</tr>
-<tr>
-<td><a name="io.sort.mb">io.sort.mb</a></td><td>100</td><td>The total amount of buffer memory to use while sorting 
-  files, in megabytes.  By default, gives each merge stream 1MB, which
-  should minimize seeks.</td>
-</tr>
-<tr>
-<td><a name="io.sort.record.percent">io.sort.record.percent</a></td><td>0.05</td><td>The percentage of io.sort.mb dedicated to tracking record
-  boundaries. Let this value be r, io.sort.mb be x. The maximum number
-  of records collected before the collection thread must block is equal
-  to (r * x) / 4</td>
-</tr>
-<tr>
-<td><a name="io.sort.spill.percent">io.sort.spill.percent</a></td><td>0.80</td><td>The soft limit in either the buffer or record collection
-  buffers. Once reached, a thread will begin to spill the contents to disk
-  in the background. Note that this does not imply any chunking of data to
-  the spill. A value less than 0.5 is not recommended.</td>
-</tr>
-<tr>
-<td><a name="io.map.index.skip">io.map.index.skip</a></td><td>0</td><td>Number of index entries to skip between each entry.
-  Zero by default. Setting this to values larger than zero can
-  facilitate opening large map files using less memory.</td>
-</tr>
-<tr>
-<td><a name="mapred.job.tracker">mapred.job.tracker</a></td><td>local</td><td>The host and port that the MapReduce job tracker runs
-  at.  If "local", then jobs are run in-process as a single map
-  and reduce task.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.tracker.http.address">mapred.job.tracker.http.address</a></td><td>0.0.0.0:50030</td><td>
-    The job tracker http server address and port the server will listen on.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.tracker.handler.count">mapred.job.tracker.handler.count</a></td><td>10</td><td>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.tracker.report.address">mapred.task.tracker.report.address</a></td><td>127.0.0.1:0</td><td>The interface and port that task tracker server listens on. 
-  Since it is only connected to by the tasks, it uses the local interface.
-  EXPERT ONLY. Should only be changed if your host does not have the loopback 
-  interface.</td>
-</tr>
-<tr>
-<td><a name="mapred.local.dir">mapred.local.dir</a></td><td>${hadoop.tmp.dir}/mapred/local</td><td>The local directory where MapReduce stores intermediate
-  data files.  May be a comma-separated list of
-  directories on different devices in order to spread disk i/o.
-  Directories that do not exist are ignored.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.system.dir">mapred.system.dir</a></td><td>${hadoop.tmp.dir}/mapred/system</td><td>The shared directory where MapReduce stores control files.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.temp.dir">mapred.temp.dir</a></td><td>${hadoop.tmp.dir}/mapred/temp</td><td>A shared directory for temporary files.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.local.dir.minspacestart">mapred.local.dir.minspacestart</a></td><td>0</td><td>If the space in mapred.local.dir drops under this, 
-  do not ask for more tasks.
-  Value in bytes.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.local.dir.minspacekill">mapred.local.dir.minspacekill</a></td><td>0</td><td>If the space in mapred.local.dir drops under this, 
-    do not ask more tasks until all the current ones have finished and 
-    cleaned up. Also, to save the rest of the tasks we have running, 
-    kill one of them, to clean up some space. Start with the reduce tasks,
-    then go with the ones that have finished the least.
-    Value in bytes.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.expiry.interval">mapred.tasktracker.expiry.interval</a></td><td>600000</td><td>Expert: The time-interval, in milliseconds, after which
-  a tasktracker is declared 'lost' if it doesn't send heartbeats.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.instrumentation">mapred.tasktracker.instrumentation</a></td><td>org.apache.hadoop.mapred.TaskTrackerMetricsInst</td><td>Expert: The instrumentation class to associate with each TaskTracker.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.vmem.reserved">mapred.tasktracker.vmem.reserved</a></td><td>-1</td><td>Configuration property to specify the amount of virtual memory
-    that has to be reserved by the TaskTracker for system usage (OS, TT etc).
-    The reserved virtual memory should be a part of the total virtual memory
-    available on the TaskTracker.
-    
-    The reserved virtual memory and the total virtual memory values are
-    reported by the TaskTracker as part of heart-beat so that they can be
-    considered by a scheduler. Please refer to the documentation of the
-    configured scheduler to see how this property is used.
-    
-    These two values are also used by a TaskTracker for tracking tasks' memory
-    usage. Memory management functionality on a TaskTracker is disabled if this
-    property is set to -1, if it more than the total virtual memory on the 
-    tasktracker, or if either of the values is negative.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.pmem.reserved">mapred.tasktracker.pmem.reserved</a></td><td>-1</td><td>Configuration property to specify the amount of physical memory
-    that has to be reserved by the TaskTracker for system usage (OS, TT etc).
-    The reserved physical memory should be a part of the total physical memory
-    available on the TaskTracker.
-
-    The reserved physical memory and the total physical memory values are
-    reported by the TaskTracker as part of heart-beat so that they can be
-    considered by a scheduler. Please refer to the documentation of the
-    configured scheduler to see how this property is used.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.default.maxvmem">mapred.task.default.maxvmem</a></td><td>-1</td><td>
-    Cluster-wide configuration in bytes to be set by the administrators that
-    provides default amount of maximum virtual memory for job's tasks. This has
-    to be set on both the JobTracker node for the sake of scheduling decisions
-    and on the TaskTracker nodes for the sake of memory management.
-
-    If a job doesn't specify its virtual memory requirement by setting
-    mapred.task.maxvmem to -1, tasks are assured a memory limit set
-    to this property. This property is set to -1 by default.
-
-    This value should in general be less than the cluster-wide
-    configuration mapred.task.limit.maxvmem. If not or if it is not set,
-    TaskTracker's memory management will be disabled and a scheduler's memory
-    based scheduling decisions may be affected. Please refer to the
-    documentation of the configured scheduler to see how this property is used.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.limit.maxvmem">mapred.task.limit.maxvmem</a></td><td>-1</td><td>
-    Cluster-wide configuration in bytes to be set by the site administrators
-    that provides an upper limit on the maximum virtual memory that can be
-    specified by a job via mapred.task.maxvmem. This has to be set on both the
-    JobTracker node for the sake of scheduling decisions and on the TaskTracker
-    nodes for the sake of memory management.
-    
-    The job configuration mapred.task.maxvmem should not be more than this
-    value, otherwise depending on the scheduler being configured, the job may
-    be rejected or the job configuration may just be ignored. Please refer to
-    the documentation of the configured scheduler to see how this property is
-    used.
-
-    If it is not set on a TaskTracker, TaskTracker's memory management will be
-    disabled.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.maxvmem">mapred.task.maxvmem</a></td><td>-1</td><td>
-    The maximum amount of virtual memory any task of a job will use, in bytes.
-
-    This value will be used by TaskTrackers for monitoring the memory usage of
-    tasks of this jobs. If a TaskTracker's memory management functionality is
-    enabled, each task of this job will be allowed to use a maximum virtual
-    memory specified by this property. If the task's memory usage goes over 
-    this value, the task will be failed by the TT. If not set, the
-    cluster-wide configuration mapred.task.default.maxvmem is used as the
-    default value for memory requirements. If this property cascaded with
-    mapred.task.default.maxvmem becomes equal to -1, the job's tasks will
-    not be assured any particular amount of virtual memory and may be killed by
-    a TT that intends to control the total memory usage of the tasks via memory
-    management functionality. If the memory management functionality is
-    disabled on a TT, this value is ignored.
-
-    This value should not be more than the cluster-wide configuration
-    mapred.task.limit.maxvmem.
-
-    This value may be used by schedulers that support scheduling based on job's
-    memory requirements. Please refer to the documentation of the scheduler
-    being configured to see if it does memory based scheduling and if it does,
-    how this property is used by that scheduler.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.maxpmem">mapred.task.maxpmem</a></td><td>-1</td><td>
-   The maximum amount of physical memory any task of a job will use in bytes.
-
-   This value may be used by schedulers that support scheduling based on job's
-   memory requirements. In general, a task of this job will be scheduled on a
-   TaskTracker, only if the amount of physical memory still unoccupied on the
-   TaskTracker is greater than or equal to this value. Different schedulers can
-   take different decisions, some might just ignore this value. Please refer to
-   the documentation of the scheduler being configured to see if it does
-   memory based scheduling and if it does, how this variable is used by that
-   scheduler.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.memory_calculator_plugin">mapred.tasktracker.memory_calculator_plugin</a></td><td></td><td>
-   Name of the class whose instance will be used to query memory information
-   on the tasktracker.
-   
-   The class must be an instance of 
-   org.apache.hadoop.util.MemoryCalculatorPlugin. If the value is null, the
-   tasktracker attempts to use a class appropriate to the platform. 
-   Currently, the only platform supported is Linux.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.taskmemorymanager.monitoring-interval">mapred.tasktracker.taskmemorymanager.monitoring-interval</a></td><td>5000</td><td>The interval, in milliseconds, for which the tasktracker waits
-   between two cycles of monitoring its tasks' memory usage. Used only if
-   tasks' memory management is enabled via mapred.tasktracker.tasks.maxmemory.
-   </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill">mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill</a></td><td>5000</td><td>The time, in milliseconds, the tasktracker waits for sending a
-  SIGKILL to a process that has overrun memory limits, after it has been sent
-  a SIGTERM. Used only if tasks' memory management is enabled via
-  mapred.tasktracker.tasks.maxmemory.</td>
-</tr>
-<tr>
-<td><a name="mapred.map.tasks">mapred.map.tasks</a></td><td>2</td><td>The default number of map tasks per job.  Typically set
-  to a prime several times greater than number of available hosts.
-  Ignored when mapred.job.tracker is "local".  
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.reduce.tasks">mapred.reduce.tasks</a></td><td>1</td><td>The default number of reduce tasks per job.  Typically set
-  to a prime close to the number of available hosts.  Ignored when
-  mapred.job.tracker is "local".
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.restart.recover">mapred.jobtracker.restart.recover</a></td><td>false</td><td>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.job.history.block.size">mapred.jobtracker.job.history.block.size</a></td><td>3145728</td><td>The block size of the job history file. Since the job recovery
-               uses job history, its important to dump job history to disk as 
-               soon as possible. Note that this is an expert level parameter.
-               The default value is set to 3 MB.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.taskScheduler">mapred.jobtracker.taskScheduler</a></td><td>org.apache.hadoop.mapred.JobQueueTaskScheduler</td><td>The class responsible for scheduling the tasks.</td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.taskScheduler.maxRunningTasksPerJob">mapred.jobtracker.taskScheduler.maxRunningTasksPerJob</a></td><td></td><td>The maximum number of running tasks for a job before
-  it gets preempted. No limits if undefined.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.map.max.attempts">mapred.map.max.attempts</a></td><td>4</td><td>Expert: The maximum number of attempts per map task.
-  In other words, framework will try to execute a map task these many number
-  of times before giving up on it.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.reduce.max.attempts">mapred.reduce.max.attempts</a></td><td>4</td><td>Expert: The maximum number of attempts per reduce task.
-  In other words, framework will try to execute a reduce task these many number
-  of times before giving up on it.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.reduce.parallel.copies">mapred.reduce.parallel.copies</a></td><td>5</td><td>The default number of parallel transfers run by reduce
-  during the copy(shuffle) phase.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.reduce.copy.backoff">mapred.reduce.copy.backoff</a></td><td>300</td><td>The maximum amount of time (in seconds) a reducer spends on 
-  fetching one map output before declaring it as failed.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.timeout">mapred.task.timeout</a></td><td>600000</td><td>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.map.tasks.maximum">mapred.tasktracker.map.tasks.maximum</a></td><td>2</td><td>The maximum number of map tasks that will be run
-  simultaneously by a task tracker.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.reduce.tasks.maximum">mapred.tasktracker.reduce.tasks.maximum</a></td><td>2</td><td>The maximum number of reduce tasks that will be run
-  simultaneously by a task tracker.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.completeuserjobs.maximum">mapred.jobtracker.completeuserjobs.maximum</a></td><td>100</td><td>The maximum number of complete jobs per user to keep around 
-  before delegating them to the job history.</td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.instrumentation">mapred.jobtracker.instrumentation</a></td><td>org.apache.hadoop.mapred.JobTrackerMetricsInst</td><td>Expert: The instrumentation class to associate with each JobTracker.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.child.java.opts">mapred.child.java.opts</a></td><td>-Xmx200m</td><td>Java opts for the task tracker child processes.  
-  The following symbol, if present, will be interpolated: @taskid@ is replaced 
-  by current TaskID. Any other occurrences of '@' will go unchanged.
-  For example, to enable verbose gc logging to a file named for the taskid in
-  /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-        -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-  
-  The configuration variable mapred.child.ulimit can be used to control the
-  maximum virtual memory of the child processes. 
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.child.ulimit">mapred.child.ulimit</a></td><td></td><td>The maximum virtual memory, in KB, of a process launched by the 
-  Map-Reduce framework. This can be used to control both the Mapper/Reducer 
-  tasks and applications using Hadoop Pipes, Hadoop Streaming etc. 
-  By default it is left unspecified to let cluster admins control it via 
-  limits.conf and other such relevant mechanisms.
-  
-  Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to
-  JavaVM, else the VM might not start. 
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.child.tmp">mapred.child.tmp</a></td><td>./tmp</td><td> To set the value of tmp directory for map and reduce tasks.
-  If the value is an absolute path, it is directly assigned. Otherwise, it is
-  prepended with task's working directory. The java tasks are executed with
-  option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and
-  streaming are set with environment variable,
-   TMPDIR='the absolute path of the tmp dir'
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.inmem.merge.threshold">mapred.inmem.merge.threshold</a></td><td>1000</td><td>The threshold, in terms of the number of files 
-  for the in-memory merge process. When we accumulate threshold number of files
-  we initiate the in-memory merge and spill to disk. A value of 0 or less than
-  0 indicates we don't want any threshold and instead depend only on
-  the ramfs's memory consumption to trigger the merge.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.shuffle.merge.percent">mapred.job.shuffle.merge.percent</a></td><td>0.66</td><td>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.shuffle.input.buffer.percent">mapred.job.shuffle.input.buffer.percent</a></td><td>0.70</td><td>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.reduce.input.buffer.percent">mapred.job.reduce.input.buffer.percent</a></td><td>0.0</td><td>The percentage of memory- relative to the maximum heap size- to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.map.tasks.speculative.execution">mapred.map.tasks.speculative.execution</a></td><td>true</td><td>If true, then multiple instances of some map tasks 
-               may be executed in parallel.</td>
-</tr>
-<tr>
-<td><a name="mapred.reduce.tasks.speculative.execution">mapred.reduce.tasks.speculative.execution</a></td><td>true</td><td>If true, then multiple instances of some reduce tasks 
-               may be executed in parallel.</td>
-</tr>
-<tr>
-<td><a name="mapred.job.reuse.jvm.num.tasks">mapred.job.reuse.jvm.num.tasks</a></td><td>1</td><td>How many tasks to run per jvm. If set to -1, there is
-  no limit. 
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.min.split.size">mapred.min.split.size</a></td><td>0</td><td>The minimum size chunk that map input should be split
-  into.  Note that some file formats may have minimum split sizes that
-  take priority over this setting.</td>
-</tr>
-<tr>
-<td><a name="mapred.jobtracker.maxtasks.per.job">mapred.jobtracker.maxtasks.per.job</a></td><td>-1</td><td>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </td>
-</tr>
-<tr>
-<td><a name="mapred.submit.replication">mapred.submit.replication</a></td><td>10</td><td>The replication level for submitted job files.  This
-  should be around the square root of the number of nodes.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.dns.interface">mapred.tasktracker.dns.interface</a></td><td>default</td><td>The name of the Network Interface from which a task
-  tracker should report its IP address.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.dns.nameserver">mapred.tasktracker.dns.nameserver</a></td><td>default</td><td>The host name or IP address of the name server (DNS)
-  which a TaskTracker should use to determine the host name used by
-  the JobTracker for communication and display purposes.
-  </td>
-</tr>
-<tr>
-<td><a name="tasktracker.http.threads">tasktracker.http.threads</a></td><td>40</td><td>The number of worker threads that for the http server. This is
-               used for map output fetching
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.tracker.http.address">mapred.task.tracker.http.address</a></td><td>0.0.0.0:50060</td><td>
-    The task tracker http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
-<td><a name="keep.failed.task.files">keep.failed.task.files</a></td><td>false</td><td>Should the files for failed tasks be kept. This should only be 
-               used on jobs that are failing, because the storage is never
-               reclaimed. It also prevents the map outputs from being erased
-               from the reduce directory as they are consumed.</td>
-</tr>
-<tr>
-<td><a name="mapred.output.compress">mapred.output.compress</a></td><td>false</td><td>Should the job outputs be compressed?
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.output.compression.type">mapred.output.compression.type</a></td><td>RECORD</td><td>If the job outputs are to compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.output.compression.codec">mapred.output.compression.codec</a></td><td>org.apache.hadoop.io.compress.DefaultCodec</td><td>If the job outputs are compressed, how should they be compressed?
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.compress.map.output">mapred.compress.map.output</a></td><td>false</td><td>Should the outputs of the maps be compressed before being
-               sent across the network? Uses SequenceFile compression.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.map.output.compression.codec">mapred.map.output.compression.codec</a></td><td>org.apache.hadoop.io.compress.DefaultCodec</td><td>If the map outputs are compressed, how should they be 
-               compressed?
-  </td>
-</tr>
-<tr>
-<td><a name="map.sort.class">map.sort.class</a></td><td>org.apache.hadoop.util.QuickSort</td><td>The default sort class for sorting keys.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.userlog.limit.kb">mapred.userlog.limit.kb</a></td><td>0</td><td>The maximum size of user-logs of each task in KB. 0 disables the cap.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.userlog.retain.hours">mapred.userlog.retain.hours</a></td><td>24</td><td>The maximum time, in hours, for which the user-logs are to be 
-          retained.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.hosts">mapred.hosts</a></td><td></td><td>Names a file that contains the list of nodes that may
-  connect to the jobtracker.  If the value is empty, all hosts are
-  permitted.</td>
-</tr>
-<tr>
-<td><a name="mapred.hosts.exclude">mapred.hosts.exclude</a></td><td></td><td>Names a file that contains the list of hosts that
-  should be excluded by the jobtracker.  If the value is empty, no
-  hosts are excluded.</td>
-</tr>
-<tr>
-<td><a name="mapred.max.tracker.blacklists">mapred.max.tracker.blacklists</a></td><td>4</td><td>The number of blacklists for a taskTracker by various jobs
-               after which the task tracker could be blacklisted across
-               all jobs. The tracker will be given a tasks later
-               (after a day). The tracker will become a healthy
-               tracker after a restart.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.max.tracker.failures">mapred.max.tracker.failures</a></td><td>4</td><td>The number of task-failures on a tasktracker of a given job 
-               after which new tasks of that job aren't assigned to it.
-  </td>
-</tr>
-<tr>
-<td><a name="jobclient.output.filter">jobclient.output.filter</a></td><td>FAILED</td><td>The filter for controlling the output of the task's userlogs sent
-               to the console of the JobClient. 
-               The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and 
-               ALL.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.tracker.persist.jobstatus.active">mapred.job.tracker.persist.jobstatus.active</a></td><td>false</td><td>Indicates if persistency of job status information is
-      active or not.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.job.tracker.persist.jobstatus.hours">mapred.job.tracker.persist.jobstatus.hours</a></td><td>0</td><td>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops out of the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.tracker.persist.jobstatus.dir">mapred.job.tracker.persist.jobstatus.dir</a></td><td>/jobtracker/jobsInfo</td><td>The directory where the job status information is persisted
-      in a file system to be available after it drops out of the memory queue and
-      between jobtracker restarts.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.task.profile">mapred.task.profile</a></td><td>false</td><td>To set whether the system should collect profiler
-     information for some of the tasks in this job? The information is stored
-     in the user log directory. The value is "true" if task profiling
-     is enabled.</td>
-</tr>
-<tr>
-<td><a name="mapred.task.profile.maps">mapred.task.profile.maps</a></td><td>0-2</td><td> To set the ranges of map tasks to profile.
-    mapred.task.profile has to be set to true for the value to be accounted.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.task.profile.reduces">mapred.task.profile.reduces</a></td><td>0-2</td><td> To set the ranges of reduce tasks to profile.
-    mapred.task.profile has to be set to true for the value to be accounted.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.line.input.format.linespermap">mapred.line.input.format.linespermap</a></td><td>1</td><td> Number of lines per split in NLineInputFormat.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.skip.attempts.to.start.skipping">mapred.skip.attempts.to.start.skipping</a></td><td>2</td><td> The number of Task attempts AFTER which skip mode 
-    will be kicked off. When skip mode is kicked off, the 
-    tasks reports the range of records which it will process 
-    next, to the TaskTracker. So that on failures, TT knows which 
-    ones are possibly the bad records. On further executions, 
-    those are skipped.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.skip.map.auto.incr.proc.count">mapred.skip.map.auto.incr.proc.count</a></td><td>true</td><td> The flag which if set to true, 
-    SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented 
-    by MapRunner after invoking the map function. This value must be set to 
-    false for applications which process the records asynchronously 
-    or buffer the input records. For example streaming. 
-    In such cases applications should increment this counter on their own.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.skip.reduce.auto.incr.proc.count">mapred.skip.reduce.auto.incr.proc.count</a></td><td>true</td><td> The flag which if set to true, 
-    SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented 
-    by framework after invoking the reduce function. This value must be set to 
-    false for applications which process the records asynchronously 
-    or buffer the input records. For example streaming. 
-    In such cases applications should increment this counter on their own.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.skip.out.dir">mapred.skip.out.dir</a></td><td></td><td> If no value is specified here, the skipped records are 
-    written to the output directory at _logs/skip.
-    The user can stop writing skipped records by giving the value "none". 
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.skip.map.max.skip.records">mapred.skip.map.max.skip.records</a></td><td>0</td><td> The number of acceptable skip records surrounding the bad 
-    record PER bad record in mapper. The number includes the bad record as well.
-    To turn the feature of detection/skipping of bad records off, set the 
-    value to 0.
-    The framework tries to narrow down the skipped range by retrying  
-    until this threshold is met OR all attempts get exhausted for this task. 
-    Set the value to Long.MAX_VALUE to indicate that framework need not try to 
-    narrow down. Whatever records(depends on application) get skipped are 
-    acceptable.
-    </td>
-</tr>
-<tr>
-<td><a name="mapred.skip.reduce.max.skip.groups">mapred.skip.reduce.max.skip.groups</a></td><td>0</td><td> The number of acceptable skip groups surrounding the bad 
-    group PER bad group in reducer. The number includes the bad group as well.
-    To turn the feature of detection/skipping of bad groups off, set the 
-    value to 0.
-    The framework tries to narrow down the skipped range by retrying  
-    until this threshold is met OR all attempts get exhausted for this task. 
-    Set the value to Long.MAX_VALUE to indicate that framework need not try to 
-    narrow down. Whatever groups(depends on application) get skipped are 
-    acceptable.
-    </td>
-</tr>
-<tr>
-<td><a name="job.end.retry.attempts">job.end.retry.attempts</a></td><td>0</td><td>Indicates how many times hadoop should attempt to contact the
-               notification URL </td>
-</tr>
-<tr>
-<td><a name="job.end.retry.interval">job.end.retry.interval</a></td><td>30000</td><td>Indicates time in milliseconds between notification URL retry
-                calls</td>
-</tr>
-<tr>
-<td><a name="hadoop.rpc.socket.factory.class.JobSubmissionProtocol">hadoop.rpc.socket.factory.class.JobSubmissionProtocol</a></td><td></td><td> SocketFactory to use to connect to a Map/Reduce master
-    (JobTracker). If null or empty, then use hadoop.rpc.socket.class.default.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.task.cache.levels">mapred.task.cache.levels</a></td><td>2</td><td> This is the max level of the task cache. For example, if
-    the level is 2, the tasks cached are at the host level and at the rack
-    level.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.queue.names">mapred.queue.names</a></td><td>default</td><td> Comma separated list of queues configured for this jobtracker.
-    Jobs are added to queues and schedulers can configure different 
-    scheduling properties for the various queues. To configure a property 
-    for a queue, the name of the queue must match the name specified in this 
-    value. Queue properties that are common to all schedulers are configured 
-    here with the naming convention mapred.queue.$QUEUE-NAME.$PROPERTY-NAME,
-    e.g. mapred.queue.default.acl-submit-job.
-    The number of queues configured in this parameter could depend on the
-    type of scheduler being used, as specified in 
-    mapred.jobtracker.taskScheduler. For example, the JobQueueTaskScheduler
-    supports only a single queue, which is the default configured here.
-    Before adding more queues, ensure that the scheduler you've configured
-    supports multiple queues.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.acls.enabled">mapred.acls.enabled</a></td><td>false</td><td> Specifies whether ACLs are enabled, and should be checked
-    for various operations.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.queue.default.acl-submit-job">mapred.queue.default.acl-submit-job</a></td><td>*</td><td> Comma separated list of user and group names that are allowed
-    to submit jobs to the 'default' queue. The user list and the group list
-    are separated by a blank. For example: alice,bob group1,group2. 
-    If set to the special value '*', it means all users are allowed to 
-    submit jobs. 
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.queue.default.acl-administer-jobs">mapred.queue.default.acl-administer-jobs</a></td><td>*</td><td> Comma separated list of user and group names that are allowed
-    to delete jobs or modify job's priority for jobs not owned by the current
-    user in the 'default' queue. The user list and the group list
-    are separated by a blank. For example: alice,bob group1,group2. 
-    If set to the special value '*', it means all users are allowed to do 
-    this operation.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.job.queue.name">mapred.job.queue.name</a></td><td>default</td><td> Queue to which a job is submitted. This must match one of the
-    queues defined in mapred.queue.names for the system. Also, the ACL setup
-    for the queue must allow the current user to submit a job to the queue.
-    Before specifying a queue, ensure that the system is configured with 
-    the queue, and access is allowed for submitting jobs to the queue.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.tasktracker.indexcache.mb">mapred.tasktracker.indexcache.mb</a></td><td>10</td><td> The maximum memory that a task tracker allows for the 
-    index cache that is used when serving map outputs to reducers.
-  </td>
-</tr>
-<tr>
-<td><a name="mapred.merge.recordsBeforeProgress">mapred.merge.recordsBeforeProgress</a></td><td>10000</td><td> The number of records to process during merge before
-   sending a progress notification to the TaskTracker.
-  </td>
-</tr>
-</table>
-</body>
-</html>
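
Most of the properties in the table above are set cluster-wide in the Hadoop configuration files, but the job-scoped ones can also be overridden per job through the generic JobConf setters. The following is a minimal, illustrative sketch only, not part of the documentation being removed; the class name ConfExample and the chosen values are hypothetical, while the property names come from the table above.

    import org.apache.hadoop.mapred.JobConf;

    public class ConfExample {
      // Apply a few per-job overrides; the JobConf would still need the usual
      // mapper/reducer/input/output setup before being submitted via JobClient.
      public static JobConf configure(JobConf conf) {
        // Submit to a queue listed in mapred.queue.names (ACLs permitting).
        conf.set("mapred.job.queue.name", "default");
        // Compress intermediate map output before it is shuffled to the reducers.
        conf.setBoolean("mapred.compress.map.output", true);
        // Turn off speculative execution of map tasks for this job only.
        conf.setBoolean("mapred.map.tasks.speculative.execution", false);
        // Reuse each task JVM for an unlimited number of tasks (-1 = no limit).
        conf.setInt("mapred.job.reuse.jvm.num.tasks", -1);
        return conf;
      }
    }

Values set this way take precedence over the defaults in the table unless an administrator has marked the corresponding property as final in the cluster configuration.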

+ 0 - 3464
docs/cn/mapred_tutorial.html

@@ -1,3464 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>Hadoop Map/Reduce教程</title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">项目</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">维基</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.18文档</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">文档</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">概述</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">快速入门</a>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">集群搭建</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS构架设计</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS使用指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS权限指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS配额管理指南</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">命令手册</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">FS Shell使用指南</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">DistCp使用指南</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">Map-Reduce教程</div>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop本地库</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hod.html">Hadoop On Demand</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="mapred_tutorial.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>Hadoop Map/Reduce教程</h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#%E7%9B%AE%E7%9A%84">目的</a>
-</li>
-<li>
-<a href="#%E5%85%88%E5%86%B3%E6%9D%A1%E4%BB%B6">先决条件</a>
-</li>
-<li>
-<a href="#%E6%A6%82%E8%BF%B0">概述</a>
-</li>
-<li>
-<a href="#%E8%BE%93%E5%85%A5%E4%B8%8E%E8%BE%93%E5%87%BA">输入与输出</a>
-</li>
-<li>
-<a href="#%E4%BE%8B%E5%AD%90%EF%BC%9AWordCount+v1.0">例子:WordCount v1.0</a>
-<ul class="minitoc">
-<li>
-<a href="#%E6%BA%90%E4%BB%A3%E7%A0%81">源代码</a>
-</li>
-<li>
-<a href="#%E7%94%A8%E6%B3%95">用法</a>
-</li>
-<li>
-<a href="#%E8%A7%A3%E9%87%8A">解释</a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#Map%2FReduce+-+%E7%94%A8%E6%88%B7%E7%95%8C%E9%9D%A2">Map/Reduce - 用户界面</a>
-<ul class="minitoc">
-<li>
-<a href="#%E6%A0%B8%E5%BF%83%E5%8A%9F%E8%83%BD%E6%8F%8F%E8%BF%B0">核心功能描述</a>
-<ul class="minitoc">
-<li>
-<a href="#Mapper">Mapper</a>
-</li>
-<li>
-<a href="#Reducer">Reducer</a>
-</li>
-<li>
-<a href="#Partitioner">Partitioner</a>
-</li>
-<li>
-<a href="#Reporter">Reporter</a>
-</li>
-<li>
-<a href="#OutputCollector">OutputCollector</a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E4%BD%9C%E4%B8%9A%E9%85%8D%E7%BD%AE">作业配置</a>
-</li>
-<li>
-<a href="#%E4%BB%BB%E5%8A%A1%E7%9A%84%E6%89%A7%E8%A1%8C%E5%92%8C%E7%8E%AF%E5%A2%83">任务的执行和环境</a>
-</li>
-<li>
-<a href="#%E4%BD%9C%E4%B8%9A%E7%9A%84%E6%8F%90%E4%BA%A4%E4%B8%8E%E7%9B%91%E6%8E%A7">作业的提交与监控</a>
-<ul class="minitoc">
-<li>
-<a href="#%E4%BD%9C%E4%B8%9A%E7%9A%84%E6%8E%A7%E5%88%B6">作业的控制</a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E4%BD%9C%E4%B8%9A%E7%9A%84%E8%BE%93%E5%85%A5">作业的输入</a>
-<ul class="minitoc">
-<li>
-<a href="#InputSplit">InputSplit</a>
-</li>
-<li>
-<a href="#RecordReader">RecordReader</a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E4%BD%9C%E4%B8%9A%E7%9A%84%E8%BE%93%E5%87%BA">作业的输出</a>
-<ul class="minitoc">
-<li>
-<a href="#%E4%BB%BB%E5%8A%A1%E7%9A%84Side-Effect+File">任务的Side-Effect File</a>
-</li>
-<li>
-<a href="#RecordWriter">RecordWriter</a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E5%85%B6%E4%BB%96%E6%9C%89%E7%94%A8%E7%9A%84%E7%89%B9%E6%80%A7">其他有用的特性</a>
-<ul class="minitoc">
-<li>
-<a href="#Counters">Counters</a>
-</li>
-<li>
-<a href="#DistributedCache">DistributedCache</a>
-</li>
-<li>
-<a href="#Tool">Tool</a>
-</li>
-<li>
-<a href="#IsolationRunner">IsolationRunner</a>
-</li>
-<li>
-<a href="#Profiling">Profiling</a>
-</li>
-<li>
-<a href="#%E8%B0%83%E8%AF%95">调试</a>
-</li>
-<li>
-<a href="#JobControl">JobControl</a>
-</li>
-<li>
-<a href="#%E6%95%B0%E6%8D%AE%E5%8E%8B%E7%BC%A9">数据压缩</a>
-</li>
-</ul>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E4%BE%8B%E5%AD%90%EF%BC%9AWordCount+v2.0">例子:WordCount v2.0</a>
-<ul class="minitoc">
-<li>
-<a href="#%E6%BA%90%E4%BB%A3%E7%A0%81-N10DC0">源代码</a>
-</li>
-<li>
-<a href="#%E8%BF%90%E8%A1%8C%E6%A0%B7%E4%BE%8B">运行样例</a>
-</li>
-<li>
-<a href="#%E7%A8%8B%E5%BA%8F%E8%A6%81%E7%82%B9">程序要点</a>
-</li>
-</ul>
-</li>
-</ul>
-</div>
-  
-    
-<a name="N1000D"></a><a name="%E7%9B%AE%E7%9A%84"></a>
-<h2 class="h3">目的</h2>
-<div class="section">
-<p>这篇教程从用户的角度出发,全面地介绍了Hadoop Map/Reduce框架的各个方面。</p>
-</div>
-    
-    
-<a name="N10017"></a><a name="%E5%85%88%E5%86%B3%E6%9D%A1%E4%BB%B6"></a>
-<h2 class="h3">先决条件</h2>
-<div class="section">
-<p>请先确认Hadoop被正确安装、配置和正常运行中。更多信息见:</p>
-<ul>
-        
-<li>
-          
-<a href="quickstart.html">Hadoop快速入门</a>对初次使用者。
-        </li>
-        
-<li>
-          
-<a href="cluster_setup.html">Hadoop集群搭建</a>对大规模分布式集群。
-        </li>
-      
-</ul>
-</div>
-    
-    
-<a name="N10032"></a><a name="%E6%A6%82%E8%BF%B0"></a>
-<h2 class="h3">概述</h2>
-<div class="section">
-<p>Hadoop Map/Reduce是一个使用简易的软件框架,基于它写出来的应用程序能够运行在由上千个商用机器组成的大型集群上,并以一种可靠容错的方式并行处理上T级别的数据集。</p>
-<p>一个Map/Reduce <em>作业(job)</em> 通常会把输入的数据集切分为若干独立的数据块,由
-      <em>map任务(task)</em>以完全并行的方式处理它们。框架会对map的输出先进行排序,
-      然后把结果输入给<em>reduce任务</em>。通常作业的输入和输出都会被存储在文件系统中。
-      整个框架负责任务的调度和监控,以及重新执行已经失败的任务。</p>
-<p>通常,Map/Reduce框架和<a href="hdfs_design.html">分布式文件系统</a>是运行在一组相同的节点上的,也就是说,计算节点和存储节点通常在一起。这种配置允许框架在那些已经存好数据的节点上高效地调度任务,这可以使整个集群的网络带宽被非常高效地利用。</p>
-<p>Map/Reduce框架由一个单独的master <span class="codefrag">JobTracker</span> 和每个集群节点一个slave <span class="codefrag">TaskTracker</span>共同组成。master负责调度构成一个作业的所有任务,这些任务分布在不同的slave上,master监控它们的执行,重新执行已经失败的任务。而slave仅负责执行由master指派的任务。</p>
-<p>应用程序至少应该指明输入/输出的位置(路径),并通过实现合适的接口或抽象类提供map和reduce函数。再加上其他作业的参数,就构成了<em>作业配置(job configuration)</em>。然后,Hadoop的 <em>job client</em>提交作业(jar包/可执行程序等)和配置信息给<span class="codefrag">JobTracker</span>,后者负责分发这些软件和配置信息给slave、调度任务并监控它们的执行,同时提供状态和诊断信息给job-client。</p>
-<p>虽然Hadoop框架是用Java<sup>TM</sup>实现的,但Map/Reduce应用程序则不一定要用
-      Java来写 。</p>
-<ul>
-        
-<li>
-          
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/streaming/package-summary.html">
-          Hadoop Streaming</a>是一种运行作业的实用工具,它允许用户创建和运行任何可执行程序
-          (例如:Shell工具)来做为mapper和reducer。
-        </li>
-        
-<li>
-          
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/pipes/package-summary.html">
-          Hadoop Pipes</a>是一个与<a href="http://www.swig.org/">SWIG</a>兼容的C++ API
-          (没有基于JNI<sup>TM</sup>技术),它也可用于实现Map/Reduce应用程序。
-        </li>
-      
-</ul>
-</div>
-    
-    
-<a name="N10082"></a><a name="%E8%BE%93%E5%85%A5%E4%B8%8E%E8%BE%93%E5%87%BA"></a>
-<h2 class="h3">输入与输出</h2>
-<div class="section">
-<p>Map/Reduce框架运转在<span class="codefrag">&lt;key, value&gt;</span> 键值对上,也就是说,
-      框架把作业的输入看为是一组<span class="codefrag">&lt;key, value&gt;</span> 键值对,同样也产出一组
-      <span class="codefrag">&lt;key, value&gt;</span> 键值对做为作业的输出,这两组键值对的类型可能不同。</p>
-<p>框架需要对<span class="codefrag">key</span>和<span class="codefrag">value</span>的类(classes)进行序列化操作,
-      因此,这些类需要实现 <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/io/Writable.html">Writable</a>接口。
-      另外,为了方便框架执行排序操作,<span class="codefrag">key</span>类必须实现
-      <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/io/WritableComparable.html">
-      WritableComparable</a>接口。
-      </p>
-<p>一个Map/Reduce 作业的输入和输出类型如下所示:</p>
-<p>
-        (input) <span class="codefrag">&lt;k1, v1&gt;</span> 
-        -&gt; 
-        <strong>map</strong> 
-        -&gt; 
-        <span class="codefrag">&lt;k2, v2&gt;</span> 
-        -&gt; 
-        <strong>combine</strong> 
-        -&gt; 
-        <span class="codefrag">&lt;k2, v2&gt;</span> 
-        -&gt; 
-        <strong>reduce</strong> 
-        -&gt; 
-        <span class="codefrag">&lt;k3, v3&gt;</span> (output)
-      </p>
-</div>
-
-    
-<a name="N100C4"></a><a name="%E4%BE%8B%E5%AD%90%EF%BC%9AWordCount+v1.0"></a>
-<h2 class="h3">例子:WordCount v1.0</h2>
-<div class="section">
-<p>在深入细节之前,让我们先看一个Map/Reduce的应用示例,以便对它们的工作方式有一个初步的认识。</p>
-<p>
-<span class="codefrag">WordCount</span>是一个简单的应用,它可以计算出指定数据集中每一个单词出现的次数。</p>
-<p>这个应用适用于
-      <a href="quickstart.html#Standalone+Operation">单机模式</a>,
-      <a href="quickstart.html#SingleNodeSetup">伪分布式模式</a> 或
-      <a href="quickstart.html#Fully-Distributed+Operation">完全分布式模式</a> 
-      三种Hadoop安装方式。</p>
-<a name="N100E1"></a><a name="%E6%BA%90%E4%BB%A3%E7%A0%81"></a>
-<h3 class="h4">源代码</h3>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-          
-<tr>
-            
-<th colspan="1" rowspan="1"></th>
-            <th colspan="1" rowspan="1">WordCount.java</th>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">1.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">package org.myorg;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">2.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">3.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import java.io.IOException;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">4.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import java.util.*;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">5.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">6.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import org.apache.hadoop.fs.Path;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">7.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import org.apache.hadoop.conf.*;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">8.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import org.apache.hadoop.io.*;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">9.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import org.apache.hadoop.mapred.*;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">10.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import org.apache.hadoop.util.*;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">11.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">12.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">public class WordCount {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">13.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">14.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">
-                public static class Map extends MapReduceBase 
-                implements Mapper&lt;LongWritable, Text, Text, IntWritable&gt; {
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">15.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                private final static IntWritable one = new IntWritable(1);
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">16.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">private Text word = new Text();</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">17.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">18.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                public void map(LongWritable key, Text value, 
-                OutputCollector&lt;Text, IntWritable&gt; output, 
-                Reporter reporter) throws IOException {
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">19.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">String line = value.toString();</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">20.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">StringTokenizer tokenizer = new StringTokenizer(line);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">21.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">while (tokenizer.hasMoreTokens()) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">22.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">word.set(tokenizer.nextToken());</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">23.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">output.collect(word, one);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">24.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">25.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">26.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">27.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">28.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">
-                public static class Reduce extends MapReduceBase implements 
-                Reducer&lt;Text, IntWritable, Text, IntWritable&gt; {
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">29.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                public void reduce(Text key, Iterator&lt;IntWritable&gt; values,
-                OutputCollector&lt;Text, IntWritable&gt; output, 
-                Reporter reporter) throws IOException {
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">30.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">int sum = 0;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">31.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">while (values.hasNext()) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">32.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">sum += values.next().get();</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">33.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">34.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">output.collect(key, new IntWritable(sum));</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">35.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">36.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">37.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">38.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">
-                public static void main(String[] args) throws Exception {
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">39.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                JobConf conf = new JobConf(WordCount.class);
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">40.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setJobName("wordcount");</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">41.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">42.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setOutputKeyClass(Text.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">43.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setOutputValueClass(IntWritable.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">44.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">45.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setMapperClass(Map.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">46.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setCombinerClass(Reduce.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">47.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setReducerClass(Reduce.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">48.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">49.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setInputFormat(TextInputFormat.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">50.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setOutputFormat(TextOutputFormat.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">51.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">52.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">FileInputFormat.setInputPaths(conf, new Path(args[0]));</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">53.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">FileOutputFormat.setOutputPath(conf, new Path(args[1]));</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">54.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">55.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">JobClient.runJob(conf);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">57.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">58.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">59.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-        
-</table>
-<a name="N10463"></a><a name="%E7%94%A8%E6%B3%95"></a>
-<h3 class="h4">用法</h3>
-<p>假设环境变量<span class="codefrag">HADOOP_HOME</span>对应安装时的根目录,<span class="codefrag">HADOOP_VERSION</span>对应Hadoop的当前安装版本,编译<span class="codefrag">WordCount.java</span>来创建jar包,可如下操作:</p>
-<p>
-          
-<span class="codefrag">$ mkdir wordcount_classes</span>
-<br>
-          
-<span class="codefrag">
-            $ javac -classpath ${HADOOP_HOME}/hadoop-${HADOOP_VERSION}-core.jar 
-              -d wordcount_classes WordCount.java
-          </span>
-<br>
-          
-<span class="codefrag">$ jar -cvf /usr/joe/wordcount.jar -C wordcount_classes/ .</span> 
-        
-</p>
-<p>假设:</p>
-<ul>
-          
-<li>
-            
-<span class="codefrag">/usr/joe/wordcount/input</span>  - 是HDFS中的输入路径
-          </li>
-          
-<li>
-            
-<span class="codefrag">/usr/joe/wordcount/output</span> - 是HDFS中的输出路径
-          </li>
-        
-</ul>
-<p>用示例文本文件做为输入:</p>
-<p>
-          
-<span class="codefrag">$ bin/hadoop dfs -ls /usr/joe/wordcount/input/</span>
-<br>
-          
-<span class="codefrag">/usr/joe/wordcount/input/file01</span>
-<br>
-          
-<span class="codefrag">/usr/joe/wordcount/input/file02</span>
-<br>
-          
-<br>
-          
-<span class="codefrag">$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01</span>
-<br>
-          
-<span class="codefrag">Hello World Bye World</span>
-<br>
-          
-<br>
-          
-<span class="codefrag">$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02</span>
-<br>
-          
-<span class="codefrag">Hello Hadoop Goodbye Hadoop</span>
-        
-</p>
-<p>运行应用程序:</p>
-<p>
-          
-<span class="codefrag">
-            $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount 
-              /usr/joe/wordcount/input /usr/joe/wordcount/output 
-          </span>
-        
-</p>
-<p>输出是:</p>
-<p>
-          
-<span class="codefrag">
-            $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
-          </span>
-          
-<br>
-          
-<span class="codefrag">Bye    1</span>
-<br>
-          
-<span class="codefrag">Goodbye    1</span>
-<br>
-          
-<span class="codefrag">Hadoop    2</span>
-<br>
-          
-<span class="codefrag">Hello    2</span>
-<br>
-          
-<span class="codefrag">World    2</span>
-<br>
-        
-</p>
-<p> 应用程序能够使用<span class="codefrag">-files</span>选项来指定一个由逗号分隔的路径列表,这些路径是task的当前工作目录。使用选项<span class="codefrag">-libjars</span>可以向map和reduce的classpath中添加jar包。使用<span class="codefrag">-archives</span>选项程序可以传递档案文件做为参数,这些档案文件会被解压并且在task的当前工作目录下会创建一个指向解压生成的目录的符号链接(以压缩包的名字命名)。
-        有关命令行选项的更多细节请参考
-        <a href="commands_manual.html">Commands manual</a>。</p>
-<p>使用<span class="codefrag">-libjars</span>和<span class="codefrag">-files</span>运行<span class="codefrag">wordcount</span>例子:<br>
-        
-<span class="codefrag"> hadoop jar hadoop-examples.jar wordcount -files cachefile.txt
-        -libjars mylib.jar input output </span>
-        
-</p>
-<a name="N10504"></a><a name="%E8%A7%A3%E9%87%8A"></a>
-<h3 class="h4">解释</h3>
-<p>
-<span class="codefrag">WordCount</span>应用程序非常直截了当。</p>
-<p>
-<span class="codefrag">Mapper</span>(14-26行)中的<span class="codefrag">map</span>方法(18-25行)通过指定的
-        <span class="codefrag">TextInputFormat</span>(49行)一次处理一行。然后,它通过<span class="codefrag">StringTokenizer</span>
-        以空格为分隔符将一行切分为若干tokens,之后,输出<span class="codefrag">&lt; &lt;word&gt;, 1&gt;</span>
-        形式的键值对。</p>
-<p>
-        对于示例中的第一个输入,map输出是:<br>
-          
-<span class="codefrag">&lt; Hello, 1&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; World, 1&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; Bye, 1&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; World, 1&gt;</span>
-<br>
-        
-</p>
-<p>
-          第二个输入,map输出是:<br>
-          
-<span class="codefrag">&lt; Hello, 1&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; Hadoop, 1&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; Goodbye, 1&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; Hadoop, 1&gt;</span>
-<br>
-        
-</p>
-<p>关于组成一个指定作业的map数目的确定,以及如何以更精细的方式去控制这些map,我们将在教程的后续部分学习到更多的内容。</p>
-<p>
-<span class="codefrag">WordCount</span>还指定了一个<span class="codefrag">combiner</span> (46行)。因此,每次map运行之后,会对输出按照<em>key</em>进行排序,然后把输出传递给本地的combiner(按照作业的配置与Reducer一样),进行本地聚合。</p>
-<p>
-         第一个map的输出是:<br>
-          
-<span class="codefrag">&lt; Bye, 1&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; Hello, 1&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; World, 2&gt;</span>
-<br>
-        
-</p>
-<p>
-          第二个map的输出是:<br>
-          
-<span class="codefrag">&lt; Goodbye, 1&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; Hadoop, 2&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; Hello, 1&gt;</span>
-<br>
-        
-</p>
-<p>
-<span class="codefrag">Reducer</span>(28-36行)中的<span class="codefrag">reduce</span>方法(29-35行)
-        仅是将每个key(本例中就是单词)出现的次数求和。
-        </p>
-<p>
-          因此这个作业的输出就是:<br>
-          
-<span class="codefrag">&lt; Bye, 1&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; Goodbye, 1&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; Hadoop, 2&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; Hello, 2&gt;</span>
-<br>
-          
-<span class="codefrag">&lt; World, 2&gt;</span>
-<br>
-        
-</p>
-<p>代码中的<span class="codefrag">run</span>方法中指定了作业的几个方面,
-        例如:通过命令行传递过来的输入/输出路径、key/value的类型、输入/输出的格式等等<span class="codefrag">JobConf</span>中的配置信息。随后程序调用了<span class="codefrag">JobClient.runJob</span>(55行)来提交作业并且监控它的执行。</p>
-<p>我们将在本教程的后续部分学习更多的关于<span class="codefrag">JobConf</span>, <span class="codefrag">JobClient</span>,
-        <span class="codefrag">Tool</span>和其他接口及类(class)。</p>
-</div>
-    
-    
-<a name="N105B5"></a><a name="Map%2FReduce+-+%E7%94%A8%E6%88%B7%E7%95%8C%E9%9D%A2"></a>
-<h2 class="h3">Map/Reduce - 用户界面</h2>
-<div class="section">
-<p>这部分文档为用户将会面临的Map/Reduce框架中的各个环节提供了适当的细节。这应该会帮助用户更细粒度地去实现、配置和调优作业。然而,请注意每个类/接口的javadoc文档提供最全面的文档;本文只是想起到指南的作用。
-      </p>
-<p>我们会先看看<span class="codefrag">Mapper</span>和<span class="codefrag">Reducer</span>接口。应用程序通常会通过提供<span class="codefrag">map</span>和<span class="codefrag">reduce</span>方法来实现它们。
-      </p>
-<p>然后,我们会讨论其他的核心接口,其中包括:
-      <span class="codefrag">JobConf</span>,<span class="codefrag">JobClient</span>,<span class="codefrag">Partitioner</span>, 
-      <span class="codefrag">OutputCollector</span>,<span class="codefrag">Reporter</span>, 
-      <span class="codefrag">InputFormat</span>,<span class="codefrag">OutputFormat</span>等等。</p>
-<p>最后,我们将通过讨论框架中一些有用的功能点(例如:<span class="codefrag">DistributedCache</span>, 
-      <span class="codefrag">IsolationRunner</span>等等)来收尾。</p>
-<a name="N105EE"></a><a name="%E6%A0%B8%E5%BF%83%E5%8A%9F%E8%83%BD%E6%8F%8F%E8%BF%B0"></a>
-<h3 class="h4">核心功能描述</h3>
-<p>应用程序通常会通过提供<span class="codefrag">map</span>和<span class="codefrag">reduce</span>来实现
-        <span class="codefrag">Mapper</span>和<span class="codefrag">Reducer</span>接口,它们组成作业的核心。</p>
-<a name="N10603"></a><a name="Mapper"></a>
-<h4>Mapper</h4>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/Mapper.html">
-          Mapper</a>将输入键值对(key/value pair)映射到一组中间格式的键值对集合。</p>
-<p>Map是一类将输入记录集转换为中间格式记录集的独立任务。
-          这种转换的中间格式记录集不需要与输入记录集的类型一致。一个给定的输入键值对可以映射成0个或多个输出键值对。</p>
-<p>Hadoop Map/Reduce框架为每一个<span class="codefrag">InputSplit</span>产生一个map任务,而每个<span class="codefrag">InputSplit</span>是由该作业的<span class="codefrag">InputFormat</span>产生的。</p>
-<p>概括地说,对<span class="codefrag">Mapper</span>的实现者需要重写
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConfigurable.html#configure(org.apache.hadoop.mapred.JobConf)">
-		  JobConfigurable.configure(JobConf)</a>方法,这个方法需要传递一个<span class="codefrag">JobConf</span>参数,目的是完成Mapper的初始化工作。然后,框架为这个任务的<span class="codefrag">InputSplit</span>中每个键值对调用一次
-	  <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/Mapper.html#map(K1, V1, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)">
-		  map(WritableComparable, Writable, OutputCollector, Reporter)</a>操作。应用程序可以通过重写<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/io/Closeable.html#close()">Closeable.close()</a>方法来执行相应的清理工作。</p>
-<p>输出键值对不需要与输入键值对的类型一致。一个给定的输入键值对可以映射成0个或多个输出键值对。通过调用<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/OutputCollector.html#collect(K, V)">
-          OutputCollector.collect(WritableComparable,Writable)</a>可以收集输出的键值对。</p>
-<p>应用程序可以使用<span class="codefrag">Reporter</span>报告进度,设定应用级别的状态消息,更新<span class="codefrag">Counters</span>(计数器),或者仅是表明自己运行正常。</p>
-<p>框架随后会把与一个特定key关联的所有中间过程的值(value)分成组,然后把它们传给<span class="codefrag">Reducer</span>以产出最终的结果。用户可以通过
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setOutputKeyComparatorClass(java.lang.Class)">
-          JobConf.setOutputKeyComparatorClass(Class)</a>来指定具体负责分组的
-          <span class="codefrag">Comparator</span>。</p>
-<p>
-<span class="codefrag">Mapper</span>的输出被排序后,就被划分给每个<span class="codefrag">Reducer</span>。分块的总数目和一个作业的reduce任务的数目是一样的。用户可以通过实现自定义的          <span class="codefrag">Partitioner</span>来控制哪个key被分配给哪个 <span class="codefrag">Reducer</span>。</p>
-<p>用户可选择通过<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setCombinerClass(java.lang.Class)">
-          JobConf.setCombinerClass(Class)</a>指定一个<span class="codefrag">combiner</span>,它负责对中间过程的输出进行本地的聚集,这会有助于降低从<span class="codefrag">Mapper</span>到
-          <span class="codefrag">Reducer</span>数据传输量。
-          </p>
-<p>这些被排好序的中间过程的输出结果保存的格式是(key-len, key, value-len, value),应用程序可以通过<span class="codefrag">JobConf</span>控制对这些中间结果是否进行压缩以及怎么压缩,使用哪种<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/io/compress/CompressionCodec.html">
-          CompressionCodec</a>。
-          </p>
-<a name="N1067B"></a><a name="%E9%9C%80%E8%A6%81%E5%A4%9A%E5%B0%91%E4%B8%AAMap%EF%BC%9F"></a>
-<h5>需要多少个Map?</h5>
-<p>Map的数目通常是由输入数据的大小决定的,一般就是所有输入文件的总块(block)数。</p>
-<p>Map正常的并行规模大致是每个节点(node)大约10到100个map,对于CPU
-            消耗较小的map任务可以设到300个左右。由于每个任务初始化需要一定的时间,因此,比较合理的情况是map执行的时间至少超过1分钟。</p>
-<p>这样,如果你输入10TB的数据,每个块(block)的大小是128MB,你将需要大约82,000个map来完成任务,除非使用
-            <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setNumMapTasks(int)">
-            setNumMapTasks(int)</a>(注意:这里仅仅是对框架进行了一个提示(hint),实际决定因素见<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setNumMapTasks(int)">这里</a>)将这个数值设置得更高。</p>
-<a name="N10694"></a><a name="Reducer"></a>
-<h4>Reducer</h4>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/Reducer.html">
-          Reducer</a>将与一个key关联的一组中间数值集归约(reduce)为一个更小的数值集。</p>
-<p>用户可以通过<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setNumReduceTasks(int)">
-          JobConf.setNumReduceTasks(int)</a>设定一个作业中reduce任务的数目。</p>
-<p>概括地说,对<span class="codefrag">Reducer</span>的实现者需要重写
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConfigurable.html#configure(org.apache.hadoop.mapred.JobConf)">
-          JobConfigurable.configure(JobConf)</a>方法,这个方法需要传递一个<span class="codefrag">JobConf</span>参数,目的是完成Reducer的初始化工作。然后,框架为成组的输入数据中的每个<span class="codefrag">&lt;key, (list of values)&gt;</span>对调用一次
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/Reducer.html#reduce(K2, java.util.Iterator, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)">
-          reduce(WritableComparable, Iterator, OutputCollector, Reporter)</a>方法。之后,应用程序可以通过重写<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/io/Closeable.html#close()">Closeable.close()</a>来执行相应的清理工作。</p>
-<p>
-<span class="codefrag">Reducer</span>有3个主要阶段:shuffle、sort和reduce。
-          </p>
-<a name="N106C4"></a><a name="Shuffle"></a>
-<h5>Shuffle</h5>
-<p>
-<span class="codefrag">Reducer</span>的输入就是Mapper已经排好序的输出。在这个阶段,框架通过HTTP为每个Reducer获得所有Mapper输出中与之相关的分块。</p>
-<a name="N106D0"></a><a name="Sort"></a>
-<h5>Sort</h5>
-<p>这个阶段,框架将按照key的值对<span class="codefrag">Reducer</span>的输入进行分组
-            (因为不同mapper的输出中可能会有相同的key)。</p>
-<p>Shuffle和Sort两个阶段是同时进行的;map的输出也是一边被取回一边被合并的。</p>
-<a name="N106DF"></a><a name="Secondary+Sort"></a>
-<h5>Secondary Sort</h5>
-<p>如果需要中间过程对key的分组规则和reduce前对key的分组规则不同,那么可以通过<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setOutputValueGroupingComparator(java.lang.Class)">
-              JobConf.setOutputValueGroupingComparator(Class)</a>来指定一个<span class="codefrag">Comparator</span>。再加上
-              <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setOutputKeyComparatorClass(java.lang.Class)">
-              JobConf.setOutputKeyComparatorClass(Class)</a>可用于控制中间过程的key如何被分组,所以结合两者可以实现<em>按值的二次排序</em>。
-              </p>
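
A minimal sketch of how the two comparators mentioned above are typically wired together for a value-based secondary sort; it is not part of the original tutorial, the helper class SecondarySortSetup is hypothetical, and the comparators are assumed to be user-written RawComparator implementations.

    import org.apache.hadoop.io.RawComparator;
    import org.apache.hadoop.mapred.JobConf;

    public class SecondarySortSetup {
      // keyComparator orders the full composite key, so values reach the reducer
      // already sorted; groupingComparator decides which keys are presented to a
      // single reduce() call. Both classes are supplied by the application.
      public static void wire(JobConf conf,
                              Class<? extends RawComparator> keyComparator,
                              Class<? extends RawComparator> groupingComparator) {
        conf.setOutputKeyComparatorClass(keyComparator);
        conf.setOutputValueGroupingComparator(groupingComparator);
      }
    }

Combined with a composite key emitted by the mapper, the grouping comparator determines which records share one reduce() call, while the key comparator fixes the order in which their values arrive.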
-<a name="N106F8"></a><a name="Reduce"></a>
-<h5>Reduce</h5>
-<p>在这个阶段,框架为已分组的输入数据中的每个
-          <span class="codefrag">&lt;key, (list of values)&gt;</span>对调用一次
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/Reducer.html#reduce(K2, java.util.Iterator, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)">
-          reduce(WritableComparable, Iterator, OutputCollector, Reporter)</a>方法。</p>
-<p>Reduce任务的输出通常是通过调用
-            <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/OutputCollector.html#collect(K, V)">
-            OutputCollector.collect(WritableComparable, Writable)</a>写入
-            <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/fs/FileSystem.html">
-            文件系统</a>的。</p>
-<p>应用程序可以使用<span class="codefrag">Reporter</span>报告进度,设定应用程序级别的状态消息,更新<span class="codefrag">Counters</span>(计数器),或者仅是表明自己运行正常。</p>
-<p>
-<span class="codefrag">Reducer</span>的输出是<em>没有排序的</em>。</p>
-<a name="N10725"></a><a name="%E9%9C%80%E8%A6%81%E5%A4%9A%E5%B0%91%E4%B8%AAReduce%EF%BC%9F"></a>
-<h5>需要多少个Reduce?</h5>
-<p>Reduce的数目建议是<span class="codefrag">0.95</span>或<span class="codefrag">1.75</span>乘以
-            (&lt;<em>no. of nodes</em>&gt; * 
-            <span class="codefrag">mapred.tasktracker.reduce.tasks.maximum</span>)。
-            </p>
-<p>用0.95,所有reduce可以在maps一完成时就立刻启动,开始传输map的输出结果。用1.75,速度快的节点可以在完成第一轮reduce任务后,可以开始第二轮,这样可以得到比较好的负载均衡的效果。</p>
-<p>增加reduce的数目会增加整个框架的开销,但可以改善负载均衡,降低由于执行失败带来的负面影响。</p>
-<p>上述比例因子比整体数目稍小一些是为了给框架中的推测性任务(speculative-tasks)
-            或失败的任务预留一些reduce的资源。</p>
-<a name="N10744"></a><a name="%E6%97%A0Reducer"></a>
-<h5>无Reducer</h5>
-<p>如果没有归约要进行,那么设置reduce任务的数目为<em>零</em>是合法的。</p>
-<p>这种情况下,map任务的输出会直接被写入由
-            <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/FileOutputFormat.html#setOutputPath(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path)">
-		    setOutputPath(Path)</a>指定的输出路径。框架在把它们写入<span class="codefrag">FileSystem</span>之前没有对它们进行排序。
-            </p>
-<a name="N1075C"></a><a name="Partitioner"></a>
-<h4>Partitioner</h4>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/Partitioner.html">
-          Partitioner</a>用于划分键值空间(key space)。</p>
-<p>Partitioner负责控制map输出结果key的分割。Key(或者一个key子集)被用于产生分区,通常使用的是Hash函数。分区的数目与一个作业的reduce任务的数目是一样的。因此,它控制将中间过程的key(也就是这条记录)应该发送给<span class="codefrag">m</span>个reduce任务中的哪一个来进行reduce操作。
-	  </p>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/lib/HashPartitioner.html">
-          HashPartitioner</a>是默认的 <span class="codefrag">Partitioner</span>。  </p>
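-<p>下面是一个自定义<span class="codefrag">Partitioner</span>的简要示意:假设key是形如"域名/路径"的
-          <span class="codefrag">Text</span>,希望同一域名的记录都交给同一个reduce处理。类名
-          <span class="codefrag">PrefixPartitioner</span>仅为示例,并非框架自带。</p>
-<p>
-<span class="codefrag">import org.apache.hadoop.io.IntWritable;</span><br>
-<span class="codefrag">import org.apache.hadoop.io.Text;</span><br>
-<span class="codefrag">import org.apache.hadoop.mapred.JobConf;</span><br>
-<span class="codefrag">import org.apache.hadoop.mapred.Partitioner;</span><br>
-<span class="codefrag">// 按key中"/"之前的前缀(例如域名)分区的示例</span><br>
-<span class="codefrag">public class PrefixPartitioner implements Partitioner&lt;Text, IntWritable&gt; {</span><br>
-&nbsp;&nbsp;<span class="codefrag">public void configure(JobConf job) { }</span><br>
-&nbsp;&nbsp;<span class="codefrag">public int getPartition(Text key, IntWritable value, int numPartitions) {</span><br>
-&nbsp;&nbsp;&nbsp;&nbsp;<span class="codefrag">String s = key.toString();</span><br>
-&nbsp;&nbsp;&nbsp;&nbsp;<span class="codefrag">int slash = s.indexOf('/');</span><br>
-&nbsp;&nbsp;&nbsp;&nbsp;<span class="codefrag">String prefix = (slash == -1) ? s : s.substring(0, slash);</span><br>
-&nbsp;&nbsp;&nbsp;&nbsp;<span class="codefrag">// 与HashPartitioner类似:取hashCode对分区数取模,并保证结果非负</span><br>
-&nbsp;&nbsp;&nbsp;&nbsp;<span class="codefrag">return (prefix.hashCode() &amp; Integer.MAX_VALUE) % numPartitions;</span><br>
-&nbsp;&nbsp;<span class="codefrag">}</span><br>
-<span class="codefrag">}</span><br>
-<span class="codefrag">// 作业配置中启用: conf.setPartitionerClass(PrefixPartitioner.class);</span><br>
-</p>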
-<a name="N10778"></a><a name="Reporter"></a>
-<h4>Reporter</h4>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/Reporter.html">
-          Reporter</a>是用于Map/Reduce应用程序报告进度,设定应用级别的状态消息,
-          更新<span class="codefrag">Counters</span>(计数器)的机制。</p>
-<p>
-<span class="codefrag">Mapper</span>和<span class="codefrag">Reducer</span>的实现可以利用<span class="codefrag">Reporter</span>
-          来报告进度,或者仅是表明自己运行正常。在那种应用程序需要花很长时间处理个别键值对的场景中,这种机制是很关键的,因为框架可能会以为这个任务超时了,从而将它强行杀死。另一个避免这种情况发生的方式是,将配置参数<span class="codefrag">mapred.task.timeout</span>设置为一个足够高的值(或者干脆设置为零,则没有超时限制了)。
-          </p>
-<p>应用程序可以用<span class="codefrag">Reporter</span>来更新<span class="codefrag">Counter</span>(计数器)。
-          </p>
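-<p>下面是在<span class="codefrag">map</span>方法中使用<span class="codefrag">Reporter</span>的简要示意
-          (省略了与进度汇报无关的处理逻辑,计数器<span class="codefrag">MyCounters</span>仅为示例):</p>
-<p>
-<span class="codefrag">// 位于某个Mapper实现内部的示意片段</span><br>
-<span class="codefrag">static enum MyCounters { RECORDS }</span><br>
-<span class="codefrag">public void map(LongWritable key, Text value,</span><br>
-<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;OutputCollector&lt;Text, IntWritable&gt; output, Reporter reporter) throws IOException {</span><br>
-&nbsp;&nbsp;<span class="codefrag">// ...对value做可能很耗时的处理...</span><br>
-&nbsp;&nbsp;<span class="codefrag">reporter.incrCounter(MyCounters.RECORDS, 1);&nbsp;&nbsp;// 更新计数器</span><br>
-&nbsp;&nbsp;<span class="codefrag">reporter.setStatus("processing offset " + key);&nbsp;&nbsp;// 设置应用级状态消息</span><br>
-&nbsp;&nbsp;<span class="codefrag">reporter.progress();&nbsp;&nbsp;// 仅表明任务仍在正常运行,避免被框架判定超时</span><br>
-&nbsp;&nbsp;<span class="codefrag">// output.collect(...);</span><br>
-<span class="codefrag">}</span><br>
-</p>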
-<a name="N1079F"></a><a name="OutputCollector"></a>
-<h4>OutputCollector</h4>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/OutputCollector.html">
-          OutputCollector</a>是一个Map/Reduce框架提供的用于收集
-          <span class="codefrag">Mapper</span>或<span class="codefrag">Reducer</span>输出数据的通用机制
-          (包括中间输出结果和作业的输出结果)。</p>
-<p>Hadoop Map/Reduce框架附带了一个包含许多实用型的mapper、reducer和partitioner
-        的<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/lib/package-summary.html">类库</a>。</p>
-<a name="N107BA"></a><a name="%E4%BD%9C%E4%B8%9A%E9%85%8D%E7%BD%AE"></a>
-<h3 class="h4">作业配置</h3>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html">
-        JobConf</a>代表一个Map/Reduce作业的配置。</p>
-<p>
-<span class="codefrag">JobConf</span>是用户向Hadoop框架描述一个Map/Reduce作业如何执行的主要接口。框架会按照<span class="codefrag">JobConf</span>描述的信息忠实地去尝试完成这个作业,然而:</p>
-<ul>
-          
-<li>
-            一些参数可能会被管理者标记为<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/conf/Configuration.html#FinalParams">
-            final</a>,这意味它们不能被更改。
-          </li>
-          
-<li>
-          一些作业的参数可以被直截了当地进行设置(例如:
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setNumReduceTasks(int)">
-            setNumReduceTasks(int)</a>),而另一些参数则与框架或者作业的其他参数之间微妙地相互影响,并且设置起来比较复杂(例如:<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setNumMapTasks(int)">
-            setNumMapTasks(int)</a>)。
-          </li>
-        
-</ul>
-<p>通常,<span class="codefrag">JobConf</span>会指明<span class="codefrag">Mapper</span>、Combiner(如果有的话)、
-        <span class="codefrag">Partitioner</span>、<span class="codefrag">Reducer</span>、<span class="codefrag">InputFormat</span>和 
-        <span class="codefrag">OutputFormat</span>的具体实现。<span class="codefrag">JobConf</span>还能指定一组输入文件
-        (<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/FileInputFormat.html#setInputPaths(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path[])">setInputPaths(JobConf, Path...)</a>
-        /<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/FileInputFormat.html#addInputPath(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path)">addInputPath(JobConf, Path)</a>)
-        和(<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/FileInputFormat.html#setInputPaths(org.apache.hadoop.mapred.JobConf,%20java.lang.String)">setInputPaths(JobConf, String)</a>
-        /<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/FileInputFormat.html#addInputPath(org.apache.hadoop.mapred.JobConf,%20java.lang.String)">addInputPaths(JobConf, String)</a>)
-        以及输出文件应该写在哪儿
-        (<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/FileOutputFormat.html#setOutputPath(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path)">setOutputPath(Path)</a>)。</p>
-<p>
-<span class="codefrag">JobConf</span>可选择地对作业设置一些高级选项,例如:设置<span class="codefrag">Comparator</span>;
-        放到<span class="codefrag">DistributedCache</span>上的文件;中间结果或者作业输出结果是否需要压缩以及怎么压缩;
-        利用用户提供的脚本(<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setMapDebugScript(java.lang.String)">setMapDebugScript(String)</a>/<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setReduceDebugScript(java.lang.String)">setReduceDebugScript(String)</a>)     
-        进行调试;作业是否允许<em>推测性(speculative)</em>任务的执行
-        (<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setMapSpeculativeExecution(boolean)">setMapSpeculativeExecution(boolean)</a>)/(<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setReduceSpeculativeExecution(boolean)">setReduceSpeculativeExecution(boolean)</a>)
-        ;每个任务最大的尝试次数
-        (<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setMaxMapAttempts(int)">setMaxMapAttempts(int)</a>/<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setMaxReduceAttempts(int)">setMaxReduceAttempts(int)</a>)
-        ;一个作业能容忍的任务失败的百分比
-        (<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setMaxMapTaskFailuresPercent(int)">setMaxMapTaskFailuresPercent(int)</a>/<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setMaxReduceTaskFailuresPercent(int)">setMaxReduceTaskFailuresPercent(int)</a>) 
-        ;等等。</p>
-<p>当然,用户能使用
-        <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/conf/Configuration.html#set(java.lang.String, java.lang.String)">set(String, String)</a>/<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/conf/Configuration.html#get(java.lang.String, java.lang.String)">get(String, String)</a>
-        来设置或者取得应用程序需要的任意参数。然而,<span class="codefrag">DistributedCache</span>的使用是面向大规模只读数据的。</p>
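-<p>下面把上述内容汇总成一段典型的作业配置示意(<span class="codefrag">WordCount</span>、<span class="codefrag">Map</span>、
-        <span class="codefrag">Reduce</span>与后文的例子一致,输入输出路径仅为示例):</p>
-<p>
-<span class="codefrag">JobConf conf = new JobConf(WordCount.class);</span><br>
-<span class="codefrag">conf.setJobName("wordcount");</span><br>
-<span class="codefrag">conf.setOutputKeyClass(Text.class);</span><br>
-<span class="codefrag">conf.setOutputValueClass(IntWritable.class);</span><br>
-<span class="codefrag">conf.setMapperClass(Map.class);</span><br>
-<span class="codefrag">conf.setCombinerClass(Reduce.class);</span><br>
-<span class="codefrag">conf.setReducerClass(Reduce.class);</span><br>
-<span class="codefrag">conf.setInputFormat(TextInputFormat.class);&nbsp;&nbsp;// 默认即是TextInputFormat,这里仅作演示</span><br>
-<span class="codefrag">conf.setOutputFormat(TextOutputFormat.class);</span><br>
-<span class="codefrag">FileInputFormat.setInputPaths(conf, new Path("/user/joe/wordcount/input"));</span><br>
-<span class="codefrag">FileOutputFormat.setOutputPath(conf, new Path("/user/joe/wordcount/output"));</span><br>
-<span class="codefrag">conf.setNumReduceTasks(2);</span><br>
-<span class="codefrag">JobClient.runJob(conf);</span><br>
-</p>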
-<a name="N1084C"></a><a name="%E4%BB%BB%E5%8A%A1%E7%9A%84%E6%89%A7%E8%A1%8C%E5%92%8C%E7%8E%AF%E5%A2%83"></a>
-<h3 class="h4">任务的执行和环境</h3>
-<p>
-<span class="codefrag">TaskTracker</span>是在一个单独的jvm上以子进程的形式执行
-        <span class="codefrag">Mapper</span>/<span class="codefrag">Reducer</span>任务(Task)的。
-        </p>
-<p>子任务会继承父<span class="codefrag">TaskTracker</span>的环境。用户可以通过JobConf中的
-        <span class="codefrag">mapred.child.java.opts</span>配置参数来设定子jvm上的附加选项,例如:
-        通过<span class="codefrag">-Djava.library.path=&lt;&gt;</span>为运行时链接器设置非标准的搜索路径以查找共享库,等等。如果<span class="codefrag">mapred.child.java.opts</span>包含一个符号<em>@taskid@</em>,
-        它会被替换成map/reduce的taskid的值。</p>
-<p>下面是一个包含多个参数和替换的例子,其中包括:记录jvm GC日志;
-        JVM JMX代理程序以无密码的方式启动,这样它就能连接到jconsole上,从而可以查看子进程的内存和线程,得到线程的dump;还把子jvm的最大堆尺寸设置为512MB,
-        并为子jvm的<span class="codefrag">java.library.path</span>添加了一个附加路径。</p>
-<p>
-          
-<span class="codefrag">&lt;property&gt;</span>
-<br>
-          &nbsp;&nbsp;<span class="codefrag">&lt;name&gt;mapred.child.java.opts&lt;/name&gt;</span>
-<br>
-          &nbsp;&nbsp;<span class="codefrag">&lt;value&gt;</span>
-<br>
-          &nbsp;&nbsp;&nbsp;&nbsp;<span class="codefrag">
-                    -Xmx512M -Djava.library.path=/home/mycompany/lib
-                    -verbose:gc -Xloggc:/tmp/@taskid@.gc</span>
-<br>
-          &nbsp;&nbsp;&nbsp;&nbsp;<span class="codefrag">
-                    -Dcom.sun.management.jmxremote.authenticate=false 
-                    -Dcom.sun.management.jmxremote.ssl=false</span>
-<br>
-          &nbsp;&nbsp;<span class="codefrag">&lt;/value&gt;</span>
-<br>
-          
-<span class="codefrag">&lt;/property&gt;</span>
-        
-</p>
-<p>用户或管理员也可以使用<span class="codefrag">mapred.child.ulimit</span>设定运行的子任务的最大虚拟内存。<span class="codefrag">mapred.child.ulimit</span>的值以(KB)为单位,并且必须大于或等于-Xmx参数传给JavaVM的值,否则VM会无法启动。</p>
-<p>注意:<span class="codefrag">mapred.child.java.opts</span>只用于设置task tracker启动的子任务。为守护进程设置内存选项请查看
-        <a href="cluster_setup.html#%E9%85%8D%E7%BD%AEHadoop%E5%AE%88%E6%8A%A4%E8%BF%9B%E7%A8%8B%E7%9A%84%E8%BF%90%E8%A1%8C%E7%8E%AF%E5%A2%83">
-        cluster_setup.html </a>
-</p>
-<p>
-<span class="codefrag"> ${mapred.local.dir}/taskTracker/</span>是task tracker的本地目录,
-        用于创建本地缓存和job。它可以指定多个目录(跨越多个磁盘),文件会半随机的保存到本地路径下的某个目录。当job启动时,task tracker根据配置文档创建本地job目录,目录结构如以下所示:</p>
-<ul>
-	
-<li>
-<span class="codefrag">${mapred.local.dir}/taskTracker/archive/</span> :分布式缓存。这个目录保存本地的分布式缓存。因此本地分布式缓存是在所有task和job间共享的。</li>
-        
-<li>
-<span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/</span> :
-        本地job目录。
-        <ul>
-        
-<li>
-<span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/work/</span>:
-        job指定的共享目录。各个任务可以使用这个空间做为暂存空间,用于它们之间共享文件。这个目录通过<span class="codefrag">job.local.dir </span>参数暴露给用户。这个路径可以通过API <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#getJobLocalDir()">
-        JobConf.getJobLocalDir()</a>来访问。它也可以被做为系统属性获得。因此,用户(比如运行streaming)可以调用<span class="codefrag">System.getProperty("job.local.dir")</span>获得该目录。
-        </li>
-        
-<li>
-<span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/jars/</span>:
-        存放jar包的路径,用于存放作业的jar文件和展开的jar。<span class="codefrag">job.jar</span>是应用程序的jar文件,它会被自动分发到各台机器,在task启动前会被自动展开。使用api
-        <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#getJar()">
-        JobConf.getJar() </a>函数可以得到job.jar的位置。使用JobConf.getJar().getParent()可以访问存放展开的jar包的目录。
-        </li>
-        
-<li>
-<span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/job.xml</span>:
-        一个job.xml文件,本地的通用的作业配置文件。
-        </li>
-        
-<li>
-<span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid</span>:
-        每个任务有一个目录<span class="codefrag">task-id</span>,它里面有如下的目录结构:
-	<ul>
-        
-<li>
-<span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid/job.xml</span>:
-       一个job.xml文件,本地化的任务作业配置文件。任务本地化是指为该task设定特定的属性值。这些值会在下面具体说明。
-	</li>
-        
-<li>
-<span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid/output</span>
-        一个存放中间过程输出文件的目录。它保存了由framework产生的临时map/reduce数据,比如map的输出文件等。</li>
-        
-<li>
-<span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid/work</span>:
-        task的当前工作目录。</li>
-        
-<li>
-<span class="codefrag">${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid/work/tmp</span>:
-        task的临时目录。(用户可以设定属性<span class="codefrag">mapred.child.tmp</span>
-        来为map和reduce task设定临时目录。缺省值是<span class="codefrag">./tmp</span>。如果这个值不是绝对路径,
-        它会把task的工作路径加到该路径前面作为task的临时文件路径。如果这个值是绝对路径则直接使用这个值。
-        如果指定的目录不存在,会自动创建该目录。之后,按照选项
-        <span class="codefrag">-Djava.io.tmpdir='临时文件的绝对路径'</span>执行java子任务。
-        pipes和streaming的临时文件路径是通过环境变量<span class="codefrag">TMPDIR='the absolute path of the tmp dir'</span>设定的)。
-        如果<span class="codefrag">mapred.child.tmp</span>有<span class="codefrag">./tmp</span>值,这个目录会被创建。</li>
-        
-</ul>
-        
-</li>
-        
-</ul>
-        
-</li>
-        
-</ul>
-<p>下面的属性是为每个task执行时使用的本地参数,它们保存在本地化的任务作业配置文件里:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-          
-<tr>
-<th colspan="1" rowspan="1">名称</th><th colspan="1" rowspan="1">类型</th><th colspan="1" rowspan="1">描述</th>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">mapred.job.id</td><td colspan="1" rowspan="1">String</td><td colspan="1" rowspan="1">job id</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">mapred.jar</td><td colspan="1" rowspan="1">String</td>
-              <td colspan="1" rowspan="1">job目录下job.jar的位置</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">job.local.dir</td><td colspan="1" rowspan="1"> String</td>
-              <td colspan="1" rowspan="1">job指定的共享存储空间</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">mapred.tip.id</td><td colspan="1" rowspan="1"> String</td>
-              <td colspan="1" rowspan="1"> task id</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">mapred.task.id</td><td colspan="1" rowspan="1"> String</td>
-              <td colspan="1" rowspan="1"> task尝试id</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">mapred.task.is.map</td><td colspan="1" rowspan="1"> boolean </td>
-              <td colspan="1" rowspan="1">是否是map task</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">mapred.task.partition</td><td colspan="1" rowspan="1"> int </td>
-              <td colspan="1" rowspan="1">task在job中的id</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">map.input.file</td><td colspan="1" rowspan="1"> String</td>
-              <td colspan="1" rowspan="1"> map读取的文件名</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">map.input.start</td><td colspan="1" rowspan="1"> long</td>
-              <td colspan="1" rowspan="1"> map输入的数据块的起始位置偏移</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">map.input.length </td><td colspan="1" rowspan="1">long </td>
-              <td colspan="1" rowspan="1">map输入的数据块的字节数</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">mapred.work.output.dir</td><td colspan="1" rowspan="1"> String </td>
-              <td colspan="1" rowspan="1">task临时输出目录</td>
-</tr>
-        
-</table>
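-<p>下面的示意片段演示了任务如何在<span class="codefrag">configure(JobConf)</span>方法中读取上表列出的本地化参数,
-以及如何通过系统属性获得<span class="codefrag">job.local.dir</span>:</p>
-<p>
-<span class="codefrag">public void configure(JobConf job) {</span><br>
-&nbsp;&nbsp;<span class="codefrag">String taskId = job.get("mapred.task.id");&nbsp;&nbsp;// 本次任务尝试的id</span><br>
-&nbsp;&nbsp;<span class="codefrag">boolean isMap = job.getBoolean("mapred.task.is.map", true);</span><br>
-&nbsp;&nbsp;<span class="codefrag">String inputFile = job.get("map.input.file");&nbsp;&nbsp;// 只有map任务才有该值</span><br>
-&nbsp;&nbsp;<span class="codefrag">// job.local.dir也会作为系统属性暴露给子jvm</span><br>
-&nbsp;&nbsp;<span class="codefrag">String jobLocalDir = System.getProperty("job.local.dir");</span><br>
-<span class="codefrag">}</span><br>
-</p>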
-<p>task的标准输出和错误输出流会被读到TaskTracker中,并且记录到
-        <span class="codefrag">${HADOOP_LOG_DIR}/userlogs</span>
-</p>
-<p>
-<a href="#DistributedCache">DistributedCache</a>
-        可用于map或reduce task中分发jar包和本地库。子jvm总是把
-        <em>当前工作目录</em> 加到
-        <span class="codefrag">java.library.path</span> 和 <span class="codefrag">LD_LIBRARY_PATH</span>。
-        因此,可以通过
-        <a href="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/System.html#loadLibrary(java.lang.String)">
-        System.loadLibrary</a>或 
-        <a href="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/System.html#load(java.lang.String)">
-        System.load</a>装载缓存的库。有关使用分布式缓存加载共享库的细节请参考
-        <a href="native_libraries.html#%E4%BD%BF%E7%94%A8DistributedCache+%E5%8A%A0%E8%BD%BD%E6%9C%AC%E5%9C%B0%E5%BA%93">
-        native_libraries.html</a>
-</p>
-<a name="N109E3"></a><a name="%E4%BD%9C%E4%B8%9A%E7%9A%84%E6%8F%90%E4%BA%A4%E4%B8%8E%E7%9B%91%E6%8E%A7"></a>
-<h3 class="h4">作业的提交与监控</h3>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobClient.html">
-        JobClient</a>是用户提交的作业与<span class="codefrag">JobTracker</span>交互的主要接口。
-        </p>
-<p>
-<span class="codefrag">JobClient</span>提供提交作业、追踪作业进度、访问子任务的日志记录、获得Map/Reduce集群状态信息等功能。
-        </p>
-<p>作业提交过程包括: </p>
-<ol>
-          
-<li>检查作业输入与输出规范的细节。</li>
-          
-<li>为作业计算<span class="codefrag">InputSplit</span>值。</li>
-          
-<li>
-           如果需要的话,为作业的<span class="codefrag">DistributedCache</span>建立必须的统计信息。
-          </li>
-          
-<li>
-            拷贝作业的jar包和配置文件到<span class="codefrag">FileSystem</span>上的Map/Reduce系统目录下。
-          </li>
-          
-<li>
-            提交作业到<span class="codefrag">JobTracker</span>并且监控它的状态。
-          </li>
-        
-</ol>
-<p>作业的历史文件记录到指定目录的"_logs/history/"子目录下。这个指定目录由<span class="codefrag">hadoop.job.history.user.location</span>设定,默认是作业输出的目录。因此默认情况下,文件会存放在mapred.output.dir/_logs/history目录下。用户可以设置<span class="codefrag">hadoop.job.history.user.location</span>为<span class="codefrag">none</span>来停止日志记录。
-        </p>
-<p> 用户使用下面的命令可以看到在指定目录下的历史日志记录的摘要。
-        <br>
-        
-<span class="codefrag">$ bin/hadoop job -history output-dir</span>
-<br> 
-        这个命令会打印出作业的细节,以及失败的和被杀死的任务细节。<br>
-        要查看有关作业的更多细节例如成功的任务、每个任务尝试的次数(task attempt)等,可以使用下面的命令
-        <br>
-       
-<span class="codefrag">$ bin/hadoop job -history all output-dir</span>
-<br>
-</p>
-<p>用户可以使用 
-        <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/OutputLogFilter.html">OutputLogFilter</a>
-        从输出目录列表中筛选日志文件。</p>
-<p>一般情况,用户利用<span class="codefrag">JobConf</span>创建应用程序并配置作业属性,
-        然后用
-        <span class="codefrag">JobClient</span> 提交作业并监视它的进程。</p>
-<a name="N10A44"></a><a name="%E4%BD%9C%E4%B8%9A%E7%9A%84%E6%8E%A7%E5%88%B6"></a>
-<h4>作业的控制</h4>
-<p>有时候,用一个单独的Map/Reduce作业并不能完成一个复杂的任务,用户需要把多个Map/Reduce作业链接起来才行。这是容易实现的,因为作业的输出通常写在分布式文件系统上,所以可以把一个作业的输出作为下一个作业的输入,实现串联。
-          </p>
-<p>然而,这也意味着,确保每个作业完成(成功或失败)的责任就直接落在了客户身上。在这种情况下,可以用来控制作业的选项有(列表之后给出一个串联作业的示意):
-          </p>
-<ul>
-            
-<li>
-              
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobClient.html#runJob(org.apache.hadoop.mapred.JobConf)">
-              runJob(JobConf)</a>:提交作业,仅当作业完成时返回。
-            </li>
-            
-<li>
-              
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobClient.html#submitJob(org.apache.hadoop.mapred.JobConf)">
-              submitJob(JobConf)</a>:只提交作业,之后需要你轮询它返回的
-              <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/RunningJob.html">
-              RunningJob</a>句柄的状态,并根据情况调度。
-            </li>
-            
-<li>
-              
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setJobEndNotificationURI(java.lang.String)">
-              JobConf.setJobEndNotificationURI(String)</a>:设置一个作业完成通知,可避免轮询。
-           
-            </li>
-          
-</ul>
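-<p>下面是串联两个作业的简要示意:作业A用<span class="codefrag">runJob</span>同步提交,其输出目录作为作业B的输入;
-          作业B用<span class="codefrag">submitJob</span>异步提交并轮询状态。其中<span class="codefrag">confA</span>、
-          <span class="codefrag">confB</span>假定是已按前文方式配置好的JobConf,路径仅为示例,片段省略了IOException/InterruptedException等异常处理。</p>
-<p>
-<span class="codefrag">Path intermediate = new Path("/user/joe/pipeline/intermediate");</span><br>
-<span class="codefrag">FileOutputFormat.setOutputPath(confA, intermediate);</span><br>
-<span class="codefrag">JobClient.runJob(confA);&nbsp;&nbsp;// 阻塞直到作业A完成,失败时抛出IOException</span><br>
-<span class="codefrag">FileInputFormat.setInputPaths(confB, intermediate);&nbsp;&nbsp;// 作业A的输出作为作业B的输入</span><br>
-<span class="codefrag">JobClient client = new JobClient(confB);</span><br>
-<span class="codefrag">RunningJob jobB = client.submitJob(confB);&nbsp;&nbsp;// 只提交,立即返回</span><br>
-<span class="codefrag">while (!jobB.isComplete()) {&nbsp;&nbsp;// 轮询作业B的状态</span><br>
-&nbsp;&nbsp;<span class="codefrag">Thread.sleep(5000);</span><br>
-<span class="codefrag">}</span><br>
-<span class="codefrag">if (!jobB.isSuccessful()) {</span><br>
-&nbsp;&nbsp;<span class="codefrag">System.err.println("job " + jobB.getJobID() + " failed");</span><br>
-<span class="codefrag">}</span><br>
-</p>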
-<a name="N10A6E"></a><a name="%E4%BD%9C%E4%B8%9A%E7%9A%84%E8%BE%93%E5%85%A5"></a>
-<h3 class="h4">作业的输入</h3>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/InputFormat.html">
-        InputFormat</a> 为Map/Reduce作业描述输入的细节规范。
-        </p>
-<p>Map/Reduce框架根据作业的<span class="codefrag">InputFormat</span>来: 
-        </p>
-<ol>
-          
-<li>检查作业输入的有效性。</li>
-          
-<li>
-            把输入文件切分成多个逻辑<span class="codefrag">InputSplit</span>实例,
-            并把每一实例分别分发给一个
-            <span class="codefrag">Mapper</span>。
-          </li>
-          
-<li>
-            提供<span class="codefrag">RecordReader</span>的实现,这个RecordReader从逻辑<span class="codefrag">InputSplit</span>中获得输入记录,
-		这些记录将由<span class="codefrag">Mapper</span>处理。 
-          </li>
-        
-</ol>
-<p>基于文件的<span class="codefrag">InputFormat</span>实现(通常是
-	<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/FileInputFormat.html">
-        FileInputFormat</a>的子类)
-	默认行为是按照输入文件的字节大小,把输入数据切分成逻辑分块(<em>logical</em> 
-        <span class="codefrag">InputSplit</span> )。	
-        其中输入文件所在的<span class="codefrag">FileSystem</span>的数据块尺寸是分块大小的上限。下限可以设置<span class="codefrag">mapred.min.split.size</span>
-	的值。</p>
-<p>考虑到边界情况,对于很多应用程序来说,很明显按照文件大小进行逻辑分割是不能满足需求的。
-        在这种情况下,应用程序需要实现一个<span class="codefrag">RecordReader</span>来处理记录的边界并为每个任务提供一个逻辑分块的面向记录的视图。
-        </p>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/TextInputFormat.html">
-        TextInputFormat</a> 是默认的<span class="codefrag">InputFormat</span>。</p>
-<p>如果一个作业的<span class="codefrag">Inputformat</span>是<span class="codefrag">TextInputFormat</span>,
-        并且框架检测到输入文件的后缀是<em>.gz</em>或<em>.lzo</em>,就会使用对应的<span class="codefrag">CompressionCodec</span>自动解压缩这些文件。
-        但是需要注意,上述带后缀的压缩文件不会被切分,并且整个压缩文件会分给一个mapper来处理。
-        </p>
-<a name="N10AD2"></a><a name="InputSplit"></a>
-<h4>InputSplit</h4>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/InputSplit.html">
-          InputSplit</a> 是一个单独的<span class="codefrag">Mapper</span>要处理的数据块。</p>
-<p>一般的<span class="codefrag">InputSplit</span> 是字节样式输入,然后由<span class="codefrag">RecordReader</span>处理并转化成记录样式。
-          </p>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/FileSplit.html">
-			  FileSplit</a> 是默认的<span class="codefrag">InputSplit</span>。 它把
-          <span class="codefrag">map.input.file</span>设定为该逻辑分块所对应的输入文件的路径。
-          </p>
-<a name="N10AF7"></a><a name="RecordReader"></a>
-<h4>RecordReader</h4>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/RecordReader.html">
-          RecordReader</a>从<span class="codefrag">InputSplit</span>读入<span class="codefrag">&lt;key, value&gt;</span>对。
-          </p>
-<p>一般的,<span class="codefrag">RecordReader</span> 把由<span class="codefrag">InputSplit</span>
-	  提供的字节样式的输入文件,转化成由<span class="codefrag">Mapper</span>处理的记录样式的文件。
-          因此<span class="codefrag">RecordReader</span>负责处理记录的边界情况和把数据表示成keys/values对形式。
-          </p>
-<a name="N10B1A"></a><a name="%E4%BD%9C%E4%B8%9A%E7%9A%84%E8%BE%93%E5%87%BA"></a>
-<h3 class="h4">作业的输出</h3>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/OutputFormat.html">
-        OutputFormat</a> 描述Map/Reduce作业的输出样式。
-        </p>
-<p>Map/Reduce框架根据作业的<span class="codefrag">OutputFormat</span>来:
-        </p>
-<ol>
-          
-<li>
-            检验作业的输出,例如检查输出路径是否已经存在。
-          </li>
-          
-<li>
-            提供一个<span class="codefrag">RecordWriter</span>的实现,用来输出作业结果。
-            输出文件保存在<span class="codefrag">FileSystem</span>上。
-          </li>
-        
-</ol>
-<p>
-<span class="codefrag">TextOutputFormat</span>是默认的
-        <span class="codefrag">OutputFormat</span>。</p>
-<a name="N10B43"></a><a name="%E4%BB%BB%E5%8A%A1%E7%9A%84Side-Effect+File"></a>
-<h4>任务的Side-Effect File</h4>
-<p>在一些应用程序中,子任务需要产生一些side-file,这些文件与作业实际输出结果的文件不同。
-          </p>
-<p>在这种情况下,同一个<span class="codefrag">Mapper</span>或者<span class="codefrag">Reducer</span>的两个实例(比如推测性任务)同时打开或者写
-	  <span class="codefrag">FileSystem</span>上的同一文件就会产生冲突。因此应用程序在写文件的时候需要为每次任务尝试(不仅仅是每次任务,每个任务可以尝试执行很多次)选取一个独一无二的文件名(使用attemptid,例如<span class="codefrag">task_200709221812_0001_m_000000_0</span>)。 
-          </p>
-<p>为了避免冲突,Map/Reduce框架为每次尝试执行任务都建立和维护一个特殊的
-          <span class="codefrag">${mapred.output.dir}/_temporary/_${taskid}</span>子目录,这个目录位于本次尝试执行任务输出结果所在的<span class="codefrag">FileSystem</span>上,可以通过
-          <span class="codefrag">${mapred.work.output.dir}</span>来访问这个子目录。
-          对于成功完成的任务尝试,只有<span class="codefrag">${mapred.output.dir}/_temporary/_${taskid}</span>下的文件会<em>移动</em>到<span class="codefrag">${mapred.output.dir}</span>。当然,框架会丢弃那些失败的任务尝试的子目录。这种处理过程对于应用程序来说是完全透明的。</p>
-<p>在任务执行期间,应用程序在写文件时可以利用这个特性,比如
-	  通过<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/FileOutputFormat.html#getWorkOutputPath(org.apache.hadoop.mapred.JobConf)">
-          FileOutputFormat.getWorkOutputPath()</a>获得<span class="codefrag">${mapred.work.output.dir}</span>目录,
-	  并在其下创建任意任务执行时所需的side-file,框架在任务尝试成功时会马上移动这些文件,因此不需要在程序内为每次任务尝试选取一个独一无二的名字。
-          </p>
-<p>注意:在每次任务尝试执行期间,<span class="codefrag">${mapred.work.output.dir}</span> 的值实际上是
-          <span class="codefrag">${mapred.output.dir}/_temporary/_${taskid}</span>,这个值是Map/Reduce框架创建的。
-          所以使用这个特性的方法是,在<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/FileOutputFormat.html#getWorkOutputPath(org.apache.hadoop.mapred.JobConf)">
-          FileOutputFormat.getWorkOutputPath() </a>
-	  路径下创建side-file即可。
-	  </p>
-<p>对于只使用map不使用reduce的作业,这个结论也成立。这种情况下,map的输出结果直接生成到HDFS上。
-           </p>
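-<p>下面是在任务中创建side-file的一段示意代码(假定<span class="codefrag">job</span>是在
-           <span class="codefrag">configure</span>方法中保存下来的JobConf,文件名仅为示例):</p>
-<p>
-<span class="codefrag">Path workDir = FileOutputFormat.getWorkOutputPath(job);&nbsp;&nbsp;// 即${mapred.work.output.dir}</span><br>
-<span class="codefrag">FileSystem fs = workDir.getFileSystem(job);</span><br>
-<span class="codefrag">FSDataOutputStream out = fs.create(new Path(workDir, "side-file.txt"));</span><br>
-<span class="codefrag">out.writeBytes("some side data\n");</span><br>
-<span class="codefrag">out.close();</span><br>
-<span class="codefrag">// 任务尝试成功后,框架会把它移动到${mapred.output.dir}下,无需为每次尝试另取文件名</span><br>
-</p>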
-<a name="N10B8B"></a><a name="RecordWriter"></a>
-<h4>RecordWriter</h4>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/RecordWriter.html">
-          RecordWriter</a> 生成<span class="codefrag">&lt;key, value&gt;</span> 
-          对到输出文件。</p>
-<p>RecordWriter的实现把作业的输出结果写到
-          <span class="codefrag">FileSystem</span>。</p>
-<a name="N10BA2"></a><a name="%E5%85%B6%E4%BB%96%E6%9C%89%E7%94%A8%E7%9A%84%E7%89%B9%E6%80%A7"></a>
-<h3 class="h4">其他有用的特性</h3>
-<a name="N10BA8"></a><a name="Counters"></a>
-<h4>Counters</h4>
-<p>
-<span class="codefrag">Counters</span> 是多个由Map/Reduce框架或者应用程序定义的全局计数器。
-          每一个<span class="codefrag">Counter</span>可以是任何一种 
-          <span class="codefrag">Enum</span>类型。同一特定<span class="codefrag">Enum</span>类型的Counter可以汇集到一个组,其类型为<span class="codefrag">Counters.Group</span>。</p>
-<p>应用程序可以定义任意(Enum类型)的<span class="codefrag">Counters</span>并且可以通过 <span class="codefrag">map</span> 或者 
-          <span class="codefrag">reduce</span>方法中的
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/Reporter.html#incrCounter(java.lang.Enum, long)">
-          Reporter.incrCounter(Enum, long)</a>或者 
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/Reporter.html#incrCounter(java.lang.String, java.lang.String, long amount)">
-          Reporter.incrCounter(String, String, long)</a>
-          更新。之后框架会汇总这些全局counters。 
-          </p>
-<a name="N10BD4"></a><a name="DistributedCache"></a>
-<h4>DistributedCache</h4>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/filecache/DistributedCache.html">
-          DistributedCache</a> 可将具体应用相关的、大尺寸的、只读的文件有效地分布放置。
-          </p>
-<p>
-<span class="codefrag">DistributedCache</span> 是Map/Reduce框架提供的功能,能够缓存应用程序所需的文件
-		(包括文本,档案文件,jar文件等)。
-          </p>
-<p>应用程序在<span class="codefrag">JobConf</span>中通过url(hdfs://)指定需要被缓存的文件。
-	  <span class="codefrag">DistributedCache</span>假定由hdfs://格式url指定的文件已经在 
-          <span class="codefrag">FileSystem</span>上了。</p>
-<p>Map/Reduce框架在作业的任何任务执行之前,会把必要的文件拷贝到slave节点上。
-          它的高效源于每个作业的文件只拷贝一次,以及能够缓存那些在slave节点上被解档的归档文件。
-          </p>
-<p>
-<span class="codefrag">DistributedCache</span> 根据缓存文档修改的时间戳进行追踪。
-	  在作业执行期间,当前应用程序或者外部程序不能修改缓存文件。 
-          </p>
-<p>
-<span class="codefrag">DistributedCache</span>可以分发简单的只读数据或文本文件,也可以分发复杂类型的文件,例如归档文件和jar文件。归档文件(zip、tar、tgz和tar.gz文件)在slave节点上会被<em>解档(un-archived)</em>。
-          这些文件可以设置<em>执行权限</em>。</p>
-<p>用户可以通过设置<span class="codefrag">mapred.cache.{files|archives}</span>来分发文件。
-          如果要分发多个文件,可以使用逗号分隔文件所在路径。也可以利用API来设置该属性:
-            <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/filecache/DistributedCache.html#addCacheFile(java.net.URI,%20org.apache.hadoop.conf.Configuration)">
-          DistributedCache.addCacheFile(URI,conf)</a>/
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/filecache/DistributedCache.html#addCacheArchive(java.net.URI,%20org.apache.hadoop.conf.Configuration)">
-          DistributedCache.addCacheArchive(URI,conf)</a>和
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/filecache/DistributedCache.html#setCacheFiles(java.net.URI[],%20org.apache.hadoop.conf.Configuration)">
-          DistributedCache.setCacheFiles(URIs,conf)</a>/
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/filecache/DistributedCache.html#setCacheArchives(java.net.URI[],%20org.apache.hadoop.conf.Configuration)">
-          DistributedCache.setCacheArchives(URIs,conf)</a>
-          其中URI的形式是
-          <span class="codefrag">hdfs://host:port/absolute-path#link-name</span>。
-          在Streaming程序中,可以通过命令行选项
-          <span class="codefrag">-cacheFile/-cacheArchive</span>
-          分发文件。</p>
-<p>
-	  用户可以通过<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/filecache/DistributedCache.html#createSymlink(org.apache.hadoop.conf.Configuration)">
-          DistributedCache.createSymlink(Configuration)</a>方法让<span class="codefrag">DistributedCache</span>
-        在<em>当前工作目录</em>下创建到缓存文件的符号链接。
-	或者通过设置配置文件属性<span class="codefrag">mapred.create.symlink</span>为<span class="codefrag">yes</span>。
-	分布式缓存会截取URI的片段作为链接的名字。
-	例如,URI是 <span class="codefrag">hdfs://namenode:port/lib.so.1#lib.so</span>,
-	则在task当前工作目录会有名为<span class="codefrag">lib.so</span>的链接,
-        它会链接分布式缓存中的<span class="codefrag">lib.so.1</span>。
-        </p>
-<p>
-<span class="codefrag">DistributedCache</span>可在map/reduce任务中作为
-        一种基础软件分发机制使用。它可以被用于分发jar包和本地库(native libraries)。
-        <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/filecache/DistributedCache.html#addArchiveToClassPath(org.apache.hadoop.fs.Path,%20org.apache.hadoop.conf.Configuration)">
-        DistributedCache.addArchiveToClassPath(Path, Configuration)</a>和
-        <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/filecache/DistributedCache.html#addFileToClassPath(org.apache.hadoop.fs.Path,%20org.apache.hadoop.conf.Configuration)">
-        DistributedCache.addFileToClassPath(Path, Configuration)</a> API能够被用于
-        缓存文件和jar包,并把它们加入子jvm的<em>classpath</em>。也可以通过设置配置文档里的属性
-        <span class="codefrag">mapred.job.classpath.{files|archives}</span>达到相同的效果。缓存文件可用于分发和装载本地库。
-        </p>
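-<p>下面是提交作业前配置<span class="codefrag">DistributedCache</span>的一段示意
-        (URI和文件名仅为示例,假定这些文件已存在于HDFS上;<span class="codefrag">new URI(...)</span>可能抛出URISyntaxException,这里省略了异常处理):</p>
-<p>
-<span class="codefrag">DistributedCache.createSymlink(conf);&nbsp;&nbsp;// 在任务的当前工作目录创建符号链接</span><br>
-<span class="codefrag">DistributedCache.addCacheFile(new URI("hdfs://namenode:port/user/joe/lookup.dat#lookup.dat"), conf);</span><br>
-<span class="codefrag">DistributedCache.addCacheArchive(new URI("hdfs://namenode:port/user/joe/extra.tgz#extra"), conf);&nbsp;&nbsp;// 归档会在slave上被解档</span><br>
-<span class="codefrag">DistributedCache.addFileToClassPath(new Path("/user/joe/lib/mylib.jar"), conf);&nbsp;&nbsp;// 加入子jvm的classpath</span><br>
-<span class="codefrag">// 任务中即可用相对路径"lookup.dat"、"extra/..."访问这些文件</span><br>
-</p>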
-<a name="N10C50"></a><a name="Tool"></a>
-<h4>Tool</h4>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/util/Tool.html">Tool</a> 
-          接口支持处理常用的Hadoop命令行选项。
-          </p>
-<p>
-<span class="codefrag">Tool</span> 是Map/Reduce工具或应用的标准。应用程序应只处理其定制参数,
-          要把标准命令行选项通过
-		<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/util/ToolRunner.html#run(org.apache.hadoop.util.Tool, java.lang.String[])"> ToolRunner.run(Tool, String[])</a> 
-		委托给
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/util/GenericOptionsParser.html">
-          GenericOptionsParser</a>处理。
-          </p>
-<p>
-            Hadoop命令行的常用选项有:<br>
-            
-<span class="codefrag">
-              -conf &lt;configuration file&gt;
-            </span>
-            
-<br>
-            
-<span class="codefrag">
-              -D &lt;property=value&gt;
-            </span>
-            
-<br>
-            
-<span class="codefrag">
-              -fs &lt;local|namenode:port&gt;
-            </span>
-            
-<br>
-            
-<span class="codefrag">
-              -jt &lt;local|jobtracker:port&gt;
-            </span>
-          
-</p>
-<a name="N10C81"></a><a name="IsolationRunner"></a>
-<h4>IsolationRunner</h4>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/IsolationRunner.html">
-          IsolationRunner</a> 是帮助调试Map/Reduce程序的工具。</p>
-<p>使用<span class="codefrag">IsolationRunner</span>的方法是,首先设置
-          <span class="codefrag">keep.failed.tasks.files</span>属性为<span class="codefrag">true</span> 
-          (同时参考<span class="codefrag">keep.tasks.files.pattern</span>)。</p>
-<p>
-            然后,登录到任务运行失败的节点上,进入
-            <span class="codefrag">TaskTracker</span>的本地路径运行
-            <span class="codefrag">IsolationRunner</span>:<br>
-            
-<span class="codefrag">$ cd &lt;local path&gt;/taskTracker/${taskid}/work</span>
-<br>
-            
-<span class="codefrag">
-              $ bin/hadoop org.apache.hadoop.mapred.IsolationRunner ../job.xml
-            </span>
-          
-</p>
-<p>
-<span class="codefrag">IsolationRunner</span>会把失败的任务放在单独的一个能够调试的jvm上运行,并且采用和之前完全一样的输入数据。
-		</p>
-<a name="N10CB4"></a><a name="Profiling"></a>
-<h4>Profiling</h4>
-<p>Profiling是一种工具,它利用内置的java profiler对map和reduce任务中有代表性的(2-3个)样例进行分析,并生成分析报告。</p>
-<p>用户可以通过设置属性<span class="codefrag">mapred.task.profile</span>指定系统是否采集profiler信息。
-          利用api<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setProfileEnabled(boolean)">
-          JobConf.setProfileEnabled(boolean)</a>也可以设置该属性。如果设为<span class="codefrag">true</span>,
-          则开启profiling功能。profiler信息保存在用户日志目录下。缺省情况下,profiling功能是关闭的。</p>
-<p>如果用户设定使用profiling功能,可以使用配置文档里的属性
-          <span class="codefrag">mapred.task.profile.{maps|reduces}</span>
-          设置要profile map/reduce task的范围。设置该属性值的api是
-           <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setProfileTaskRange(boolean,%20java.lang.String)">
-          JobConf.setProfileTaskRange(boolean,String)</a>。
-          范围的缺省值是<span class="codefrag">0-2</span>。</p>
-<p>用户可以通过设定配置文档里的属性<span class="codefrag">mapred.task.profile.params</span>
-          来指定profiler配置参数。修改属性要使用api
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setProfileParams(java.lang.String)">
-          JobConf.setProfileParams(String)</a>。当运行task时,如果参数字符串中包含<span class="codefrag">%s</span>,
-          它会被替换成profiling输出文件的名字。这些参数会在命令行里传递给子JVM。缺省的profiling
-          参数是
-          <span class="codefrag">-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s</span>。
-          </p>
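-<p>下面是开启profiling的一段配置示意:</p>
-<p>
-<span class="codefrag">conf.setProfileEnabled(true);&nbsp;&nbsp;// 等价于设置mapred.task.profile=true</span><br>
-<span class="codefrag">conf.setProfileTaskRange(true, "0-2");&nbsp;&nbsp;// 只对map任务0~2做profile;第一个参数为false时作用于reduce</span><br>
-<span class="codefrag">conf.setProfileParams("-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s");</span><br>
-</p>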
-<a name="N10CE8"></a><a name="%E8%B0%83%E8%AF%95"></a>
-<h4>调试</h4>
-<p>Map/Reduce框架能够运行用户提供的用于调试的脚本程序。 
-          当map/reduce任务失败时,用户可以通过运行脚本在任务日志(例如任务的标准输出、标准错误、系统日志以及作业配置文件)上做后续处理工作。用户提供的调试脚本程序的标准输出和标准错误会输出为诊断文件。如果需要的话这些输出结果也可以打印在用户界面上。</p>
-<p> 在接下来的章节,我们讨论如何与作业一起提交调试脚本。为了提交调试脚本,
-          首先要把这个脚本分发出去,而且还要在配置文件里设置。
-     	  </p>
-<a name="N10CF4"></a><a name="%E5%A6%82%E4%BD%95%E5%88%86%E5%8F%91%E8%84%9A%E6%9C%AC%E6%96%87%E4%BB%B6%EF%BC%9A"></a>
-<h5> 如何分发脚本文件:</h5>
-<p>用户要用
-          <a href="mapred_tutorial.html#DistributedCache">DistributedCache</a>
-          机制来<em>分发</em>和<em>链接</em>脚本文件</p>
-<a name="N10D08"></a><a name="%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4%E8%84%9A%E6%9C%AC%EF%BC%9A"></a>
-<h5> 如何提交脚本:</h5>
-<p> 一个快速提交调试脚本的方法是分别为需要调试的map任务和reduce任务设置
-		"mapred.map.task.debug.script" 和 "mapred.reduce.task.debug.script"
-	 属性的值。这些属性也可以通过
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setMapDebugScript(java.lang.String)">
-          JobConf.setMapDebugScript(String) </a>和 
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setReduceDebugScript(java.lang.String)">
-          JobConf.setReduceDebugScript(String) </a>API来设置。对于streaming,
-          可以分别为需要调试的map任务和reduce任务使用命令行选项-mapdebug 和 -reducedebug来提交调试脚本。
-          </p>
-<p>脚本的参数是任务的标准输出、标准错误、系统日志以及作业配置文件。在运行map/reduce失败的节点上运行调试命令是:
-		 <br>
-          
-<span class="codefrag"> $script $stdout $stderr $syslog $jobconf </span> 
-</p>
-<p> Pipes 程序根据第五个参数获得c++程序名。
-          因此调试pipes程序的命令是<br> 
-          
-<span class="codefrag">$script $stdout $stderr $syslog $jobconf $program </span>  
-          
-</p>
-<a name="N10D2A"></a><a name="%E9%BB%98%E8%AE%A4%E8%A1%8C%E4%B8%BA"></a>
-<h5> 默认行为 </h5>
-<p> 对于pipes,默认的脚本会用gdb处理core dump,
-          打印 stack trace并且给出正在运行线程的信息。</p>
-<a name="N10D35"></a><a name="JobControl"></a>
-<h4>JobControl</h4>
-<p>
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/jobcontrol/package-summary.html">
-          JobControl</a>是一个工具,它封装了一组Map/Reduce作业以及他们之间的依赖关系。
-	  </p>
-<a name="N10D42"></a><a name="%E6%95%B0%E6%8D%AE%E5%8E%8B%E7%BC%A9"></a>
-<h4>数据压缩</h4>
-<p>Hadoop Map/Reduce框架允许应用程序的编写者指定对map输出的中间数据和作业最终输出数据(例如reduce的输出)进行压缩。它还附带了一些
-          <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/io/compress/CompressionCodec.html">
-          CompressionCodec</a>的实现,比如实现了
-          <a href="http://www.zlib.net/">zlib</a>和<a href="http://www.oberhumer.com/opensource/lzo/">lzo</a>压缩算法。
-           Hadoop同样支持<a href="http://www.gzip.org/">gzip</a>文件格式。 
-          </p>
-<p>考虑到性能问题(zlib)以及Java类库的缺失(lzo)等因素,Hadoop也为上述压缩解压算法提供本地库的实现。更多的细节请参考
-          <a href="native_libraries.html">这里</a>。</p>
-<a name="N10D62"></a><a name="%E4%B8%AD%E9%97%B4%E8%BE%93%E5%87%BA"></a>
-<h5>中间输出</h5>
-<p>应用程序可以通过
-            <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setCompressMapOutput(boolean)">
-            JobConf.setCompressMapOutput(boolean)</a>api控制是否压缩map输出的中间结果,并且可以通过
-            <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/JobConf.html#setMapOutputCompressorClass(java.lang.Class)">
-            JobConf.setMapOutputCompressorClass(Class)</a>api指定
-            <span class="codefrag">CompressionCodec</span>。
-        </p>
-<a name="N10D77"></a><a name="%E4%BD%9C%E4%B8%9A%E8%BE%93%E5%87%BA"></a>
-<h5>作业输出</h5>
-<p>应用程序可以通过
-            <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/FileOutputFormat.html#setCompressOutput(org.apache.hadoop.mapred.JobConf,%20boolean)">
-            FileOutputFormat.setCompressOutput(JobConf, boolean)</a>
-            api控制输出是否需要压缩并且可以使用 
-            <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/FileOutputFormat.html#setOutputCompressorClass(org.apache.hadoop.mapred.JobConf,%20java.lang.Class)">
-            FileOutputFormat.setOutputCompressorClass(JobConf, Class)</a>api指定<span class="codefrag">CompressionCodec</span>。</p>
-<p>如果作业输出要保存成 
-            <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/SequenceFileOutputFormat.html">
-            SequenceFileOutputFormat</a>格式,需要使用
-            <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/mapred/SequenceFileOutputFormat.html#setOutputCompressionType(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.io.SequenceFile.CompressionType)">
-            SequenceFileOutputFormat.setOutputCompressionType(JobConf, 
-            SequenceFile.CompressionType)</a>api,来设定
-            <span class="codefrag">SequenceFile.CompressionType</span> (i.e. <span class="codefrag">RECORD</span> / 
-            <span class="codefrag">BLOCK</span> - 默认是<span class="codefrag">RECORD</span>)。
-            </p>
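-<p>下面把上述压缩相关的设置汇总成一段配置示意(这里以<span class="codefrag">GzipCodec</span>为例,编解码器可按需替换):</p>
-<p>
-<span class="codefrag">conf.setCompressMapOutput(true);</span><br>
-<span class="codefrag">conf.setMapOutputCompressorClass(GzipCodec.class);&nbsp;&nbsp;// 中间输出使用的CompressionCodec</span><br>
-<span class="codefrag">FileOutputFormat.setCompressOutput(conf, true);</span><br>
-<span class="codefrag">FileOutputFormat.setOutputCompressorClass(conf, GzipCodec.class);</span><br>
-<span class="codefrag">// 如果输出保存为SequenceFile,还可以指定压缩类型(RECORD或BLOCK)</span><br>
-<span class="codefrag">conf.setOutputFormat(SequenceFileOutputFormat.class);</span><br>
-<span class="codefrag">SequenceFileOutputFormat.setOutputCompressionType(conf, SequenceFile.CompressionType.BLOCK);</span><br>
-</p>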
-</div>
-
-    
-<a name="N10DA6"></a><a name="%E4%BE%8B%E5%AD%90%EF%BC%9AWordCount+v2.0"></a>
-<h2 class="h3">例子:WordCount v2.0</h2>
-<div class="section">
-<p>这里是一个更全面的<span class="codefrag">WordCount</span>例子,它使用了我们已经讨论过的很多Map/Reduce框架提供的功能。 
-      </p>
-<p>运行这个例子需要HDFS的某些功能,特别是
-      <span class="codefrag">DistributedCache</span>相关功能。因此这个例子只能运行在
-      <a href="quickstart.html#SingleNodeSetup">伪分布式</a> 或者
-      <a href="quickstart.html#Fully-Distributed+Operation">完全分布式模式</a>的 
-      Hadoop上。</p>
-<a name="N10DC0"></a><a name="%E6%BA%90%E4%BB%A3%E7%A0%81-N10DC0"></a>
-<h3 class="h4">源代码</h3>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-          
-<tr>
-            
-<th colspan="1" rowspan="1"></th>
-            <th colspan="1" rowspan="1">WordCount.java</th>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">1.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">package org.myorg;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">2.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">3.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import java.io.*;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">4.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import java.util.*;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">5.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">6.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import org.apache.hadoop.fs.Path;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">7.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import org.apache.hadoop.filecache.DistributedCache;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">8.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import org.apache.hadoop.conf.*;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">9.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import org.apache.hadoop.io.*;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">10.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import org.apache.hadoop.mapred.*;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">11.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">import org.apache.hadoop.util.*;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">12.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">13.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">public class WordCount extends Configured implements Tool {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">14.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">15.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">
-                public static class Map extends MapReduceBase 
-                implements Mapper&lt;LongWritable, Text, Text, IntWritable&gt; {
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">16.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">17.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                static enum Counters { INPUT_WORDS }
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">18.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">19.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                private final static IntWritable one = new IntWritable(1);
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">20.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">private Text word = new Text();</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">21.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">22.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">private boolean caseSensitive = true;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">23.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">private Set&lt;String&gt; patternsToSkip = new HashSet&lt;String&gt;();</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">24.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">25.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">private long numRecords = 0;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">26.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">private String inputFile;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">27.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">28.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">public void configure(JobConf job) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">29.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                caseSensitive = job.getBoolean("wordcount.case.sensitive", true);
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">30.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">inputFile = job.get("map.input.file");</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">31.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">32.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">if (job.getBoolean("wordcount.skip.patterns", false)) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">33.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">Path[] patternsFiles = new Path[0];</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">34.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">try {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">35.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                patternsFiles = DistributedCache.getLocalCacheFiles(job);
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">36.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">} catch (IOException ioe) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">37.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                System.err.println("Caught exception while getting cached files: " 
-                + StringUtils.stringifyException(ioe));
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">38.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">39.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">for (Path patternsFile : patternsFiles) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">40.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">parseSkipFile(patternsFile);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">41.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">42.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">43.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">44.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">45.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">private void parseSkipFile(Path patternsFile) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">46.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">try {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">47.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                BufferedReader fis = 
-                  new BufferedReader(new FileReader(patternsFile.toString()));
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">48.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">String pattern = null;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">49.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">while ((pattern = fis.readLine()) != null) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">50.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">patternsToSkip.add(pattern);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">51.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">52.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">} catch (IOException ioe) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">53.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                System.err.println("Caught exception while parsing the cached file '" +
-                                   patternsFile + "' : " + 
-                                   StringUtils.stringifyException(ioe));
-                
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">54.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">55.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">56.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">57.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                public void map(LongWritable key, Text value, 
-                OutputCollector&lt;Text, IntWritable&gt; output, 
-                Reporter reporter) throws IOException {
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">58.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                String line = 
-                  (caseSensitive) ? value.toString() : 
-                                    value.toString().toLowerCase();
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">59.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">60.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">for (String pattern : patternsToSkip) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">61.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">line = line.replaceAll(pattern, "");</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">62.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">63.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">64.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">StringTokenizer tokenizer = new StringTokenizer(line);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">65.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">while (tokenizer.hasMoreTokens()) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">66.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">word.set(tokenizer.nextToken());</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">67.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">output.collect(word, one);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">68.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">reporter.incrCounter(Counters.INPUT_WORDS, 1);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">69.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">70.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">71.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">if ((++numRecords % 100) == 0) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">72.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                reporter.setStatus("Finished processing " + numRecords + 
-                                   " records " + "from the input file: " + 
-                                   inputFile);
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">73.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">74.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">75.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">76.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">77.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">
-                public static class Reduce extends MapReduceBase implements 
-                Reducer&lt;Text, IntWritable, Text, IntWritable&gt; {
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">78.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                public void reduce(Text key, Iterator&lt;IntWritable&gt; values,
-                OutputCollector&lt;Text, IntWritable&gt; output, 
-                Reporter reporter) throws IOException {
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">79.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">int sum = 0;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">80.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">while (values.hasNext()) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">81.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">sum += values.next().get();</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">82.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">83.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">output.collect(key, new IntWritable(sum));</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">84.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">85.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">86.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">87.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">public int run(String[] args) throws Exception {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">88.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                JobConf conf = new JobConf(getConf(), WordCount.class);
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">89.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setJobName("wordcount");</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">90.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">91.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setOutputKeyClass(Text.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">92.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setOutputValueClass(IntWritable.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">93.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">94.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setMapperClass(Map.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">95.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setCombinerClass(Reduce.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">96.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setReducerClass(Reduce.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">97.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">98.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setInputFormat(TextInputFormat.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">99.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">conf.setOutputFormat(TextOutputFormat.class);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">100.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">101.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                List&lt;String&gt; other_args = new ArrayList&lt;String&gt;();
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">102.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">for (int i=0; i &lt; args.length; ++i) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">103.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">if ("-skip".equals(args[i])) {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">104.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf);
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">105.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                conf.setBoolean("wordcount.skip.patterns", true);
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">106.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">} else {</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">107.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">other_args.add(args[i]);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">108.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">109.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">110.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">111.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">FileInputFormat.setInputPaths(conf, new Path(other_args.get(0)));</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">112.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1)));</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">113.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">114.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">JobClient.runJob(conf);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">115.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">return 0;</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">116.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">117.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">118.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">
-                public static void main(String[] args) throws Exception {
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">119.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">
-                int res = ToolRunner.run(new Configuration(), new WordCount(), 
-                                         args);
-              </span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">120.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span class="codefrag">System.exit(res);</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">121.</td>
-            <td colspan="1" rowspan="1">
-              &nbsp;&nbsp;
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">122.</td>
-            <td colspan="1" rowspan="1">
-              <span class="codefrag">}</span>
-            </td>
-          
-</tr>
-          
-<tr>
-            
-<td colspan="1" rowspan="1">123.</td>
-            <td colspan="1" rowspan="1"></td>
-          
-</tr>
-        
-</table>
-<a name="N11522"></a><a name="%E8%BF%90%E8%A1%8C%E6%A0%B7%E4%BE%8B"></a>
-<h3 class="h4">运行样例</h3>
-<p>输入样例:</p>
-<p>
-          
-<span class="codefrag">$ bin/hadoop dfs -ls /usr/joe/wordcount/input/</span>
-<br>
-          
-<span class="codefrag">/usr/joe/wordcount/input/file01</span>
-<br>
-          
-<span class="codefrag">/usr/joe/wordcount/input/file02</span>
-<br>
-          
-<br>
-          
-<span class="codefrag">$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01</span>
-<br>
-          
-<span class="codefrag">Hello World, Bye World!</span>
-<br>
-          
-<br>
-          
-<span class="codefrag">$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02</span>
-<br>
-          
-<span class="codefrag">Hello Hadoop, Goodbye to hadoop.</span>
-        
-</p>
-<p>运行程序:</p>
-<p>
-          
-<span class="codefrag">
-            $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount 
-              /usr/joe/wordcount/input /usr/joe/wordcount/output 
-          </span>
-        
-</p>
-<p>输出:</p>
-<p>
-          
-<span class="codefrag">
-            $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
-          </span>
-          
-<br>
-          
-<span class="codefrag">Bye    1</span>
-<br>
-          
-<span class="codefrag">Goodbye    1</span>
-<br>
-          
-<span class="codefrag">Hadoop,    1</span>
-<br>
-          
-<span class="codefrag">Hello    2</span>
-<br>
-          
-<span class="codefrag">World!    1</span>
-<br>
-          
-<span class="codefrag">World,    1</span>
-<br>
-          
-<span class="codefrag">hadoop.    1</span>
-<br>
-          
-<span class="codefrag">to    1</span>
-<br>
-        
-</p>
-<p>注意此时的输入与第一个版本的不同,输出的结果也有不同。
-	</p>
-<p>现在通过<span class="codefrag">DistributedCache</span>插入一个模式文件,文件中保存了要被忽略的单词模式。
-	</p>
-<p>
-          
-<span class="codefrag">$ hadoop dfs -cat /user/joe/wordcount/patterns.txt</span>
-<br>
-          
-<span class="codefrag">\.</span>
-<br>
-          
-<span class="codefrag">\,</span>
-<br>
-          
-<span class="codefrag">\!</span>
-<br>
-          
-<span class="codefrag">to</span>
-<br>
-        
-</p>
-<p>再运行一次,这次使用更多的选项:</p>
-<p>
-          
-<span class="codefrag">
-            $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount 
-              -Dwordcount.case.sensitive=true /usr/joe/wordcount/input 
-              /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt
-          </span>
-        
-</p>
-<p>应该得到这样的输出:</p>
-<p>
-          
-<span class="codefrag">
-            $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
-          </span>
-          
-<br>
-          
-<span class="codefrag">Bye    1</span>
-<br>
-          
-<span class="codefrag">Goodbye    1</span>
-<br>
-          
-<span class="codefrag">Hadoop    1</span>
-<br>
-          
-<span class="codefrag">Hello    2</span>
-<br>
-          
-<span class="codefrag">World    2</span>
-<br>
-          
-<span class="codefrag">hadoop    1</span>
-<br>
-        
-</p>
-<p>再运行一次,这一次关闭大小写敏感性(case-sensitivity):</p>
-<p>
-          
-<span class="codefrag">
-            $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount 
-              -Dwordcount.case.sensitive=false /usr/joe/wordcount/input 
-              /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt
-          </span>
-        
-</p>
-<p>输出:</p>
-<p>
-          
-<span class="codefrag">
-            $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
-          </span>
-          
-<br>
-          
-<span class="codefrag">bye    1</span>
-<br>
-          
-<span class="codefrag">goodbye    1</span>
-<br>
-          
-<span class="codefrag">hadoop    2</span>
-<br>
-          
-<span class="codefrag">hello    2</span>
-<br>
-          
-<span class="codefrag">world    2</span>
-<br>
-        
-</p>
-<a name="N115F6"></a><a name="%E7%A8%8B%E5%BA%8F%E8%A6%81%E7%82%B9"></a>
-<h3 class="h4">程序要点</h3>
-<p>
-	通过使用一些Map/Reduce框架提供的功能,<span class="codefrag">WordCount</span>的第二个版本在原始版本基础上有了如下的改进:
-        </p>
-<ul>
-          
-<li>
-            展示了应用程序如何在<span class="codefrag">Mapper</span> (和<span class="codefrag">Reducer</span>)中通过<span class="codefrag">configure</span>方法
-		修改配置参数(28-43行;该 configure 方法的示意代码参见本列表之后)。
-          </li>
-          
-<li>
-            展示了作业如何使用<span class="codefrag">DistributedCache</span> 来分发只读数据。
-	这里允许用户指定单词的模式,在计数时忽略那些符合模式的单词(104行)。
-          </li>
-          
-<li>
-        展示<span class="codefrag">Tool</span>接口和<span class="codefrag">GenericOptionsParser</span>处理Hadoop命令行选项的功能
-            (87-116, 119行)。
-          </li>
-          
-<li>
-	展示了应用程序如何使用<span class="codefrag">Counters</span>(68行),如何通过传递给<span class="codefrag">map</span>(和<span class="codefrag">reduce</span>)
-	方法的<span class="codefrag">Reporter</span>实例来设置应用程序的状态信息(72行)。
-          </li>
-        
-</ul>
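The first bullet above refers to the configure() method at lines 28-43 of the listing. As a reference, here is a minimal sketch (not the verbatim source) of how such a method can be written for the Map class: it reuses the wordcount.case.sensitive and wordcount.skip.patterns keys and the patternsToSkip field visible in the listing, and assumes the usual imports from the full WordCount example (org.apache.hadoop.filecache.DistributedCache, org.apache.hadoop.fs.Path, org.apache.hadoop.mapred.JobConf, java.io.*, java.util.*).

```java
// Sketch only: fields and configure() for the Map class, consistent with the
// keys set in run() (lines 104-105) and the patternsToSkip loop around
// lines 60-62 of the listing.
private boolean caseSensitive = true;
private Set<String> patternsToSkip = new HashSet<String>();

public void configure(JobConf job) {
  // Job-level switch passed as -Dwordcount.case.sensitive=... on the command line.
  caseSensitive = job.getBoolean("wordcount.case.sensitive", true);

  if (job.getBoolean("wordcount.skip.patterns", false)) {
    try {
      // Files registered with DistributedCache.addCacheFile() in run() are
      // localized on each task node before the task starts.
      Path[] patternsFiles = DistributedCache.getLocalCacheFiles(job);
      for (Path patternsFile : patternsFiles) {
        BufferedReader reader =
            new BufferedReader(new FileReader(patternsFile.toString()));
        String pattern;
        while ((pattern = reader.readLine()) != null) {
          patternsToSkip.add(pattern);
        }
        reader.close();
      }
    } catch (IOException ioe) {
      System.err.println("Could not read the pattern file(s): " + ioe);
    }
  }
}
```

map() then consults caseSensitive and strips every pattern in patternsToSkip (the loop around lines 60-62) before tokenizing, which is exactly what the -Dwordcount.case.sensitive and -skip runs shown earlier exercise.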
-</div>
-
-    
-<p>
-      
-<em>Java和JNI是Sun Microsystems, Inc.在美国和其它国家的注册商标。</em>
-    
-</p>
-    
-  
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 250
docs/cn/mapred_tutorial.pdf


+ 0 - 462
docs/cn/native_libraries.html

@@ -1,462 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>Hadoop本地库</title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">项目</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">维基</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.18文档</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">文档</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">概述</a>
-</div>
-<div class="menuitem">
-<a href="quickstart.html">快速入门</a>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">集群搭建</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS构架设计</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS使用指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS权限指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS配额管理指南</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">命令手册</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">FS Shell使用指南</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">DistCp使用指南</a>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Map-Reduce教程</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">Hadoop本地库</div>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hod.html">Hadoop On Demand</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="native_libraries.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>Hadoop本地库</h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#%E7%9B%AE%E7%9A%84">目的</a>
-</li>
-<li>
-<a href="#%E7%BB%84%E4%BB%B6">组件</a>
-</li>
-<li>
-<a href="#%E4%BD%BF%E7%94%A8%E6%96%B9%E6%B3%95">使用方法</a>
-</li>
-<li>
-<a href="#%E6%94%AF%E6%8C%81%E7%9A%84%E5%B9%B3%E5%8F%B0">支持的平台</a>
-</li>
-<li>
-<a href="#%E6%9E%84%E5%BB%BAHadoop%E6%9C%AC%E5%9C%B0%E5%BA%93">构建Hadoop本地库</a>
-<ul class="minitoc">
-<li>
-<a href="#%E6%B3%A8%E6%84%8F">注意</a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E4%BD%BF%E7%94%A8DistributedCache+%E5%8A%A0%E8%BD%BD%E6%9C%AC%E5%9C%B0%E5%BA%93"> 使用DistributedCache 加载本地库</a>
-</li>
-</ul>
-</div>
-  
-    
-<a name="N1000D"></a><a name="%E7%9B%AE%E7%9A%84"></a>
-<h2 class="h3">目的</h2>
-<div class="section">
-<p>
-     鉴于性能问题以及某些Java类库的缺失,对于某些组件,Hadoop提供了自己的本地实现。
-	这些组件保存在Hadoop的一个独立的动态链接的库里。这个库在*nix平台上叫<em>libhadoop.so</em>. 本文主要介绍本地库的使用方法以及如何构建本地库。
-</p>
-</div>
-    
-    
-<a name="N1001A"></a><a name="%E7%BB%84%E4%BB%B6"></a>
-<h2 class="h3">组件</h2>
-<div class="section">
-<p>Hadoop现在已经有以下
-      <a href="http://hadoop.apache.org/core/docs/r0.18.2/api/org/apache/hadoop/io/compress/CompressionCodec.html">
-      compression codecs</a>本地组件:</p>
-<ul>
-        
-<li>
-<a href="http://www.zlib.net/">zlib</a>
-</li>
-        
-<li>
-<a href="http://www.gzip.org/">gzip</a>
-</li>
-        
-<li>
-<a href="http://www.oberhumer.com/opensource/lzo/">lzo</a>
-</li>
-      
-</ul>
-<p>在以上组件中,lzo和gzip压缩编解码器必须使用hadoop本地库才能运行。
-      </p>
-</div>
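As an aside that is not part of the original page: with the old mapred API, a job selects one of the codecs listed above for its output roughly as follows (gzip is used here purely as an illustration; per the note above, the gzip and lzo codecs rely on the native hadoop library described on this page).

```java
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class GzipOutputExample {
  // Sketch: ask the job to gzip-compress its output files. The codec class
  // comes from the list above; see the following sections for how the
  // native implementation behind it gets loaded.
  public static void enableGzipOutput(JobConf conf) {
    FileOutputFormat.setCompressOutput(conf, true);
    FileOutputFormat.setOutputCompressorClass(conf, GzipCodec.class);
  }
}
```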
-
-    
-<a name="N1003D"></a><a name="%E4%BD%BF%E7%94%A8%E6%96%B9%E6%B3%95"></a>
-<h2 class="h3">使用方法</h2>
-<div class="section">
-<p>hadoop本地库的用法很简单:</p>
-<ul>
-        
-<li>
-          看一下
-	<a href="#%E6%94%AF%E6%8C%81%E7%9A%84%E5%B9%B3%E5%8F%B0">支持的平台</a>.
-        </li>
-        
-<li>
-           
-<a href="http://hadoop.apache.org/core/releases.html#Download">下载</a> 预构建的32位i386架构的Linux本地hadoop库(可以在hadoop发行版的<span class="codefrag">lib/native</span>目录下找到)或者自己
-          <a href="#%E6%9E%84%E5%BB%BAHadoop%E6%9C%AC%E5%9C%B0%E5%BA%93">构建</a> 这些库。
-        </li>
-        
-<li>
-          确保你的平台已经安装了<strong>zlib-1.2</strong>以上版本或者<strong>lzo2.0</strong>以上版本的软件包或者两者均已安装(根据你的需要)。
-        </li>
-      
-</ul>
-<p>
-<span class="codefrag">bin/hadoop</span> 脚本通过系统属性
-      <em>-Djava.library.path=&lt;path&gt;</em>来确认hadoop本地库是否包含在库路径里。</p>
-<p>检查hadoop日志文件可以查看hadoop库是否正常,正常情况下会看到:</p>
-<p>
-        
-<span class="codefrag">
-          DEBUG util.NativeCodeLoader - Trying to load the custom-built 
-          native-hadoop library... 
-        </span>
-<br>
-        
-<span class="codefrag">
-          INFO  util.NativeCodeLoader - Loaded the native-hadoop library
-        </span>
-      
-</p>
-<p>如果出错,会看到:</p>
-<p>
-        
-<span class="codefrag">
-          INFO util.NativeCodeLoader - Unable to load native-hadoop library for 
-          your platform... using builtin-java classes where applicable
-        </span>
-      
-</p>
-</div>
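Besides grepping the log for the messages quoted above, an application can ask at run time whether the library was picked up; the sketch below uses the same NativeCodeLoader utility that emits those log lines (this check is an illustrative addition, not part of the original page).

```java
import org.apache.hadoop.util.NativeCodeLoader;

public class NativeLibCheck {
  public static void main(String[] args) {
    // True only if libhadoop.so was found on java.library.path and was
    // loaded successfully by this JVM.
    if (NativeCodeLoader.isNativeCodeLoaded()) {
      System.out.println("native-hadoop library loaded");
    } else {
      System.out.println("native-hadoop library not loaded; "
          + "builtin-java classes will be used where applicable");
    }
  }
}
```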
-    
-    
-<a name="N10086"></a><a name="%E6%94%AF%E6%8C%81%E7%9A%84%E5%B9%B3%E5%8F%B0"></a>
-<h2 class="h3">支持的平台</h2>
-<div class="section">
-<p>Hadoop本地库只支持*nix平台,已经广泛使用在GNU/Linux平台上,但是不支持
-      <a href="http://www.cygwin.com/">Cygwin</a> 
-      和 <a href="http://www.apple.com/macosx">Mac OS X</a>。 
-      </p>
-<p>已经测试过的GNU/Linux发行版本:</p>
-<ul>
-        
-<li>
-          
-<a href="http://www.redhat.com/rhel/">RHEL4</a>/<a href="http://fedora.redhat.com/">Fedora</a>
-        
-</li>
-        
-<li>
-<a href="http://www.ubuntu.com/">Ubuntu</a>
-</li>
-        
-<li>
-<a href="http://www.gentoo.org/">Gentoo</a>
-</li>
-      
-</ul>
-<p>在上述平台上,32/64位Hadoop本地库分别能和32/64位的jvm一起正常运行。
-      </p>
-</div>
-    
-    
-<a name="N100B6"></a><a name="%E6%9E%84%E5%BB%BAHadoop%E6%9C%AC%E5%9C%B0%E5%BA%93"></a>
-<h2 class="h3">构建Hadoop本地库</h2>
-<div class="section">
-<p>Hadoop本地库使用
-      <a href="http://en.wikipedia.org/wiki/ANSI_C">ANSI C</a> 编写,使用GNU autotools工具链 (autoconf, autoheader, automake, autoscan, libtool)构建。也就是说构建hadoop库的平台需要有标准C的编译器和GNU autotools工具链。请参看
-      <a href="#%E6%94%AF%E6%8C%81%E7%9A%84%E5%B9%B3%E5%8F%B0">支持的平台</a>。</p>
-<p>你的目标平台上可能会需要的软件包:
-      </p>
-<ul>
-        
-<li>
-          C 编译器 (e.g. <a href="http://gcc.gnu.org/">GNU C Compiler</a>)
-        </li>
-        
-<li>
-          GNU Autotools 工具链: 
-          <a href="http://www.gnu.org/software/autoconf/">autoconf</a>, 
-          <a href="http://www.gnu.org/software/automake/">automake</a>, 
-          <a href="http://www.gnu.org/software/libtool/">libtool</a>
-        
-</li>
-        
-<li> 
-          zlib开发包 (stable version &gt;= 1.2.0)
-        </li>
-        
-<li> 
-          lzo开发包 (stable version &gt;= 2.0)
-        </li> 
-      
-</ul>
-<p>如果已经满足了上述先决条件,可以使用<span class="codefrag">build.xml</span> 
-      文件,并把其中的<span class="codefrag">compile.native</span>置为 
-      <span class="codefrag">true</span>,这样就可以生成hadoop本地库:</p>
-<p>
-<span class="codefrag">$ ant -Dcompile.native=true &lt;target&gt;</span>
-</p>
-<p>因为不是所有用户都需要Hadoop本地库,所以默认情况下hadoop不生成该库。
-      </p>
-<p>你可以在下面的路径查看新生成的hadoop本地库:</p>
-<p>
-<span class="codefrag">$ build/native/&lt;platform&gt;/lib</span>
-</p>
-<p>其中&lt;platform&gt;是下列系统属性的组合 
-      <span class="codefrag">${os.name}-${os.arch}-${sun.arch.data.model}</span>;例如 
-      Linux-i386-32。</p>
-<a name="N10109"></a><a name="%E6%B3%A8%E6%84%8F"></a>
-<h3 class="h4">注意</h3>
-<ul>
-          
-<li>
-            在生成hadoop本地库的目标平台上<strong>必须</strong> 安装了zlib和lzo开发包;但是如果你只希望使用其中一个的话,在部署时,安装其中任何一个都是足够的。
-          </li>
-          
-<li>
-		  在目标平台上生成以及部署hadoop本地库时,都需要根据32/64位jvm选取对应的32/64位zlib/lzo软件包。
-          </li>
-        
-</ul>
-</div>
-<!--DCCOMMENT:diff begin-->
-    
-<a name="N1011F"></a><a name="%E4%BD%BF%E7%94%A8DistributedCache+%E5%8A%A0%E8%BD%BD%E6%9C%AC%E5%9C%B0%E5%BA%93"></a>
-<h2 class="h3"> 使用DistributedCache 加载本地库</h2>
-<div class="section">
-<p>用户可以通过
-      <a href="mapred_tutorial.html#DistributedCache">DistributedCache</a>
-      加载本地共享库,并<em>分发</em>和建立库文件的<em>符号链接</em>。
-      </p>
-<p>这个例子描述了如何分发库文件,并在map/reduce任务中装载库文件(合并后的示意代码附在步骤列表之后)。
-      </p>
-<ol>
-      
-<li>首先拷贝库文件到HDFS。<br>
-      
-<span class="codefrag">bin/hadoop fs -copyFromLocal mylib.so.1 /libraries/mylib.so.1</span>
-      
-</li>
-      
-<li>启动作业时包含以下代码:<br>
-      
-<span class="codefrag"> DistributedCache.createSymlink(conf); </span> 
-<br>
-      
-<span class="codefrag"> DistributedCache.addCacheFile("hdfs://host:port/libraries/mylib.so.1#mylib.so", conf);
-      </span>
-      
-</li>
-      
-<li>map/reduce任务中包含以下代码:<br>
-      
-<span class="codefrag"> System.loadLibrary("mylib.so"); </span>
-      
-</li>
-      
-</ol>
-</div>
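Putting the three steps together, a driver-side and task-side sketch could look like the following. The hdfs://host:port/libraries/mylib.so.1 URI and the mylib.so symlink name are the placeholders used in the steps above; note that the page's System.loadLibrary("mylib.so") call is mirrored here by loading the symlink through System.load() with an absolute path, which sidesteps the JVM's library-name mapping.

```java
import java.io.File;
import java.net.URI;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.mapred.JobConf;

public class NativeLibViaCache {

  // Driver side (steps 1-2): the library has already been copied to HDFS;
  // register it with the DistributedCache and request a symlink named
  // "mylib.so" in every task's working directory.
  public static void setup(JobConf conf) throws Exception {
    DistributedCache.createSymlink(conf);
    DistributedCache.addCacheFile(
        new URI("hdfs://host:port/libraries/mylib.so.1#mylib.so"), conf);
  }

  // Task side (step 3), called for example from configure() of the
  // map/reduce task once the symlink exists in the working directory.
  public static void loadLib() {
    System.load(new File("mylib.so").getAbsolutePath());
  }
}
```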
-  
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 107
docs/cn/native_libraries.pdf


+ 0 - 574
docs/cn/quickstart.html

@@ -1,574 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<meta content="Apache Forrest" name="Generator">
-<meta name="Forrest-version" content="0.8">
-<meta name="Forrest-skin-name" content="pelt">
-<title>Hadoop快速入门</title>
-<link type="text/css" href="skin/basic.css" rel="stylesheet">
-<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
-<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
-<link type="text/css" href="skin/profile.css" rel="stylesheet">
-<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
-<link rel="shortcut icon" href="images/favicon.ico">
-</head>
-<body onload="init()">
-<script type="text/javascript">ndeSetTextSize();</script>
-<div id="top">
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://hadoop.apache.org/">Hadoop</a> &gt; <a href="http://hadoop.apache.org/core/">Core</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
-</div>
-<!--+
-    |header
-    +-->
-<div class="header">
-<!--+
-    |start group logo
-    +-->
-<div class="grouplogo">
-<a href="http://hadoop.apache.org/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Apache Hadoop"></a>
-</div>
-<!--+
-    |end group logo
-    +-->
-<!--+
-    |start Project Logo
-    +-->
-<div class="projectlogo">
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/core-logo.gif" title="Scalable Computing Platform"></a>
-</div>
-<!--+
-    |end Project Logo
-    +-->
-<!--+
-    |start Search
-    +-->
-<div class="searchbox">
-<form action="http://www.google.com/search" method="get" class="roundtopsmall">
-<input value="hadoop.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
-                    <input name="Search" value="Search" type="submit">
-</form>
-</div>
-<!--+
-    |end search
-    +-->
-<!--+
-    |start Tabs
-    +-->
-<ul id="tabs">
-<li>
-<a class="unselected" href="http://hadoop.apache.org/core/">项目</a>
-</li>
-<li>
-<a class="unselected" href="http://wiki.apache.org/hadoop">维基</a>
-</li>
-<li class="current">
-<a class="selected" href="index.html">Hadoop 0.18文档</a>
-</li>
-</ul>
-<!--+
-    |end Tabs
-    +-->
-</div>
-</div>
-<div id="main">
-<div id="publishedStrip">
-<!--+
-    |start Subtabs
-    +-->
-<div id="level2tabs"></div>
-<!--+
-    |end Endtabs
-    +-->
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<!--+
-    |breadtrail
-    +-->
-<div class="breadtrail">
-
-             &nbsp;
-           </div>
-<!--+
-    |start Menu, mainarea
-    +-->
-<!--+
-    |start Menu
-    +-->
-<div id="menu">
-<div onclick="SwitchMenu('menu_selected_1.1', 'skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">文档</div>
-<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
-<div class="menuitem">
-<a href="index.html">概述</a>
-</div>
-<div class="menupage">
-<div class="menupagetitle">快速入门</div>
-</div>
-<div class="menuitem">
-<a href="cluster_setup.html">集群搭建</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_design.html">HDFS构架设计</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_user_guide.html">HDFS使用指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS权限指南</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_quota_admin_guide.html">HDFS配额管理指南</a>
-</div>
-<div class="menuitem">
-<a href="commands_manual.html">命令手册</a>
-</div>
-<div class="menuitem">
-<a href="hdfs_shell.html">FS Shell使用指南</a>
-</div>
-<div class="menuitem">
-<a href="distcp.html">DistCp使用指南</a>
-</div>
-<div class="menuitem">
-<a href="mapred_tutorial.html">Map-Reduce教程</a>
-</div>
-<div class="menuitem">
-<a href="native_libraries.html">Hadoop本地库</a>
-</div>
-<div class="menuitem">
-<a href="streaming.html">Streaming</a>
-</div>
-<div class="menuitem">
-<a href="hadoop_archives.html">Hadoop Archives</a>
-</div>
-<div class="menuitem">
-<a href="hod.html">Hadoop On Demand</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/api/index.html">API参考</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/jdiff/changes.html">API Changes</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/">维基</a>
-</div>
-<div class="menuitem">
-<a href="http://wiki.apache.org/hadoop/FAQ">常见问题</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/mailing_lists.html">邮件列表</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/releasenotes.html">发行说明</a>
-</div>
-<div class="menuitem">
-<a href="http://hadoop.apache.org/core/docs/r0.18.2/changes.html">变更日志</a>
-</div>
-</div>
-<div id="credit"></div>
-<div id="roundbottom">
-<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
-<!--+
-  |alternative credits
-  +-->
-<div id="credit2"></div>
-</div>
-<!--+
-    |end Menu
-    +-->
-<!--+
-    |start content
-    +-->
-<div id="content">
-<div title="Portable Document Format" class="pdflink">
-<a class="dida" href="quickstart.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
-        PDF</a>
-</div>
-<h1>Hadoop快速入门</h1>
-<div id="minitoc-area">
-<ul class="minitoc">
-<li>
-<a href="#%E7%9B%AE%E7%9A%84">目的</a>
-</li>
-<li>
-<a href="#PreReqs">先决条件</a>
-<ul class="minitoc">
-<li>
-<a href="#%E6%94%AF%E6%8C%81%E5%B9%B3%E5%8F%B0">支持平台</a>
-</li>
-<li>
-<a href="#%E6%89%80%E9%9C%80%E8%BD%AF%E4%BB%B6">所需软件</a>
-</li>
-<li>
-<a href="#%E5%AE%89%E8%A3%85%E8%BD%AF%E4%BB%B6">安装软件</a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#%E4%B8%8B%E8%BD%BD">下载</a>
-</li>
-<li>
-<a href="#%E8%BF%90%E8%A1%8CHadoop%E9%9B%86%E7%BE%A4%E7%9A%84%E5%87%86%E5%A4%87%E5%B7%A5%E4%BD%9C">运行Hadoop集群的准备工作</a>
-</li>
-<li>
-<a href="#Local">单机模式的操作方法</a>
-</li>
-<li>
-<a href="#PseudoDistributed">伪分布式模式的操作方法</a>
-<ul class="minitoc">
-<li>
-<a href="#%E9%85%8D%E7%BD%AE">配置</a>
-</li>
-<li>
-<a href="#%E5%85%8D%E5%AF%86%E7%A0%81">免密码ssh设置</a>
-</li>
-<li>
-<a href="#%E6%89%A7%E8%A1%8C">执行</a>
-</li>
-</ul>
-</li>
-<li>
-<a href="#FullyDistributed">完全分布式模式的操作方法</a>
-</li>
-</ul>
-</div>
-  
-    
-<a name="N1000D"></a><a name="%E7%9B%AE%E7%9A%84"></a>
-<h2 class="h3">目的</h2>
-<div class="section">
-<p>这篇文档的目的是帮助你快速完成单机上的Hadoop安装与使用以便你对<a href="hdfs_design.html">Hadoop分布式文件系统(<acronym title="Hadoop Distributed File System">HDFS</acronym>)</a>和Map-Reduce框架有所体会,比如在HDFS上运行示例程序或简单作业等。</p>
-</div>
-    
-<a name="N1001F"></a><a name="PreReqs"></a>
-<h2 class="h3">先决条件</h2>
-<div class="section">
-<a name="N10025"></a><a name="%E6%94%AF%E6%8C%81%E5%B9%B3%E5%8F%B0"></a>
-<h3 class="h4">支持平台</h3>
-<ul>
-          
-<li>
-                GNU/Linux是产品开发和运行的平台。
-	        Hadoop已在有2000个节点的GNU/Linux主机组成的集群系统上得到验证。
-          </li>
-          
-<li>
-            Win32平台是作为<em>开发平台</em>支持的。由于分布式操作尚未在Win32平台上充分测试,所以还不作为一个<em>生产平台</em>被支持。
-          </li>
-        
-</ul>
-<a name="N1003B"></a><a name="%E6%89%80%E9%9C%80%E8%BD%AF%E4%BB%B6"></a>
-<h3 class="h4">所需软件</h3>
-<p>Linux和Windows所需软件包括:</p>
-<ol>
-          
-<li>
-            Java<sup>TM</sup>1.5.x,必须安装,建议选择Sun公司发行的Java版本。
-          </li>
-          
-<li>
-            
-<strong>ssh</strong> 必须安装并且保证 <strong>sshd</strong>一直运行,以便用Hadoop
-	    脚本管理远端Hadoop守护进程。
-          </li>
-        
-</ol>
-<p>Windows下的附加软件需求</p>
-<ol>
-            
-<li>
-              
-<a href="http://www.cygwin.com/">Cygwin</a> - 提供上述软件之外的shell支持。 
-            </li>
-          
-</ol>
-<a name="N10064"></a><a name="%E5%AE%89%E8%A3%85%E8%BD%AF%E4%BB%B6"></a>
-<h3 class="h4">安装软件</h3>
-<p>如果你的集群尚未安装所需软件,你得首先安装它们。</p>
-<p>以Ubuntu Linux为例:</p>
-<p>
-          
-<span class="codefrag">$ sudo apt-get install ssh</span>
-<br>
-          
-<span class="codefrag">$ sudo apt-get install rsync</span>
-        
-</p>
-<p>在Windows平台上,如果安装cygwin时未安装全部所需软件,则需启动cygwin安装管理器安装如下软件包:</p>
-<ul>
-          
-<li>openssh - <em>Net</em> 类</li>
-        
-</ul>
-</div>
-    
-    
-<a name="N10088"></a><a name="%E4%B8%8B%E8%BD%BD"></a>
-<h2 class="h3">下载</h2>
-<div class="section">
-<p>
-        为了获取Hadoop的发行版,从Apache的某个镜像服务器上下载最近的
-        <a href="http://hadoop.apache.org/core/releases.html">稳定发行版</a>。</p>
-</div>
-
-    
-<a name="N10096"></a><a name="%E8%BF%90%E8%A1%8CHadoop%E9%9B%86%E7%BE%A4%E7%9A%84%E5%87%86%E5%A4%87%E5%B7%A5%E4%BD%9C"></a>
-<h2 class="h3">运行Hadoop集群的准备工作</h2>
-<div class="section">
-<p>
-        解压所下载的Hadoop发行版。编辑
-        <span class="codefrag">conf/hadoop-env.sh</span>文件,至少需要将<span class="codefrag">JAVA_HOME</span>设置为Java安装根路径。
-      </p>
-<p>
-	    尝试如下命令:<br>
-        
-<span class="codefrag">$ bin/hadoop</span>
-<br>
-        将会显示<strong>hadoop</strong> 脚本的使用文档。
-      </p>
-<p>现在你可以用以下三种支持的模式中的一种启动Hadoop集群:
-      </p>
-<ul>
-        
-<li>单机模式</li>
-        
-<li>伪分布式模式</li>
-        
-<li>完全分布式模式</li>
-      
-</ul>
-</div>
-    
-    
-<a name="N100C1"></a><a name="Local"></a>
-<h2 class="h3">单机模式的操作方法</h2>
-<div class="section">
-<p>默认情况下,Hadoop被配置成以非分布式模式运行的一个独立Java进程。这对调试非常有帮助。</p>
-<p>
-        下面的实例将已解压的 <span class="codefrag">conf</span> 目录拷贝作为输入,查找并显示匹配给定正则表达式的条目。输出写入到指定的<span class="codefrag">output</span>目录。
-        <br>
-        
-<span class="codefrag">$ mkdir input</span>
-<br>
-        
-<span class="codefrag">$ cp conf/*.xml input</span>
-<br>
-        
-<span class="codefrag">
-          $ bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'
-        </span>
-<br>
-        
-<span class="codefrag">$ cat output/*</span>
-      
-</p>
-</div>
-    
-    
-<a name="N100E5"></a><a name="PseudoDistributed"></a>
-<h2 class="h3">伪分布式模式的操作方法</h2>
-<div class="section">
-<p>Hadoop可以在单节点上以所谓的伪分布式模式运行,此时每一个Hadoop守护进程都作为一个独立的Java进程运行。</p>
-<a name="N100EE"></a><a name="%E9%85%8D%E7%BD%AE"></a>
-<h3 class="h4">配置</h3>
-<p>使用如下的 <span class="codefrag">conf/hadoop-site.xml</span>:</p>
-<table class="ForrestTable" cellspacing="1" cellpadding="4">
-        
-<tr>
-<td colspan="1" rowspan="1">&lt;configuration&gt;</td>
-</tr>
-
-          
-<tr>
-<td colspan="1" rowspan="1">&nbsp;&nbsp;&lt;property&gt;</td>
-</tr>
-            
-<tr>
-<td colspan="1" rowspan="1">&nbsp;&nbsp;&nbsp;&nbsp;&lt;name&gt;fs.default.name&lt;/name&gt;</td>
-</tr>
-            
-<tr>
-<td colspan="1" rowspan="1">&nbsp;&nbsp;&nbsp;&nbsp;&lt;value&gt;localhost:9000&lt;/value&gt;</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">&nbsp;&nbsp;&lt;/property&gt;</td>
-</tr>
-
-          
-<tr>
-<td colspan="1" rowspan="1">&nbsp;&nbsp;&lt;property&gt;</td>
-</tr>
-            
-<tr>
-<td colspan="1" rowspan="1">&nbsp;&nbsp;&nbsp;&nbsp;&lt;name&gt;mapred.job.tracker&lt;/name&gt;</td>
-</tr>
-            
-<tr>
-<td colspan="1" rowspan="1">&nbsp;&nbsp;&nbsp;&nbsp;&lt;value&gt;localhost:9001&lt;/value&gt;</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">&nbsp;&nbsp;&lt;/property&gt;</td>
-</tr>
-
-          
-<tr>
-<td colspan="1" rowspan="1">&nbsp;&nbsp;&lt;property&gt;</td>
-</tr>
-            
-<tr>
-<td colspan="1" rowspan="1">&nbsp;&nbsp;&nbsp;&nbsp;&lt;name&gt;dfs.replication&lt;/name&gt;</td>
-</tr>
-            
-<tr>
-<td colspan="1" rowspan="1">&nbsp;&nbsp;&nbsp;&nbsp;&lt;value&gt;1&lt;/value&gt;</td>
-</tr>
-          
-<tr>
-<td colspan="1" rowspan="1">&nbsp;&nbsp;&lt;/property&gt;</td>
-</tr>
-
-        
-<tr>
-<td colspan="1" rowspan="1">&lt;/configuration&gt;</td>
-</tr>
-        
-</table>
-<a name="N10152"></a><a name="%E5%85%8D%E5%AF%86%E7%A0%81"></a>
-<h3 class="h4">免密码ssh设置</h3>
-<p>
-          现在确认能否不输入口令就用ssh登录localhost:<br>
-          
-<span class="codefrag">$ ssh localhost</span>
-        
-</p>
-<p>
-          如果不输入口令就无法用ssh登录localhost,执行下面的命令:<br>
-   		  
-<span class="codefrag">$ ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa</span>
-<br>
-		  
-<span class="codefrag">$ cat ~/.ssh/id_dsa.pub &gt;&gt; ~/.ssh/authorized_keys</span>
-		
-</p>
-<a name="N10170"></a><a name="%E6%89%A7%E8%A1%8C"></a>
-<h3 class="h4">执行</h3>
-<p>
-          格式化一个新的分布式文件系统:<br>
-          
-<span class="codefrag">$ bin/hadoop namenode -format</span>
-        
-</p>
-<p>
-		  启动Hadoop守护进程:<br>
-          
-<span class="codefrag">$ bin/start-all.sh</span>
-        
-</p>
-<p>Hadoop守护进程的日志写入到 
-        <span class="codefrag">${HADOOP_LOG_DIR}</span> 目录 (默认是 
-        <span class="codefrag">${HADOOP_HOME}/logs</span>).</p>
-<p>浏览NameNode和JobTracker的Web界面,它们的地址默认为:</p>
-<ul>
-          
-<li>
-            
-<span class="codefrag">NameNode</span> - 
-            <a href="http://localhost:50070/">http://localhost:50070/</a>
-          
-</li>
-          
-<li>
-            
-<span class="codefrag">JobTracker</span> - 
-            <a href="http://localhost:50030/">http://localhost:50030/</a>
-          
-</li>
-        
-</ul>
-<p>
-          将输入文件拷贝到分布式文件系统:<br>
-		  
-<span class="codefrag">$ bin/hadoop fs -put conf input</span>
-		
-</p>
-<p>
-          运行发行版提供的示例程序:<br>
-          
-<span class="codefrag">
-            $ bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'
-          </span>
-        
-</p>
-<p>查看输出文件:</p>
-<p>
-          将输出文件从分布式文件系统拷贝到本地文件系统查看:<br>
-          
-<span class="codefrag">$ bin/hadoop fs -get output output</span>
-<br>
-          
-<span class="codefrag">$ cat output/*</span>
-        
-</p>
-<p> 或者 </p>
-<p>
-          在分布式文件系统上查看输出文件:<br>
-          
-<span class="codefrag">$ bin/hadoop fs -cat output/*</span>
-        
-</p>
-<p>
-		  完成全部操作后,停止守护进程:<br>
-		  
-<span class="codefrag">$ bin/stop-all.sh</span>
-		
-</p>
-</div>
-    
-    
-<a name="N101DD"></a><a name="FullyDistributed"></a>
-<h2 class="h3">完全分布式模式的操作方法</h2>
-<div class="section">
-<p>关于搭建完全分布式模式的,有实际意义的集群的资料可以在<a href="cluster_setup.html">这里</a>找到。</p>
-</div>
-    
-    
-<p>
-	    
-<em>Java与JNI是Sun Microsystems, Inc.在美国以及其他国家地区的商标或注册商标。</em>
-    
-</p>
-    
-  
-</div>
-<!--+
-    |end content
-    +-->
-<div class="clearboth">&nbsp;</div>
-</div>
-<div id="footer">
-<!--+
-    |start bottomstrip
-    +-->
-<div class="lastmodified">
-<script type="text/javascript"><!--
-document.write("Last Published: " + document.lastModified);
-//  --></script>
-</div>
-<div class="copyright">
-        Copyright &copy;
-         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
-</div>
-<!--+
-    |end bottomstrip
-    +-->
-</div>
-</body>
-</html>

File diff suppressed because it is too large
+ 0 - 173
docs/cn/quickstart.pdf


+ 0 - 23
docs/cn/skin/CommonMessages_de.xml

@@ -1,23 +0,0 @@
-<?xml version="1.0" encoding="ISO-8859-1"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<catalogue>
-  <message key="Font size:">Schriftgrösse:</message>
-  <message key="Last Published:">Zuletzt veröffentlicht:</message>
-  <message key="Search">Suche:</message>
-  <message key="Search the site with">Suche auf der Seite mit</message>
-</catalogue>

+ 0 - 23
docs/cn/skin/CommonMessages_en_US.xml

@@ -1,23 +0,0 @@
-<?xml version="1.0" encoding="ISO-8859-1"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<catalogue>
-  <message  key="Font size:">Font size:</message>
-  <message key="Last Published:">Last Published:</message>
-  <message key="Search">Search</message>
-  <message key="Search the site with">Search site with</message>
-</catalogue>

+ 0 - 23
docs/cn/skin/CommonMessages_es.xml

@@ -1,23 +0,0 @@
-<?xml version="1.0" encoding="ISO-8859-1"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<catalogue>
-  <message key="Font size:">Tamaño del texto:</message>
-  <message key="Last Published:">Fecha de publicación:</message>
-  <message key="Search">Buscar</message>
-  <message key="Search the site with">Buscar en</message>
-</catalogue>

+ 0 - 23
docs/cn/skin/CommonMessages_fr.xml

@@ -1,23 +0,0 @@
-<?xml version="1.0" encoding="ISO-8859-1"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<catalogue>
-  <message key="Font size:">Taille :</message>
-  <message key="Last Published:">Dernière publication :</message>
-  <message key="Search">Rechercher</message>
-  <message key="Search the site with">Rechercher sur le site avec</message>
-</catalogue>

+ 0 - 166
docs/cn/skin/basic.css

@@ -1,166 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-/**
- * General
- */
-
-img { border: 0; }
-
-#content table {
-  border: 0;
-  width: 100%;
-}
-/*Hack to get IE to render the table at 100%*/
-* html #content table { margin-left: -3px; }
-
-#content th,
-#content td {
-  margin: 0;
-  padding: 0;
-  vertical-align: top;
-}
-
-.clearboth {
-  clear: both;
-}
-
-.note, .warning, .fixme {
-  border: solid black 1px;
-  margin: 1em 3em;
-}
-
-.note .label {
-  background: #369;
-  color: white;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.note .content {
-  background: #F0F0FF;
-  color: black;
-  line-height: 120%;
-  font-size: 90%;
-  padding: 5px 10px;
-}
-.warning .label {
-  background: #C00;
-  color: white;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.warning .content {
-  background: #FFF0F0;
-  color: black;
-  line-height: 120%;
-  font-size: 90%;
-  padding: 5px 10px;
-}
-.fixme .label {
-  background: #C6C600;
-  color: black;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.fixme .content {
-  padding: 5px 10px;
-}
-
-/**
- * Typography
- */
-
-body {
-  font-family: verdana, "Trebuchet MS", arial, helvetica, sans-serif;
-  font-size: 100%;
-}
-
-#content {
-  font-family: Georgia, Palatino, Times, serif;
-  font-size: 95%;
-}
-#tabs {
-  font-size: 70%;
-}
-#menu {
-  font-size: 80%;
-}
-#footer {
-  font-size: 70%;
-}
-
-h1, h2, h3, h4, h5, h6 {
-  font-family: "Trebuchet MS", verdana, arial, helvetica, sans-serif;
-  font-weight: bold;
-  margin-top: 1em;
-  margin-bottom: .5em;
-}
-
-h1 {
-    margin-top: 0;
-    margin-bottom: 1em;
-  font-size: 1.4em;
-}
-#content h1 {
-  font-size: 160%;
-  margin-bottom: .5em;
-}
-#menu h1 {
-  margin: 0;
-  padding: 10px;
-  background: #336699;
-  color: white;
-}
-h2 { font-size: 120%; }
-h3 { font-size: 100%; }
-h4 { font-size: 90%; }
-h5 { font-size: 80%; }
-h6 { font-size: 75%; }
-
-p {
-  line-height: 120%;
-  text-align: left;
-  margin-top: .5em;
-  margin-bottom: 1em;
-}
-
-#content li,
-#content th,
-#content td,
-#content li ul,
-#content li ol{
-  margin-top: .5em;
-  margin-bottom: .5em;
-}
-
-
-#content li li,
-#minitoc-area li{
-  margin-top: 0em;
-  margin-bottom: 0em;
-}
-
-#content .attribution {
-  text-align: right;
-  font-style: italic;
-  font-size: 85%;
-  margin-top: 1em;
-}
-
-.codefrag {
-  font-family: "Courier New", Courier, monospace;
-  font-size: 110%;
-}

+ 0 - 90
docs/cn/skin/breadcrumbs-optimized.js

@@ -1,90 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-var PREPREND_CRUMBS=new Array();
-var link1="@skinconfig.trail.link1.name@";
-var link2="@skinconfig.trail.link2.name@";
-var link3="@skinconfig.trail.link3.name@";
-if(!(link1=="")&&!link1.indexOf( "@" ) == 0){
-  PREPREND_CRUMBS.push( new Array( link1, @skinconfig.trail.link1.href@ ) ); }
-if(!(link2=="")&&!link2.indexOf( "@" ) == 0){
-  PREPREND_CRUMBS.push( new Array( link2, @skinconfig.trail.link2.href@ ) ); }
-if(!(link3=="")&&!link3.indexOf( "@" ) == 0){
-  PREPREND_CRUMBS.push( new Array( link3, @skinconfig.trail.link3.href@ ) ); }
-var DISPLAY_SEPARATOR=" &gt; ";
-var DISPLAY_PREPREND=" &gt; ";
-var DISPLAY_POSTPREND=":";
-var CSS_CLASS_CRUMB="breadcrumb";
-var CSS_CLASS_TRAIL="breadcrumbTrail";
-var CSS_CLASS_SEPARATOR="crumbSeparator";
-var FILE_EXTENSIONS=new Array( ".html", ".htm", ".jsp", ".php", ".php3", ".php4" );
-var PATH_SEPARATOR="/";
-
-function sc(s) {
-	var l=s.toLowerCase();
-	return l.substr(0,1).toUpperCase()+l.substr(1);
-}
-function getdirs() {
-	var t=document.location.pathname.split(PATH_SEPARATOR);
-	var lc=t[t.length-1];
-	for(var i=0;i < FILE_EXTENSIONS.length;i++)
-	{
-		if(lc.indexOf(FILE_EXTENSIONS[i]))
-			return t.slice(1,t.length-1); }
-	return t.slice(1,t.length);
-}
-function getcrumbs( d )
-{
-	var pre = "/";
-	var post = "/";
-	var c = new Array();
-	if( d != null )
-	{
-		for(var i=0;i < d.length;i++) {
-			pre+=d[i]+postfix;
-			pre+=d[i]+post;
-	}
-	if(PREPREND_CRUMBS.length > 0 )
-		return PREPREND_CRUMBS.concat( c );
-	return c;
-}
-function gettrail( c )
-{
-	var h=DISPLAY_PREPREND;
-	for(var i=0;i < c.length;i++)
-	{
-		h+='<a href="'+c[i][1]+'" >'+sc(c[i][0])+'</a>';
-		if(i!=(c.length-1))
-			h+=DISPLAY_SEPARATOR; }
-	return h+DISPLAY_POSTPREND;
-}
-
-function gettrailXHTML( c )
-{
-	var h='<span class="'+CSS_CLASS_TRAIL+'">'+DISPLAY_PREPREND;
-	for(var i=0;i < c.length;i++)
-	{
-		h+='<a href="'+c[i][1]+'" class="'+CSS_CLASS_CRUMB+'">'+sc(c[i][0])+'</a>';
-		if(i!=(c.length-1))
-			h+='<span class="'+CSS_CLASS_SEPARATOR+'">'+DISPLAY_SEPARATOR+'</span>'; }
-	return h+DISPLAY_POSTPREND+'</span>';
-}
-
-if(document.location.href.toLowerCase().indexOf("http://")==-1)
-	document.write(gettrail(getcrumbs()));
-else
-	document.write(gettrail(getcrumbs(getdirs())));
-

+ 0 - 237
docs/cn/skin/breadcrumbs.js

@@ -1,237 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-/**
- * This script, when included in a html file, builds a neat breadcrumb trail
- * based on its url. That is, if it doesn't contain bugs (I'm relatively
- * sure it does).
- *
- * Typical usage:
- * <script type="text/javascript" language="JavaScript" src="breadcrumbs.js"></script>
- */
-
-/**
- * IE 5 on Mac doesn't know Array.push.
- *
- * Implement it - courtesy to fritz.
- */
-var abc	= new Array();
-if (!abc.push) {
-  Array.prototype.push	= function(what){this[this.length]=what}
-}
-
-/* ========================================================================
-	CONSTANTS
-   ======================================================================== */
-
-/**
- * Two-dimensional array containing extra crumbs to place at the front of
- * the trail. Specify first the name of the crumb, then the URI that belongs
- * to it. You'll need to modify this for every domain or subdomain where
- * you use this script (you can leave it as an empty array if you wish)
- */
-var PREPREND_CRUMBS = new Array();
-
-var link1 = "@skinconfig.trail.link1.name@";
-var link2 = "@skinconfig.trail.link2.name@";
-var link3 = "@skinconfig.trail.link3.name@";
-
-var href1 = "@skinconfig.trail.link1.href@";
-var href2 = "@skinconfig.trail.link2.href@";
-var href3 = "@skinconfig.trail.link3.href@";
-
-   if(!(link1=="")&&!link1.indexOf( "@" ) == 0){
-     PREPREND_CRUMBS.push( new Array( link1, href1 ) );
-   }
-   if(!(link2=="")&&!link2.indexOf( "@" ) == 0){
-     PREPREND_CRUMBS.push( new Array( link2, href2 ) );
-   }
-   if(!(link3=="")&&!link3.indexOf( "@" ) == 0){
-     PREPREND_CRUMBS.push( new Array( link3, href3 ) );
-   }
-
-/**
- * String to include between crumbs:
- */
-var DISPLAY_SEPARATOR = " &gt; ";
-/**
- * String to include at the beginning of the trail
- */
-var DISPLAY_PREPREND = " &gt; ";
-/**
- * String to include at the end of the trail
- */
-var DISPLAY_POSTPREND = "";
-
-/**
- * CSS Class to use for a single crumb:
- */
-var CSS_CLASS_CRUMB = "breadcrumb";
-
-/**
- * CSS Class to use for the complete trail:
- */
-var CSS_CLASS_TRAIL = "breadcrumbTrail";
-
-/**
- * CSS Class to use for crumb separator:
- */
-var CSS_CLASS_SEPARATOR = "crumbSeparator";
-
-/**
- * Array of strings containing common file extensions. We use this to
- * determine what part of the url to ignore (if it contains one of the
- * string specified here, we ignore it).
- */
-var FILE_EXTENSIONS = new Array( ".html", ".htm", ".jsp", ".php", ".php3", ".php4" );
-
-/**
- * String that separates parts of the breadcrumb trail from each other.
- * When this is no longer a slash, I'm sure I'll be old and grey.
- */
-var PATH_SEPARATOR = "/";
-
-/* ========================================================================
-	UTILITY FUNCTIONS
-   ======================================================================== */
-/**
- * Capitalize first letter of the provided string and return the modified
- * string.
- */
-function sentenceCase( string )
-{        return string;
-	//var lower = string.toLowerCase();
-	//return lower.substr(0,1).toUpperCase() + lower.substr(1);
-}
-
-/**
- * Returns an array containing the names of all the directories in the
- * current document URL
- */
-function getDirectoriesInURL()
-{
-	var trail = document.location.pathname.split( PATH_SEPARATOR );
-
-	// check whether last section is a file or a directory
-	var lastcrumb = trail[trail.length-1];
-	for( var i = 0; i < FILE_EXTENSIONS.length; i++ )
-	{
-		if( lastcrumb.indexOf( FILE_EXTENSIONS[i] ) )
-		{
-			// it is, remove it and send results
-			return trail.slice( 1, trail.length-1 );
-		}
-	}
-
-	// it's not; send the trail unmodified
-	return trail.slice( 1, trail.length );
-}
-
-/* ========================================================================
-	BREADCRUMB FUNCTIONALITY
-   ======================================================================== */
-/**
- * Return a two-dimensional array describing the breadcrumbs based on the
- * array of directories passed in.
- */
-function getBreadcrumbs( dirs )
-{
-	var prefix = "/";
-	var postfix = "/";
-
-	// the array we will return
-	var crumbs = new Array();
-
-	if( dirs != null )
-	{
-		for( var i = 0; i < dirs.length; i++ )
-		{
-			prefix += dirs[i] + postfix;
-			crumbs.push( new Array( dirs[i], prefix ) );
-		}
-	}
-
-	// preprend the PREPREND_CRUMBS
-	if(PREPREND_CRUMBS.length > 0 )
-	{
-		return PREPREND_CRUMBS.concat( crumbs );
-	}
-
-	return crumbs;
-}
-
-/**
- * Return a string containing a simple text breadcrumb trail based on the
- * two-dimensional array passed in.
- */
-function getCrumbTrail( crumbs )
-{
-	var xhtml = DISPLAY_PREPREND;
-
-	for( var i = 0; i < crumbs.length; i++ )
-	{
-		xhtml += '<a href="' + crumbs[i][1] + '" >';
-		xhtml += unescape( crumbs[i][0] ) + '</a>';
-		if( i != (crumbs.length-1) )
-		{
-			xhtml += DISPLAY_SEPARATOR;
-		}
-	}
-
-	xhtml += DISPLAY_POSTPREND;
-
-	return xhtml;
-}
-
-/**
- * Return a string containing an XHTML breadcrumb trail based on the
- * two-dimensional array passed in.
- */
-function getCrumbTrailXHTML( crumbs )
-{
-	var xhtml = '<span class="' + CSS_CLASS_TRAIL  + '">';
-	xhtml += DISPLAY_PREPREND;
-
-	for( var i = 0; i < crumbs.length; i++ )
-	{
-		xhtml += '<a href="' + crumbs[i][1] + '" class="' + CSS_CLASS_CRUMB + '">';
-		xhtml += unescape( crumbs[i][0] ) + '</a>';
-		if( i != (crumbs.length-1) )
-		{
-			xhtml += '<span class="' + CSS_CLASS_SEPARATOR + '">' + DISPLAY_SEPARATOR + '</span>';
-		}
-	}
-
-	xhtml += DISPLAY_POSTPREND;
-	xhtml += '</span>';
-
-	return xhtml;
-}
-
-/* ========================================================================
-	PRINT BREADCRUMB TRAIL
-   ======================================================================== */
-
-// check if we're local; if so, only print the PREPREND_CRUMBS
-if( document.location.href.toLowerCase().indexOf( "http://" ) == -1 )
-{
-	document.write( getCrumbTrail( getBreadcrumbs() ) );
-}
-else
-{
-	document.write( getCrumbTrail( getBreadcrumbs( getDirectoriesInURL() ) ) );
-}
-

+ 0 - 166
docs/cn/skin/fontsize.js

@@ -1,166 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-function init() 
-{ //embedded in the doc
-  //ndeSetTextSize();
-}
-
-function checkBrowser(){
-  if (!document.getElementsByTagName){
-    return true;
-  }
-  else{
-    return false;
-  }
-}
-
-
-function ndeSetTextSize(chgsize,rs) 
-{
-  var startSize;
-  var newSize;
-
-  if (!checkBrowser)
-  {
-    return;
-  }
-
-  startSize = parseInt(ndeGetDocTextSize());
-
-  if (!startSize)
-  {
-    startSize = 16;
-  }
-
-  switch (chgsize)
-  {
-  case 'incr':
-    newSize = startSize + 2;
-    break;
-
-  case 'decr':
-    newSize = startSize - 2;
-    break;
-
-  case 'reset':
-    if (rs) {newSize = rs;} else {newSize = 16;}
-    break;
-
-  default:
-    try{
-      newSize = parseInt(ndeReadCookie("nde-textsize"));
-    }
-    catch(e){
-      alert(e);
-    }
-    
-    if (!newSize || newSize == 'NaN')
-    {
-      newSize = startSize;
-    }
-    break;
-
-  }
-
-  if (newSize < 10) 
-  {
-    newSize = 10;
-  }
-
-  newSize += 'px';
-
-  document.getElementsByTagName('html')[0].style.fontSize = newSize;
-  document.getElementsByTagName('body')[0].style.fontSize = newSize;
-
-  ndeCreateCookie("nde-textsize", newSize, 365);
-}
-
-function ndeGetDocTextSize() 
-{
-  if (!checkBrowser)
-  {
-    return 0;
-  }
-
-  var size = 0;
-  var body = document.getElementsByTagName('body')[0];
-
-  if (body.style && body.style.fontSize)
-  {
-    size = body.style.fontSize;
-  }
-  else if (typeof(getComputedStyle) != 'undefined')
-  {
-    size = getComputedStyle(body,'').getPropertyValue('font-size');
-  }
-  else if (body.currentStyle)
-  {
-   size = body.currentStyle.fontSize;
-  }
-
-  //fix IE bug
-  if( isNaN(size)){
-    if(size.substring(size.length-1)=="%"){
-      return
-    }
-
-  }
-
-  return size;
-
-}
-
-
-
-function ndeCreateCookie(name,value,days) 
-{
-  var cookie = name + "=" + value + ";";
-
-  if (days) 
-  {
-    var date = new Date();
-    date.setTime(date.getTime()+(days*24*60*60*1000));
-    cookie += " expires=" + date.toGMTString() + ";";
-  }
-  cookie += " path=/";
-
-  document.cookie = cookie;
-
-}
-
-function ndeReadCookie(name) 
-{
-  var nameEQ = name + "=";
-  var ca = document.cookie.split(';');
-
- 
-  for(var i = 0; i < ca.length; i++) 
-  {
-    var c = ca[i];
-    while (c.charAt(0) == ' ') 
-    {
-      c = c.substring(1, c.length);
-    }
-
-    ctest = c.substring(0,name.length);
- 
-    if(ctest == name){
-      return c.substring(nameEQ.length,c.length);
-    }
-  }
-  return null;
-}

+ 0 - 40
docs/cn/skin/getBlank.js

@@ -1,40 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-/**
- * getBlank script - when included in a html file and called from a form text field, will set the value of this field to ""
- * if the text value is still the standard value.
- * getPrompt script - when included in a html file and called from a form text field, will set the value of this field to the prompt
- * if the text value is empty.
- *
- * Typical usage:
- * <script type="text/javascript" language="JavaScript" src="getBlank.js"></script>
- * <input type="text" id="query" value="Search the site:" onFocus="getBlank (this, 'Search the site:');" onBlur="getBlank (this, 'Search the site:');"/>
- */
-<!--
-function getBlank (form, stdValue){
-if (form.value == stdValue){
-	form.value = '';
-	}
-return true;
-}
-function getPrompt (form, stdValue){
-if (form.value == ''){
-	form.value = stdValue;
-	}
-return true;
-}
-//-->

+ 0 - 45
docs/cn/skin/getMenu.js

@@ -1,45 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-/**
- * This script, when included in a html file, can be used to make collapsible menus
- *
- * Typical usage:
- * <script type="text/javascript" language="JavaScript" src="menu.js"></script>
- */
-
-if (document.getElementById){ 
-  document.write('<style type="text/css">.menuitemgroup{display: none;}</style>')
-}
-
-
-function SwitchMenu(obj, thePath)
-{
-var open = 'url("'+thePath + 'images/chapter_open.gif")';
-var close = 'url("'+thePath + 'images/chapter.gif")';
-  if(document.getElementById)  {
-    var el = document.getElementById(obj);
-    var title = document.getElementById(obj+'Title');
-
-    if(el.style.display != "block"){ 
-      title.style.backgroundImage = open;
-      el.style.display = "block";
-    }else{
-      title.style.backgroundImage = close;
-      el.style.display = "none";
-    }
-  }// end -  if(document.getElementById) 
-}//end - function SwitchMenu(obj)

+ 0 - 1
docs/cn/skin/images/README.txt

@@ -1 +0,0 @@
-The images in this directory are used if the current skin lacks them.

BIN
docs/cn/skin/images/add.jpg


BIN
docs/cn/skin/images/built-with-forrest-button.png


BIN
docs/cn/skin/images/chapter.gif


BIN
docs/cn/skin/images/chapter_open.gif


+ 0 - 92
docs/cn/skin/images/corner-imports.svg.xslt

@@ -1,92 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
-  <xsl:param name="orientation-tb"/>
-  <xsl:param name="orientation-lr"/>
-  <xsl:param name="size"/>
-  <xsl:param name="bg-color-name"/>
-  <xsl:param name="stroke-color-name"/>
-  <xsl:param name="fg-color-name"/>
-<!-- if not all colors are present, don't even try to render the corners -->
-  <xsl:variable name="isize">
-    <xsl:choose>
-      <xsl:when test="$bg-color-name and $stroke-color-name and $fg-color-name">
-        <xsl:value-of select="$size"/>
-      </xsl:when>
-      <xsl:otherwise>0</xsl:otherwise>
-    </xsl:choose>
-  </xsl:variable>
-  <xsl:variable name="smallersize" select="number($isize)-1"/>
-  <xsl:variable name="biggersize" select="number($isize)+1"/>
-  <xsl:variable name="bg">
-    <xsl:if test="skinconfig/colors/color[@name=$bg-color-name]">fill:<xsl:value-of select="skinconfig/colors/color[@name=$bg-color-name]/@value"/>;</xsl:if>
-  </xsl:variable>
-  <xsl:variable name="fill">
-    <xsl:if test="skinconfig/colors/color[@name=$stroke-color-name]">fill:<xsl:value-of select="skinconfig/colors/color[@name=$stroke-color-name]/@value"/>;</xsl:if>
-  </xsl:variable>
-  <xsl:variable name="stroke">
-    <xsl:if test="skinconfig/colors/color[@name=$fg-color-name]">stroke:<xsl:value-of select="skinconfig/colors/color[@name=$fg-color-name]/@value"/>;</xsl:if>
-  </xsl:variable>
-  <xsl:template match="skinconfig">
-    <svg width="{$isize}" height="{$isize}">
-<!-- background-->
-      <rect x="-1" y="-1" width="{$biggersize}" height="{$biggersize}" style="{$bg}stroke-width:0"/>
-<!-- 0,0 0,-4 4,0 4,-4-->
-      <xsl:variable name="flip-tb-scale">
-        <xsl:choose>
-          <xsl:when test="$orientation-tb='t'">1</xsl:when>
-          <xsl:otherwise>-1</xsl:otherwise>
-        </xsl:choose>
-      </xsl:variable>
-      <xsl:variable name="flip-lr-scale">
-        <xsl:choose>
-          <xsl:when test="$orientation-lr='l'">1</xsl:when>
-          <xsl:otherwise>-1</xsl:otherwise>
-        </xsl:choose>
-      </xsl:variable>
-      <xsl:variable name="flip-tb-translate">
-        <xsl:choose>
-          <xsl:when test="$orientation-tb='t'">0</xsl:when>
-          <xsl:otherwise>-<xsl:value-of select="$isize" />
-          </xsl:otherwise>
-        </xsl:choose>
-      </xsl:variable>
-      <xsl:variable name="flip-lr-translate">
-        <xsl:choose>
-          <xsl:when test="$orientation-lr='l'">0</xsl:when>
-          <xsl:otherwise>-<xsl:value-of select="$isize" />
-          </xsl:otherwise>
-        </xsl:choose>
-      </xsl:variable>
-<!-- flip transform -->
-      <g transform="scale({$flip-lr-scale},{$flip-tb-scale}) translate({$flip-lr-translate}, {$flip-tb-translate})">
-        <xsl:call-template name="figure" />
-      </g>
-    </svg>
-  </xsl:template>
-  <xsl:template name="figure">
-<!-- Just change shape here -->
-    <g transform="translate(0.5 0.5)">
-      <ellipse cx="{$smallersize}" cy="{$smallersize}" rx="{$smallersize}" ry="{$smallersize}"
-				 style="{$fill}{$stroke}stroke-width:1"/>
-    </g>
-<!-- end -->
-  </xsl:template>
-  <xsl:template match="*"></xsl:template>
-  <xsl:template match="text()"></xsl:template>
-</xsl:stylesheet>

BIN
docs/cn/skin/images/current.gif


+ 0 - 28
docs/cn/skin/images/dc.svg.xslt

@@ -1,28 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
-  <xsl:import href="corner-imports.svg.xslt" />
-<!-- Diagonal 45 degrees corner -->
-  <xsl:template name="figure">
-    <xsl:variable name="biggersize" select="number($size)+number($size)"/>
-    <g transform="translate(0 0.5)">
-      <polygon points="0,{$size} {$size},0 {$biggersize},0 {$biggersize},{$biggersize} 0,{$biggersize}"
-                    style="{$fill}{$stroke}stroke-width:1"/>
-    </g>
-  </xsl:template>
-</xsl:stylesheet>

BIN
docs/cn/skin/images/error.png


BIN
docs/cn/skin/images/external-link.gif


BIN
docs/cn/skin/images/fix.jpg


BIN
docs/cn/skin/images/forrest-credit-logo.png


BIN
docs/cn/skin/images/hack.jpg


BIN
docs/cn/skin/images/header_white_line.gif


BIN
docs/cn/skin/images/info.png


BIN
docs/cn/skin/images/instruction_arrow.png


BIN
docs/cn/skin/images/label.gif


BIN
docs/cn/skin/images/page.gif


BIN
docs/cn/skin/images/pdfdoc.gif


BIN
docs/cn/skin/images/poddoc.png


+ 0 - 55
docs/cn/skin/images/poddoc.svg.xslt

@@ -1,55 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<svg width="20pt" height="20pt"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:xlink="http://www.w3.org/1999/xlink">
-  <defs
-     id="defs550">
-    <linearGradient id="gray2white">
-      <stop style="stop-color:#7f7f7f;stop-opacity:1;" offset="0.000000"/>
-      <stop style="stop-color:#ffffff;stop-opacity:1;" offset="1.000000"/>
-    </linearGradient>
-    <linearGradient id="pageshade" xlink:href="#gray2white"
-       x1="0.95" y1="0.95"
-       x2="0.40" y2="0.20"
-       gradientUnits="objectBoundingBox" spreadMethod="pad" />
-    <path d="M 0 0 L 200 0" style="stroke:#000000;stroke-width:1pt;" id="hr"/>
-  </defs>
-  <g transform="scale(0.08)">
-    <g transform="translate(40, 0)">
-      <rect width="230" height="300" x="0" y="0"
-            style="fill:url(#pageshade);fill-rule:evenodd;
-            stroke:#000000;stroke-width:1.25;"/>
-      <g transform="translate(15, 60)">
-        <use xlink:href="#hr" x="0" y="0"/>
-        <use xlink:href="#hr" x="0" y="60"/>
-        <use xlink:href="#hr" x="0" y="120"/>
-        <use xlink:href="#hr" x="0" y="180"/>
-      </g>
-    </g>
-    <g transform="translate(0,70),scale(1.1,1.6)">
-      <rect width="200" height="100" x="0" y="0"
-         style="fill:#ff0000;fill-rule:evenodd;
-                stroke:#000000;stroke-width:2.33903;"/>
-      <text x="20" y="75"
-            style="stroke:#ffffff;stroke-width:1.0;
-                   font-size:72;font-weight:normal;fill:#ffffff;
-                   font-family:Arial;text-anchor:start;">POD</text>
-    </g>
-  </g>
-</svg>

BIN
docs/cn/skin/images/printer.gif


BIN
docs/cn/skin/images/rc-b-l-15-1body-2menu-3menu.png


BIN
docs/cn/skin/images/rc-b-r-15-1body-2menu-3menu.png


BIN
docs/cn/skin/images/rc-b-r-5-1header-2tab-selected-3tab-selected.png


BIN
docs/cn/skin/images/rc-t-l-5-1header-2searchbox-3searchbox.png


BIN
docs/cn/skin/images/rc-t-l-5-1header-2tab-selected-3tab-selected.png


BIN
docs/cn/skin/images/rc-t-l-5-1header-2tab-unselected-3tab-unselected.png


BIN
docs/cn/skin/images/rc-t-r-15-1body-2menu-3menu.png


BIN
docs/cn/skin/images/rc-t-r-5-1header-2searchbox-3searchbox.png


Some files were not shown because too many files changed in this diff