
Merge -r 594459:594460 from trunk to branch-0.15 to fix HADOOP-1917

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/branches/branch-0.15@594461 13f79535-47bb-0310-9956-ffa450edef68
Arun Murthy 17 years ago · parent commit 2ef58393f1

+ 11 - 0
CHANGES.txt

@@ -3,6 +3,17 @@ Hadoop Change Log
 
 Release 0.15.1 -
 
+  IMPROVEMENTS
+
+    HADOOP-1917.  Addition of guides/tutorial for better overall
+    documentation for Hadoop. Specifically: 
+    * quickstart.html is targeted towards first-time users and helps them 
+      set up a single-node cluster and play with Hadoop. 
+    * cluster_setup.html helps admins configure and set up non-trivial
+      Hadoop clusters.
+    * mapred_tutorial.html is a comprehensive Map-Reduce tutorial. 
+    (acmurthy) 
+
   BUG FIXES
 
     HADOOP-2174.  Removed the unnecessary Reporter.setStatus call from

+ 730 - 0
docs/cluster_setup.html

@@ -0,0 +1,730 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+<head>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<meta content="Apache Forrest" name="Generator">
+<meta name="Forrest-version" content="0.8">
+<meta name="Forrest-skin-name" content="pelt">
+<title>Hadoop Cluster Setup</title>
+<link type="text/css" href="skin/basic.css" rel="stylesheet">
+<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
+<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
+<link type="text/css" href="skin/profile.css" rel="stylesheet">
+<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
+<link rel="shortcut icon" href="images/favicon.ico">
+</head>
+<body onload="init()">
+<script type="text/javascript">ndeSetTextSize();</script>
+<div id="top">
+<!--+
+    |breadtrail
+    +-->
+<div class="breadtrail">
+<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://lucene.apache.org/">Lucene</a> &gt; <a href="http://lucene.apache.org/hadoop/">Hadoop</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
+</div>
+<!--+
+    |header
+    +-->
+<div class="header">
+<!--+
+    |start group logo
+    +-->
+<div class="grouplogo">
+<a href="http://lucene.apache.org/"><img class="logoImage" alt="Lucene" src="images/lucene_green_150.gif" title="Apache Lucene"></a>
+</div>
+<!--+
+    |end group logo
+    +-->
+<!--+
+    |start Project Logo
+    +-->
+<div class="projectlogo">
+<a href="http://lucene.apache.org/hadoop/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Scalable Computing Platform"></a>
+</div>
+<!--+
+    |end Project Logo
+    +-->
+<!--+
+    |start Search
+    +-->
+<div class="searchbox">
+<form action="http://www.google.com/search" method="get" class="roundtopsmall">
+<input value="lucene.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
+                    <input name="Search" value="Search" type="submit">
+</form>
+</div>
+<!--+
+    |end search
+    +-->
+<!--+
+    |start Tabs
+    +-->
+<ul id="tabs">
+<li class="current">
+<a class="selected" href="index.html">Main</a>
+</li>
+<li>
+<a class="unselected" href="http://wiki.apache.org/lucene-hadoop">Wiki</a>
+</li>
+</ul>
+<!--+
+    |end Tabs
+    +-->
+</div>
+</div>
+<div id="main">
+<div id="publishedStrip">
+<!--+
+    |start Subtabs
+    +-->
+<div id="level2tabs"></div>
+<!--+
+    |end Endtabs
+    +-->
+<script type="text/javascript"><!--
+document.write("Last Published: " + document.lastModified);
+//  --></script>
+</div>
+<!--+
+    |breadtrail
+    +-->
+<div class="breadtrail">
+
+             &nbsp;
+           </div>
+<!--+
+    |start Menu, mainarea
+    +-->
+<!--+
+    |start Menu
+    +-->
+<div id="menu">
+<div onclick="SwitchMenu('menu_1.1', 'skin/')" id="menu_1.1Title" class="menutitle">Project</div>
+<div id="menu_1.1" class="menuitemgroup">
+<div class="menuitem">
+<a href="releases.html">Releases</a>
+</div>
+<div class="menuitem">
+<a href="releases.html#News">News</a>
+</div>
+<div class="menuitem">
+<a href="credits.html">Credits</a>
+</div>
+<div class="menuitem">
+<a href="http://www.cafepress.com/hadoop/">Buy Stuff</a>
+</div>
+</div>
+<div onclick="SwitchMenu('menu_selected_1.2', 'skin/')" id="menu_selected_1.2Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">Documentation</div>
+<div id="menu_selected_1.2" class="selectedmenuitemgroup" style="display: block;">
+<div class="menuitem">
+<a href="documentation.html">Overview</a>
+</div>
+<div class="menuitem">
+<a href="quickstart.html">Quickstart</a>
+</div>
+<div class="menupage">
+<div class="menupagetitle">Cluster Setup</div>
+</div>
+<div class="menuitem">
+<a href="hdfs_design.html">HDFS Architecture</a>
+</div>
+<div class="menuitem">
+<a href="mapred_tutorial.html">Map-Reduce Tutorial</a>
+</div>
+<div class="menuitem">
+<a href="api/index.html">API Docs</a>
+</div>
+<div class="menuitem">
+<a href="http://wiki.apache.org/lucene-hadoop/">Wiki</a>
+</div>
+<div class="menuitem">
+<a href="http://wiki.apache.org/lucene-hadoop/FAQ">FAQ</a>
+</div>
+<div class="menuitem">
+<a href="mailing_lists.html#Users">Mailing Lists</a>
+</div>
+</div>
+<div onclick="SwitchMenu('menu_1.3', 'skin/')" id="menu_1.3Title" class="menutitle">Developers</div>
+<div id="menu_1.3" class="menuitemgroup">
+<div class="menuitem">
+<a href="mailing_lists.html#Developers">Mailing Lists</a>
+</div>
+<div class="menuitem">
+<a href="issue_tracking.html">Issue Tracking</a>
+</div>
+<div class="menuitem">
+<a href="version_control.html">Version Control</a>
+</div>
+<div class="menuitem">
+<a href="http://lucene.zones.apache.org:8080/hudson/job/Hadoop-Nightly/">Nightly Build</a>
+</div>
+<div class="menuitem">
+<a href="irc.html">IRC Channel</a>
+</div>
+</div>
+<div id="credit"></div>
+<div id="roundbottom">
+<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
+<!--+
+  |alternative credits
+  +-->
+<div id="credit2"></div>
+</div>
+<!--+
+    |end Menu
+    +-->
+<!--+
+    |start content
+    +-->
+<div id="content">
+<div title="Portable Document Format" class="pdflink">
+<a class="dida" href="cluster_setup.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
+        PDF</a>
+</div>
+<h1>Hadoop Cluster Setup</h1>
+<div id="minitoc-area">
+<ul class="minitoc">
+<li>
+<a href="#Purpose">Purpose</a>
+</li>
+<li>
+<a href="#Pre-requisites">Pre-requisites</a>
+</li>
+<li>
+<a href="#Installation">Installation</a>
+</li>
+<li>
+<a href="#Configuration">Configuration</a>
+<ul class="minitoc">
+<li>
+<a href="#Configuration+Files">Configuration Files</a>
+</li>
+<li>
+<a href="#Site+Configuration">Site Configuration</a>
+<ul class="minitoc">
+<li>
+<a href="#Configuring+the+Environment+of+the+Hadoop+Daemons">Configuring the Environment of the Hadoop Daemons</a>
+</li>
+<li>
+<a href="#Configuring+the+Hadoop+Daemons">Configuring the Hadoop Daemons</a>
+</li>
+<li>
+<a href="#Slaves">Slaves</a>
+</li>
+<li>
+<a href="#Logging">Logging</a>
+</li>
+</ul>
+</li>
+</ul>
+</li>
+<li>
+<a href="#Hadoop+Startup">Hadoop Startup</a>
+</li>
+<li>
+<a href="#Hadoop+Shutdown">Hadoop Shutdown</a>
+</li>
+</ul>
+</div>
+  
+    
+<a name="N1000C"></a><a name="Purpose"></a>
+<h2 class="h3">Purpose</h2>
+<div class="section">
+<p>This document describes how to install, configure and manage non-trivial
+      Hadoop clusters ranging from a few nodes to extremely large clusters with 
+      thousands of nodes.</p>
+<p>If you are looking to install Hadoop on a single machine to play
+      with it, you can find relevant details <a href="quickstart.html">here</a>.
+      </p>
+</div>
+    
+    
+<a name="N1001D"></a><a name="Pre-requisites"></a>
+<h2 class="h3">Pre-requisites</h2>
+<div class="section">
+<ol>
+        
+<li>
+          Make sure all <a href="quickstart.html#PreReqs">requisite</a> software 
+          is installed on all nodes in your cluster.
+        </li>
+        
+<li>
+          
+<a href="quickstart.html#GetHadoop">Get</a> the Hadoop software.
+        </li>
+      
+</ol>
+</div>
+    
+    
+<a name="N10035"></a><a name="Installation"></a>
+<h2 class="h3">Installation</h2>
+<div class="section">
+<p>Installing a Hadoop cluster typically involves unpacking the software 
+      on all the machines in the cluster.</p>
+<p>Typically one machine in the cluster is designated as the 
+      <span class="codefrag">NameNode</span> and another machine the as <span class="codefrag">JobTracker</span>,
+      exclusively. These are the <em>masters</em>. The rest of the machines in 
+      the cluster act as both <span class="codefrag">DataNode</span> <em>and</em> 
+      <span class="codefrag">TaskTracker</span>. These are the <em>slaves</em>.</p>
+<p>The root of the distribution is referred to as 
+      <span class="codefrag">HADOOP_HOME</span>. All machines in the cluster usually have the same 
+      <span class="codefrag">HADOOP_HOME</span> path.</p>
+</div>
+    
+    
+<a name="N10060"></a><a name="Configuration"></a>
+<h2 class="h3">Configuration</h2>
+<div class="section">
+<p>The following sections describe how to configure a Hadoop cluster.</p>
+<a name="N10069"></a><a name="Configuration+Files"></a>
+<h3 class="h4">Configuration Files</h3>
+<p>Hadoop configuration is driven by two important configuration files
+        found in the <span class="codefrag">conf/</span> directory of the distribution:</p>
+<ol>
+          
+<li>
+            
+<a href="http://lucene.apache.org/hadoop/hadoop-default.html">hadoop-default.xml</a> - Read-only 
+            default configuration.
+          </li>
+          
+<li>
+            
+<em>hadoop-site.xml</em> - Site-specific configuration.
+          </li>
+        
+</ol>
+<p>To learn more about how the Hadoop framework is controlled by these 
+        configuration files, look <a href="api/org/apache/hadoop/conf/Configuration.html">here</a>.</p>
+<p>Additionally, you can control the Hadoop scripts found in the 
+        <span class="codefrag">bin/</span> directory of the distribution, by setting site-specific 
+        values via the <span class="codefrag">conf/hadoop-env.sh</span>.</p>
+<a name="N10096"></a><a name="Site+Configuration"></a>
+<h3 class="h4">Site Configuration</h3>
+<p>To configure the Hadoop cluster you will need to configure the
+        <em>environment</em> in which the Hadoop daemons execute as well as
+        the <em>configuration parameters</em> for the Hadoop daemons.</p>
+<p>The Hadoop daemons are <span class="codefrag">NameNode</span>/<span class="codefrag">DataNode</span> 
+        and <span class="codefrag">JobTracker</span>/<span class="codefrag">TaskTracker</span>.</p>
+<a name="N100B4"></a><a name="Configuring+the+Environment+of+the+Hadoop+Daemons"></a>
+<h4>Configuring the Environment of the Hadoop Daemons</h4>
+<p>Administrators should use the <span class="codefrag">conf/hadoop-env.sh</span> script
+          to do site-specific customization of the Hadoop daemons' process 
+          environment.</p>
+<p>At the very least you should specify the
+          <span class="codefrag">JAVA_HOME</span> so that it is correctly defined on each
+          remote node.</p>
+<p>Other useful configuration parameters that you can customize 
+          include:</p>
+<ul>
+            
+<li>
+              
+<span class="codefrag">HADOOP_LOG_DIR</span> - The directory where the daemons'
+              log files are stored. They are automatically created if they don't
+              exist.
+            </li>
+            
+<li>
+              
+<span class="codefrag">HADOOP_HEAPSIZE</span> - The maximum amount of heapsize 
+              to use, in MB e.g. <span class="codefrag">2000MB</span>.
+            </li>
+          
+</ul>
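+<p>As an illustration only, a minimal site-specific customization in 
+          <span class="codefrag">conf/hadoop-env.sh</span> might look like the following; 
+          the paths and sizes are placeholders to adapt to your nodes:</p>
+<pre>
+# Sketch of conf/hadoop-env.sh settings (example values only)
+export JAVA_HOME=/usr/local/java        # location of the JDK on each node
+export HADOOP_LOG_DIR=/var/log/hadoop   # where daemon log files are written
+export HADOOP_HEAPSIZE=2000             # daemon heap size, in MB
+</pre>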
+<a name="N100DC"></a><a name="Configuring+the+Hadoop+Daemons"></a>
+<h4>Configuring the Hadoop Daemons</h4>
+<p>This section deals with important parameters to be specified in the
+          <span class="codefrag">conf/hadoop-site.xml</span> for the Hadoop cluster.</p>
+<table class="ForrestTable" cellspacing="1" cellpadding="4">
+  		    
+<tr>
+		      
+<th colspan="1" rowspan="1">Parameter</th>
+		      <th colspan="1" rowspan="1">Value</th> 
+		      <th colspan="1" rowspan="1">Notes</th>
+		    
+</tr>
+  		    
+<tr>
+		      
+<td colspan="1" rowspan="1">fs.default.name</td>
+  		      <td colspan="1" rowspan="1">Hostname or IP address of <span class="codefrag">NameNode</span>.</td>
+		      <td colspan="1" rowspan="1"><em>host:port</em> pair.</td>
+		    
+</tr>
+		    
+<tr>
+		      
+<td colspan="1" rowspan="1">mapred.job.tracker</td>
+		      <td colspan="1" rowspan="1">Hostname or IP address of <span class="codefrag">JobTracker</span>.</td>
+		      <td colspan="1" rowspan="1"><em>host:port</em> pair.</td>
+		    
+</tr>
+		    
+<tr>
+		      
+<td colspan="1" rowspan="1">dfs.name.dir</td>
+		      <td colspan="1" rowspan="1">
+		        Path on the local filesystem where the <span class="codefrag">NameNode</span> 
+		        stores the namespace and transactions logs persistently.</td>
+		      <td colspan="1" rowspan="1">
+		        If this is a comma-delimited list of directories then the name 
+		        table is replicated in all of the directories, for redundancy.
+		      </td>
+		    
+</tr>
+		    
+<tr>
+		      
+<td colspan="1" rowspan="1">dfs.data.dir</td>
+		      <td colspan="1" rowspan="1">
+		        Comma separated list of paths on the local filesystem of a 
+		        <span class="codefrag">DataNode</span> where it should store its blocks.
+		      </td>
+		      <td colspan="1" rowspan="1">
+		        If this is a comma-delimited list of directories, then data will 
+		        be stored in all named directories, typically on different 
+		        devices.
+		      </td>
+		    
+</tr>
+		    
+<tr>
+		      
+<td colspan="1" rowspan="1">mapred.system.dir</td>
+		      <td colspan="1" rowspan="1">
+		        Path on the HDFS where the Map-Reduce framework stores 
+		        system files e.g. <span class="codefrag">/hadoop/mapred/system/</span>.
+		      </td>
+		      <td colspan="1" rowspan="1">
+		        This is in the default filesystem (HDFS) and must be accessible 
+		        from both the server and client machines.
+		      </td>
+		    
+</tr>
+		    
+<tr>
+		      
+<td colspan="1" rowspan="1">mapred.local.dir</td>
+		      <td colspan="1" rowspan="1">
+		        Comma-separated list of paths on the local filesystem where 
+		        temporary Map-Reduce data is written.
+		      </td>
+		      <td colspan="1" rowspan="1">Multiple paths help spread disk i/o.</td>
+		    
+</tr>
+		    
+<tr>
+		      
+<td colspan="1" rowspan="1">mapred.tasktracker.tasks.maximum</td>
+		      <td colspan="1" rowspan="1">
+		        The maximum number of map and reduce tasks that are run 
+		        simultaneously on a given <span class="codefrag">TaskTracker</span>; the limit applies to maps and reduces individually.
+		      </td>
+		      <td colspan="1" rowspan="1">
+		        Defaults to 2 (2 maps and 2 reduces), but vary it depending on 
+		        your hardware.
+		      </td>
+		    
+</tr>
+		    
+<tr>
+		      
+<td colspan="1" rowspan="1">dfs.hosts/dfs.hosts.exclude</td>
+		      <td colspan="1" rowspan="1">List of permitted/excluded DataNodes.</td>
+		      <td colspan="1" rowspan="1">
+		        If necessary, use these files to control the list of allowable 
+		        datanodes.
+		      </td>
+		    
+</tr>
+		    
+<tr>
+		      
+<td colspan="1" rowspan="1">mapred.hosts/mapred.hosts.exclude</td>
+		      <td colspan="1" rowspan="1">List of permitted/excluded TaskTrackers.</td>
+		      <td colspan="1" rowspan="1">
+		        If necessary, use these files to control the list of allowable 
+		        tasktrackers.
+		      </td>
+  		    
+</tr>
+		  
+</table>
+<p>Typically all the above parameters are marked as 
+          <a href="api/index.html?org/apache/hadoop/conf/Configuration.html#FinalParameters">
+          final</a> to ensure that they cannot be overridden by user applications.
+          </p>
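+<p>As a rough sketch, and not a recommendation for any particular site, a 
+          <span class="codefrag">conf/hadoop-site.xml</span> combining a few of the 
+          parameters above might look as follows; hostnames, ports and paths are 
+          placeholders:</p>
+<pre>
+&lt;configuration&gt;
+  &lt;!-- host:port of the NameNode (placeholder values) --&gt;
+  &lt;property&gt;
+    &lt;name&gt;fs.default.name&lt;/name&gt;
+    &lt;value&gt;namenodehost:9000&lt;/value&gt;
+    &lt;final&gt;true&lt;/final&gt;
+  &lt;/property&gt;
+  &lt;!-- host:port of the JobTracker (placeholder values) --&gt;
+  &lt;property&gt;
+    &lt;name&gt;mapred.job.tracker&lt;/name&gt;
+    &lt;value&gt;jobtrackerhost:9001&lt;/value&gt;
+    &lt;final&gt;true&lt;/final&gt;
+  &lt;/property&gt;
+  &lt;!-- local directories where DataNodes store blocks (placeholder paths) --&gt;
+  &lt;property&gt;
+    &lt;name&gt;dfs.data.dir&lt;/name&gt;
+    &lt;value&gt;/disk1/dfs/data,/disk2/dfs/data&lt;/value&gt;
+    &lt;final&gt;true&lt;/final&gt;
+  &lt;/property&gt;
+&lt;/configuration&gt;
+</pre>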
+<a name="N101BC"></a><a name="Real-World+Cluster+Configurations"></a>
+<h5>Real-World Cluster Configurations</h5>
+<p>This section lists some non-default configuration parameters which 
+            have been used to run the <em>sort</em> benchmark on very large 
+            clusters.</p>
+<ul>
+              
+<li>
+                
+<p>Some non-default configuration values used to run sort900,
+                that is 9TB of data sorted on a cluster with 900 nodes:</p>
+                
+<table class="ForrestTable" cellspacing="1" cellpadding="4">
+  		          
+<tr>
+		            
+<th colspan="1" rowspan="1">Parameter</th>
+		            <th colspan="1" rowspan="1">Value</th> 
+		            <th colspan="1" rowspan="1">Notes</th>
+		          
+</tr>
+                  
+<tr>
+                    
+<td colspan="1" rowspan="1">dfs.block.size</td>
+                    <td colspan="1" rowspan="1">134217728</td>
+                    <td colspan="1" rowspan="1">HDFS blocksize of 128MB for large file-systems.</td>
+                  
+</tr>
+                  
+<tr>
+                    
+<td colspan="1" rowspan="1">dfs.namenode.handler.count</td>
+                    <td colspan="1" rowspan="1">40</td>
+                    <td colspan="1" rowspan="1">
+                      More NameNode server threads to handle RPCs from large 
+                      number of DataNodes.
+                    </td>
+                  
+</tr>
+                  
+<tr>
+                    
+<td colspan="1" rowspan="1">mapred.reduce.parallel.copies</td>
+                    <td colspan="1" rowspan="1">20</td>
+                    <td colspan="1" rowspan="1">
+                      Higher number of parallel copies run by reduces to fetch
+                      outputs from very large number of maps.
+                    </td>
+                  
+</tr>
+                  
+<tr>
+                    
+<td colspan="1" rowspan="1">mapred.child.java.opts</td>
+                    <td colspan="1" rowspan="1">-Xmx512M</td>
+                    <td colspan="1" rowspan="1">
+                      Larger heap-size for child jvms of maps/reduces.
+                    </td>
+                  
+</tr>
+                  
+<tr>
+                    
+<td colspan="1" rowspan="1">fs.inmemory.size.mb</td>
+                    <td colspan="1" rowspan="1">200</td>
+                    <td colspan="1" rowspan="1">
+                      Larger amount of memory allocated for the in-memory 
+                      file-system used to merge map-outputs at the reduces.
+                    </td>
+                  
+</tr>
+                  
+<tr>
+                    
+<td colspan="1" rowspan="1">io.sort.factor</td>
+                    <td colspan="1" rowspan="1">100</td>
+                    <td colspan="1" rowspan="1">More streams merged at once while sorting files.</td>
+                  
+</tr>
+                  
+<tr>
+                    
+<td colspan="1" rowspan="1">io.sort.mb</td>
+                    <td colspan="1" rowspan="1">200</td>
+                    <td colspan="1" rowspan="1">Higher memory-limit while sorting data.</td>
+                  
+</tr>
+                  
+<tr>
+                    
+<td colspan="1" rowspan="1">io.file.buffer.size</td>
+                    <td colspan="1" rowspan="1">131072</td>
+                    <td colspan="1" rowspan="1">Size of read/write buffer used in SequenceFiles.</td>
+                  
+</tr>
+                
+</table>
+              
+</li>
+              
+<li>
+                
+<p>Updates to some configuration values to run sort1400 and 
+                sort2000, that is 14TB of data sorted on 1400 nodes and 20TB of
+                data sorted on 2000 nodes:</p>
+                
+<table class="ForrestTable" cellspacing="1" cellpadding="4">
+  		          
+<tr>
+		            
+<th colspan="1" rowspan="1">Parameter</th>
+		            <th colspan="1" rowspan="1">Value</th> 
+		            <th colspan="1" rowspan="1">Notes</th>
+		          
+</tr>
+                  
+<tr>
+                    
+<td colspan="1" rowspan="1">mapred.job.tracker.handler.count</td>
+                    <td colspan="1" rowspan="1">60</td>
+                    <td colspan="1" rowspan="1">
+                      More JobTracker server threads to handle RPCs from large 
+                      number of TaskTrackers.
+                    </td>
+                  
+</tr>
+                  
+<tr>
+                    
+<td colspan="1" rowspan="1">mapred.reduce.parallel.copies</td>
+                    <td colspan="1" rowspan="1">50</td>
+                    <td colspan="1" rowspan="1"></td>
+                  
+</tr>
+                  
+<tr>
+                    
+<td colspan="1" rowspan="1">tasktracker.http.threads</td>
+                    <td colspan="1" rowspan="1">50</td>
+                    <td colspan="1" rowspan="1">
+                      More worker threads for the TaskTracker's http server. The
+                      http server is used by reduces to fetch intermediate 
+                      map-outputs.
+                    </td>
+                  
+</tr>
+                  
+<tr>
+                    
+<td colspan="1" rowspan="1">mapred.child.java.opts</td>
+                    <td colspan="1" rowspan="1">-Xmx1024M</td>
+                    <td colspan="1" rowspan="1"></td>
+                  
+</tr>
+                
+</table>
+              
+</li>
+            
+</ul>
+<a name="N102D9"></a><a name="Slaves"></a>
+<h4>Slaves</h4>
+<p>Typically you choose one machine in the cluster to act as the 
+          <span class="codefrag">NameNode</span> and one machine as to act as the 
+          <span class="codefrag">JobTracker</span>, exclusively. The rest of the machines act as 
+          both a <span class="codefrag">DataNode</span> and <span class="codefrag">TaskTracker</span> and are 
+          referred to as <em>slaves</em>.</p>
+<p>List all slave hostnames or IP addresses in your 
+          <span class="codefrag">conf/slaves</span> file, one per line.</p>
+<a name="N102F8"></a><a name="Logging"></a>
+<h4>Logging</h4>
+<p>Hadoop uses <a href="http://logging.apache.org/log4j/">Apache 
+          log4j</a> via the <a href="http://commons.apache.org/logging/">Apache 
+          Commons Logging</a> framework for logging. Edit the 
+          <span class="codefrag">conf/log4j.properties</span> file to customize the Hadoop 
+          daemons' logging configuration (log-formats and so on).</p>
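+<p>As a minimal, self-contained sketch of such a customization (plain log4j 1.x 
+          properties; the <span class="codefrag">conf/log4j.properties</span> shipped with 
+          Hadoop is more elaborate), you might adjust the root logger level and the 
+          log-format:</p>
+<pre>
+# Default logging level and appender for the daemons
+log4j.rootLogger=INFO,console
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+# Customize the log-format here
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+</pre>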
+<p>Once all the necessary configuration is complete, distribute the files
+      to the <span class="codefrag">HADOOP_CONF_DIR</span> directory on all the machines, 
+      typically <span class="codefrag">${HADOOP_HOME}/conf</span>.</p>
+</div>
+    
+    
+<a name="N10318"></a><a name="Hadoop+Startup"></a>
+<h2 class="h3">Hadoop Startup</h2>
+<div class="section">
+<p>To start a Hadoop cluster you will need to start both the HDFS and 
+      Map-Reduce cluster.</p>
+<p>
+        Format a new distributed filesystem:<br>
+        
+<span class="codefrag">$ bin/hadoop namenode -format</span>
+      
+</p>
+<p>
+        Start the HDFS with the following command, run on the designated
+        <span class="codefrag">NameNode</span>:<br>
+        
+<span class="codefrag">$ bin/start-dfs.sh</span>
+      
+</p>
+<p>The <span class="codefrag">bin/start-dfs.sh</span> script also consults the 
+      <span class="codefrag">${HADOOP_CONF_DIR}/slaves</span> file on the <span class="codefrag">NameNode</span> 
+      and starts the <span class="codefrag">DataNode</span> daemon on all the listed slaves.</p>
+<p>
+        Start Map-Reduce with the following command, run on the designated
+        <span class="codefrag">JobTracker</span>:<br>
+        
+<span class="codefrag">$ bin/start-mapred.sh</span>
+      
+</p>
+<p>The <span class="codefrag">bin/start-mapred.sh</span> script also consults the 
+      <span class="codefrag">${HADOOP_CONF_DIR}/slaves</span> file on the <span class="codefrag">JobTracker</span> 
+      and starts the <span class="codefrag">TaskTracker</span> daemon on all the listed slaves.
+      </p>
+</div>
+    
+    
+<a name="N1035E"></a><a name="Hadoop+Shutdown"></a>
+<h2 class="h3">Hadoop Shutdown</h2>
+<div class="section">
+<p>
+        Stop HDFS with the following command, run on the designated 
+        <span class="codefrag">NameNode</span>:<br>
+        
+<span class="codefrag">$ bin/stop-dfs.sh</span>
+      
+</p>
+<p>The <span class="codefrag">bin/stop-dfs.sh</span> script also consults the 
+      <span class="codefrag">${HADOOP_CONF_DIR}/slaves</span> file on the <span class="codefrag">NameNode</span> 
+      and stops the <span class="codefrag">DataNode</span> daemon on all the listed slaves.</p>
+<p>
+        Stop Map-Reduce with the following command, run on the designated
+        <span class="codefrag">JobTracker</span>:<br>
+        
+<span class="codefrag">$ bin/stop-mapred.sh</span>
+<br>
+      
+</p>
+<p>The <span class="codefrag">bin/stop-mapred.sh</span> script also consults the 
+      <span class="codefrag">${HADOOP_CONF_DIR}/slaves</span> file on the <span class="codefrag">JobTracker</span> 
+      and stops the <span class="codefrag">TaskTracker</span> daemon on all the listed slaves.</p>
+</div>
+  
+</div>
+<!--+
+    |end content
+    +-->
+<div class="clearboth">&nbsp;</div>
+</div>
+<div id="footer">
+<!--+
+    |start bottomstrip
+    +-->
+<div class="lastmodified">
+<script type="text/javascript"><!--
+document.write("Last Published: " + document.lastModified);
+//  --></script>
+</div>
+<div class="copyright">
+        Copyright &copy;
+         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
+</div>
+<!--+
+    |end bottomstrip
+    +-->
+</div>
+</body>
+</html>

File diff suppressed because it is too large
+ 118 - 0
docs/cluster_setup.pdf


+ 26 - 8
docs/documentation.html

@@ -114,13 +114,22 @@ document.write("Last Published: " + document.lastModified);
 <a href="http://www.cafepress.com/hadoop/">Buy Stuff</a>
 </div>
 </div>
-<div onclick="SwitchMenu('menu_1.2', 'skin/')" id="menu_1.2Title" class="menutitle">Documentation</div>
-<div id="menu_1.2" class="menuitemgroup">
+<div onclick="SwitchMenu('menu_selected_1.2', 'skin/')" id="menu_selected_1.2Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">Documentation</div>
+<div id="menu_selected_1.2" class="selectedmenuitemgroup" style="display: block;">
+<div class="menupage">
+<div class="menupagetitle">Overview</div>
+</div>
+<div class="menuitem">
+<a href="quickstart.html">Quickstart</a>
+</div>
 <div class="menuitem">
-<a href="hdfs_design.html">Hadoop File System</a>
+<a href="cluster_setup.html">Cluster Setup</a>
 </div>
 <div class="menuitem">
-<a href="api/overview-summary.html#overview_description">Install and Configure</a>
+<a href="hdfs_design.html">HDFS Architecture</a>
+</div>
+<div class="menuitem">
+<a href="mapred_tutorial.html">Map-Reduce Tutorial</a>
 </div>
 <div class="menuitem">
 <a href="api/index.html">API Docs</a>
@@ -175,18 +184,27 @@ document.write("Last Published: " + document.lastModified);
 <h1>Hadoop Documentation</h1>
     
 <p>
-    The following documents provide concepts and procedures that will help you get started using Hadoop.
-    If you have more questions, you can ask the <a href="mailing_lists.html">mailing list</a> or browse the archives.
+    The following documents provide concepts and procedures that will help you 
+    get started using Hadoop. If you have more questions, you can ask the 
+    <a href="mailing_lists.html">mailing list</a> or browse the archives.
     </p>
     
 <ul>
       
 <li>
-<a href="hdfs_design.html">Hadoop Distributed File System (<acronym title="Hadoop Distributed File System">HDFS</acronym>)</a>
+<a href="quickstart.html">Hadoop Quickstart</a>
+</li>
+      
+<li>
+<a href="cluster_setup.html">Hadoop Cluster Setup</a>
+</li>
+      
+<li>
+<a href="hdfs_design.html">Hadoop Distributed File System</a>
 </li>
       
 <li>
-<a href="api/overview-summary.html#overview_description">Install and configure</a>
+<a href="mapred_tutorial.html">Hadoop Map-Reduce Tutorial</a>
 </li>
       
 <li>

+ 60 - 34
docs/documentation.pdf

@@ -5,10 +5,10 @@
 /Producer (FOP 0.20.5) >>
 endobj
 5 0 obj
-<< /Length 1045 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 1161 /Filter [ /ASCII85Decode /FlateDecode ]
  >>
 stream
-Gat=+968f@&AJ$CkX];h<DPi>8Q2)Me.uYB-p9Pu!)JPJr.)o(YPaWHZ/]7P^rkC"qf]*l/e;a`bE;qD+9Ll2KrV>l6==G7JE-q;Y?J7MK%+t[q>gLIT):9:O*BM<EEr<UnX'OB5pV.k6tpZHrdj_Q$-E8q%Hms.G8t9@ef_*hU4V@SK>fa.j7F<;41,d2aLPJ8B'C`O27F]&!cc`!pdo_857h7k$#N&7pBtQ'UGu<*?Go?F]r.\^G!bfC$U6.0fS4_jU<OhH67h_dLmm*b[ICgL@GCZ,/]pGLC"?K;ng`>gV\/+-:UGN1YBoQ0ebj9$]ig>2nG>;OW+/2UgJU4h7*7^oL+0KdgTSNq6'j[k$s(q4lXAUpB04IQ5bVj_3;??UVr9,18Q-5Ue&4TZgOu4:;Yb)BU(aV528O9s(TglS()UU7W4"Y0<-DWSoULhTftKQl*7pp"Im4>7d7NE'0-*/Q3g;u![U'n>pddS'EpMNa\ofJVj3[Bg^G8?6?E@47Bnn))Le`#8"^L4LpaQI8["9HSCHrG!VBL8n]rS%);81@-H7;k5RZ&SB`V;aQL(ItP7,FuQbruoF>W'pP-m#Eh'=acWVQ#:]NX6k%V'>qAD==L(?+oET3/m)ZTIVdERVW7<)\B:fV$qR&hVM8]]3.6Z/m"OS.oeNAJ"4UR2o"pf=1:/7m\t*]=Efq_m6j/dZL:c[l8h'ckS]td*u5m2FQY9Pe]g&XTLo]`6L,\??)3$$H#,Uf1Oq/!W`jNf`FO_Qc;bZ27u2#h.l/pG/4_6Qs"#;)mM%EK=YuA+Ic\SuJ@+k2ro`ki\Ns7:75R_'4A<hLi2jJb`ZTT,]rNJ\:5Se:r%a="Dg$T1h4tLAB_H;rAY.K)s$X'</AWrjc@!HK2pql=p5OZ0D3.lVs15#_=g^LG?\?5sX%*d*#mZjugQ%!/kM>Xr&r1HY50RSBZ8n"<Q-sE1YNbR?Mg"tac2Sa"Z*FX%Nj@@F^G5/;I\>GrFBb:69NtYsAJ3So,G=2Mbias_F6AuDEfln\#Z^*g!DkM_TE~>
+Gat=+9lldX&A@ZcFA-:M<CfZ:A(!2N3n'KDp.Puk%g:'6'5SXPilSs=ps]A"&5Ne]0&$'V/ut8I?<U]R)57TfBXtQBnIo"-D(HV<rF[@Yn-OkX-6ud_d,0a'r[Np=Qfp5JV1*T-'*n>.m5$s7j8C;#LR(.^-q?1t5W_NlFg3&aKs]X8!eqL%0%P"V`Q)r40s4N9S/U!&M-n.8#RfX?TFPpt^L)-7#ATMO$7s9,IV,Zk(e)HuWp!il[l?=d7Y.Kpo92kMl?'We$3rXeML!ptW2^jrNk=rigX^iX=mk8P=>.3WMdiTV7Y1_l&*f9@_q\7oq:j>DYq"GjiB+?4/$R'JQHq88nor0[)&?2[Sd5oZ\VlJ]@GP/>b9f:'TXFn+"VbD.LJ_3/PJhm<Jlk\[%j==_K%ZOUs$e.M_'Tj%Nl!WRe\]#X0W,*YCZE?4pF1r[WB+&HDMN2C'NN0M8[&Y2<JoP5@ukFH`\<a%e0Cf;Un0K,j_BZa=UNpBf^ieB)3b2?ee>6o_:(37dG0\2e,2AC6@kY-0SGO9#Z#ehntQbug,9Z/^!FUS\_OJ<MEI8LSEafI^X0Dj:,Gf2g2gl"27rcgTn*8*X^JN_#"%V2.)li'FbXqO7X:H%A5$nX]&43b>XJM="e^0`GhsFJFX.[5:g&77eITU;pAB@^U)XZ9+)9,irBS/T3NjNT!]dAjZl`HW9'A+MTHTub)a-`Un0p:dAY65g?SK2m[NS%'DlPHI69+Fl2PED.D&A6QP'u8?b+5CJ6F_FLI7=sHl@$'W-r@O#dp$]KViMjZPr!Mg*uCtlViMeCH%#%(i6\"Ea&%>W<\W+F#(kP3HgV#)^Q1I&J,+lG&,I=bN&F-9^7J%tKL6$rLYrjS-Jm?j6@n91T#F:b8$cAd'(!n^%DA3001L.2A$kp!fbtLYLTZ*`NKNtV1d*)4oE0nUJ;2uWot&0d>,c?e&?EQodTQ5XmK`/tNY'ICLJ_HO=^<2q:$>CJ*6o1ub;0)-jD`Zil':2\5#\]JDGTNOX8=OB`[V/GH3UrcY:U07.7]4]l\:,YkKPBVUoqU$IQ#g<7<#%]Q'PJ,>N3P)&oP.Gq5Sk=gbooo`&1Toh-([t#=!b4,$nnP567A138^p^Y@A!XWis^j.(N8aX&LCKC5#CIrrIqX;Xs~>
 endstream
 endobj
 6 0 obj
@@ -28,6 +28,8 @@ endobj
 11 0 R
 12 0 R
 13 0 R
+14 0 R
+15 0 R
 ]
 endobj
 8 0 obj
@@ -44,10 +46,10 @@ endobj
 9 0 obj
 << /Type /Annot
 /Subtype /Link
-/Rect [ 108.0 542.2 304.668 530.2 ]
+/Rect [ 108.0 542.2 198.984 530.2 ]
 /C [ 0 0 0 ]
 /Border [ 0 0 0 ]
-/A << /URI (hdfs_design.html)
+/A << /URI (quickstart.html)
 /S /URI >>
 /H /I
 >>
@@ -55,10 +57,10 @@ endobj
 10 0 obj
 << /Type /Annot
 /Subtype /Link
-/Rect [ 108.0 529.0 207.312 517.0 ]
+/Rect [ 108.0 529.0 213.996 517.0 ]
 /C [ 0 0 0 ]
 /Border [ 0 0 0 ]
-/A << /URI (api/overview-summary.html#overview_description)
+/A << /URI (cluster_setup.html)
 /S /URI >>
 /H /I
 >>
@@ -66,10 +68,10 @@ endobj
 11 0 obj
 << /Type /Annot
 /Subtype /Link
-/Rect [ 108.0 515.8 154.992 503.8 ]
+/Rect [ 108.0 515.8 263.004 503.8 ]
 /C [ 0 0 0 ]
 /Border [ 0 0 0 ]
-/A << /URI (api/index.html)
+/A << /URI (hdfs_design.html)
 /S /URI >>
 /H /I
 >>
@@ -77,10 +79,10 @@ endobj
 12 0 obj
 << /Type /Annot
 /Subtype /Link
-/Rect [ 108.0 502.6 132.0 490.6 ]
+/Rect [ 108.0 502.6 252.636 490.6 ]
 /C [ 0 0 0 ]
 /Border [ 0 0 0 ]
-/A << /URI (http://wiki.apache.org/lucene-hadoop/)
+/A << /URI (mapred_tutorial.html)
 /S /URI >>
 /H /I
 >>
@@ -88,36 +90,58 @@ endobj
 13 0 obj
 << /Type /Annot
 /Subtype /Link
-/Rect [ 108.0 489.4 132.0 477.4 ]
+/Rect [ 108.0 489.4 154.992 477.4 ]
 /C [ 0 0 0 ]
 /Border [ 0 0 0 ]
-/A << /URI (http://wiki.apache.org/lucene-hadoop/FAQ)
+/A << /URI (api/index.html)
 /S /URI >>
 /H /I
 >>
 endobj
 14 0 obj
+<< /Type /Annot
+/Subtype /Link
+/Rect [ 108.0 476.2 132.0 464.2 ]
+/C [ 0 0 0 ]
+/Border [ 0 0 0 ]
+/A << /URI (http://wiki.apache.org/lucene-hadoop/)
+/S /URI >>
+/H /I
+>>
+endobj
+15 0 obj
+<< /Type /Annot
+/Subtype /Link
+/Rect [ 108.0 463.0 132.0 451.0 ]
+/C [ 0 0 0 ]
+/Border [ 0 0 0 ]
+/A << /URI (http://wiki.apache.org/lucene-hadoop/FAQ)
+/S /URI >>
+/H /I
+>>
+endobj
+16 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F3
 /BaseFont /Helvetica-Bold
 /Encoding /WinAnsiEncoding >>
 endobj
-15 0 obj
+17 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F5
 /BaseFont /Times-Roman
 /Encoding /WinAnsiEncoding >>
 endobj
-16 0 obj
+18 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F1
 /BaseFont /Helvetica
 /Encoding /WinAnsiEncoding >>
 endobj
-17 0 obj
+19 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F2
@@ -136,35 +160,37 @@ endobj
 endobj
 3 0 obj
 << 
-/Font << /F3 14 0 R /F5 15 0 R /F1 16 0 R /F2 17 0 R >> 
+/Font << /F3 16 0 R /F5 17 0 R /F1 18 0 R /F2 19 0 R >> 
 /ProcSet [ /PDF /ImageC /Text ] >> 
 endobj
 xref
-0 18
+0 20
 0000000000 65535 f 
-0000002889 00000 n 
-0000002947 00000 n 
-0000002997 00000 n 
+0000003322 00000 n 
+0000003380 00000 n 
+0000003430 00000 n 
 0000000015 00000 n 
 0000000071 00000 n 
-0000001208 00000 n 
-0000001328 00000 n 
-0000001387 00000 n 
-0000001554 00000 n 
-0000001717 00000 n 
-0000001911 00000 n 
-0000002073 00000 n 
-0000002256 00000 n 
-0000002442 00000 n 
-0000002555 00000 n 
-0000002665 00000 n 
-0000002773 00000 n 
+0000001324 00000 n 
+0000001444 00000 n 
+0000001517 00000 n 
+0000001684 00000 n 
+0000001846 00000 n 
+0000002012 00000 n 
+0000002176 00000 n 
+0000002344 00000 n 
+0000002506 00000 n 
+0000002689 00000 n 
+0000002875 00000 n 
+0000002988 00000 n 
+0000003098 00000 n 
+0000003206 00000 n 
 trailer
 <<
-/Size 18
+/Size 20
 /Root 2 0 R
 /Info 4 0 R
 >>
 startxref
-3109
+3542
 %%EOF

+ 15 - 4
docs/index.html

@@ -117,10 +117,19 @@ document.write("Last Published: " + document.lastModified);
 <div onclick="SwitchMenu('menu_1.2', 'skin/')" id="menu_1.2Title" class="menutitle">Documentation</div>
 <div id="menu_1.2" class="menuitemgroup">
 <div class="menuitem">
-<a href="hdfs_design.html">Hadoop File System</a>
+<a href="documentation.html">Overview</a>
 </div>
 <div class="menuitem">
-<a href="api/overview-summary.html#overview_description">Install and Configure</a>
+<a href="quickstart.html">Quickstart</a>
+</div>
+<div class="menuitem">
+<a href="cluster_setup.html">Cluster Setup</a>
+</div>
+<div class="menuitem">
+<a href="hdfs_design.html">HDFS Architecture</a>
+</div>
+<div class="menuitem">
+<a href="mapred_tutorial.html">Map-Reduce Tutorial</a>
 </div>
 <div class="menuitem">
 <a href="api/index.html">API Docs</a>
@@ -262,8 +271,10 @@ document.write("Last Published: " + document.lastModified);
 <li>
 <a href="releases.html">Download</a> Hadoop from the release page.</li>
         
+<li>Hadoop <a href="quickstart.html">Quickstart</a>.</li>
+        
 <li>
-<a href="api/overview-summary.html#overview_description">Install and configure</a> Hadoop. Scroll down the page.</li>
+<a href="cluster_setup.html">Hadoop Cluster Setup</a>.</li>
         
 <li>
 <a href="mailing_lists.html">Discuss it</a> on the mailing list.</li>
@@ -272,7 +283,7 @@ document.write("Last Published: " + document.lastModified);
 </div>
 
     
-<a name="N10073"></a><a name="Getting+Involved"></a>
+<a name="N1007A"></a><a name="Getting+Involved"></a>
 <h2 class="h3"> Getting Involved </h2>
 <div class="section">
 <p>

+ 66 - 53
docs/index.pdf

@@ -143,10 +143,10 @@ endobj
 >>
 endobj
 21 0 obj
-<< /Length 1638 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 1678 /Filter [ /ASCII85Decode /FlateDecode ]
  >>
 stream
-Gat=,gMYe)&:NH>:u<7c@73_PW4Wg=;nE?6TWK$O6e_iKbM?0n't:c!lbDmb2\'ubKjiUF.F'rkiGdr#+1@Z2\!qk+e2b>5_^9D\lNBKRIsnJlhhmLLVi]3Qk4d,UmH34fG:8([g\o-/N*Q.O:2@og$!cB@St&%6[R^/5H-7+j]3jQg;J.dL([<*1<+mJ6o?n$tAY.'jc;pV'o=Wa2c?EE[+4/c;W_8^J4DJf+'6Q@fPq5OZaOEXBbMrCXcJQq@o4YU>7/VMCR/';pdbG6aBlu8-(KD^&(p,<h\3LXOH83`[%PKVb2jcq*k85@o!=mp7:6'%K-<)^8#<PE9jU%lJcWOO?PXlURGP?2M#Wk/-P#-8ZU&FO/gY,ZQ*tQYO\tWEp67>V>of.fDWh7BQQO"Wn([#cj!?7Qgi!e*u*[@aNRP-EKng4,EST[9"4"u@"@u,Z!^_b0*K<p1ZE]8GFFk+$\D\rpOLPck.0*=t<6H6W7rhH7GWMD`"]S`ahjjt"V_TMbnA608i%>ea#_*\i(Cm]M0@Cf;L`eOAc]&]9:GI%<j4#XHp*2%_R:(%X_A1$=L9q:6r]G_Y/ET6YRn^\r&,JU4=D3gU[lf+Y,7DI/ED1u(co+AZ3fB=UEeoH2AJp9s?!t:35L<*5j^Iq=^;"[8OlT>i:Oho.JOEqsZ2n1o9,%hRSn'aX0aI.-[eEr?[6(;bjJNP95_UW]4q7D`<\##R%K_FZm?V*1BRg,_8)j8]\_osVE:8%=[0eJU#/=.=n&S`-#\dMFB0o"W!\_)fO-LH+XjD[.a<$CF.8@28HS^8f\m2hdoAS$lQ"QHs:"1KL.0X\6rIZRQ@_0sIu<V@uG$XtP@mY:qpfaZZZp;$8+$g.n,MUW2<,O-hb2GnUZTLaU*F^@XroYX)FebK]i[j(Y.)5INV5H9-SM$l.]YJ@kj:As6T3@DbTm^(aCA=e=PE0D%O(Y3;'.h%0g[uGV2DN6[-PVJpQo;Y?R5Wi5OB_1f0ZRi,`PZV-^]O,IGV)mRt(5=l71L&J%[G*a<k-sp[[0#@*?cGU9CbBe_40OtU*PnMhd]kD?mllt@ju^9WI2qO!)Zg4VrP[dNc4uij"4;`:?XgAK1o%P&3!Je@!)O1M+Vk(_5C5cYVbXLB2f2C9_H0m;!8IcdSe^O%2g[pQk"dC>BAFSZ".Sb^Z:t>cobnqs\p@ZPG-JS)W&>uAk\ngZNet<S*6b2N>TJ4Ylu7DsQ$')@U1oeGeEm,f,VC0qmZkBkbP_qN+alhH[9i:M>+rht_E`;N8BD-J&2_7Aimt`q@MnsTPe3hQq)6iF#6qO44Y$!B(mS7i"8QDS^>SJI$HhB>=/-XXYa:JK?\rB&Rc\7JN]"X>38kf.&.OoZhf-p1a1"LDLG8@QmT:kfQpYqP29r23YfrLC%l@Cq2`7G<1Oa(!1me4EU03@Kr)Y>6g2<W[V`IVS(NWj*&]C<3D+HfV=k$o1Q!)Nm:romi$s;E]LLGO2^U#/2BLN\jSf7/p@l;LD,hB\r=EKiN^+Mh+dJqdsO+GMBc;V1cfgN'kkEZg9)BEcepokFcfBDg!OWKNWHpXBhGCM>Bc%roJBL^gmgtI=A=R?quXEJ2$Yr]5bCS`%.ga*l^;ALBOd<EY2e@BF0~>
+Gat=,997gc&AJ$CW4*5_0G6fH<%^V'1tr3dcu]`Xd.SU*>[(K:FS/a[s*[BJMF5'n/:n=)H=jmZiGdqS5AW8CD)_K44Qc]opRB5MXqLp=r6KWZq5rhkKK4a.G+5\)&)5@@ceS%=I'\7V\?2&)s#d4.jHKd"F1*IZcb&RA^2`#Xf$)srnQr3$=e?)pO%_%1Oi;T[QBa4tb9YA69\&<O[:>`qZSXn>Di^nN@2In$8+S`<VfQ.cU*L"<?0&=C^452@m;0OrU@fbJFL2m1-P'p/QC"s>Ram=r&S-j]aso#?7WpHIF.1"5R2l`k;!;tZ/:mpuh'l*ZBoY)N0rkNk,Ok7,d)bCO/g3l`:W^5phG=c,HDj>#dAP%6ibc5K;U-BYcGjoo0C(qS:QFUZ&qQA*aW-WQ*]b,#@BKT742o#RTX?pTW'6YWAR+:hn0>H_!O;(.e:\[.#WN-X=>hln4/a?Ij.33NhHbNH]Kl*iOmr-$i1_Z2]H>Zg3r6N^lBBGm\4TbMr@fCs>TY<+EgU`87$e0#Osm'Bc79FSNS64tfUpY>cW%QL9p",)+o-,0*Xd*oa@II1)4,$s*$=7lbj^E%?Y@A;id;s,7FT>9Z$5fmA>5'=8$)*XOO'l+(VMWHk?JK%.gYlbSL.-\^R=5(5X?=?o,oIOF<9(\KlH"YSJ*2V]^r;`H!Q+\fYNP-[II!=5>*%K*fD`rHkHS*(+)P&h>Q!6dj9K\dg]?^MD[7&fES;;K+k_=X#6$G&$i4]1F6Oi,2qi6'6JrZ9(OPZ"1!"s);h@0c7;O75f<It=seApY1*jaltU._Kt3WiRGb_p;Mul@d<)D0iH4eMEm^@EP*d+hh<kuT2P8(=Z8+oc!q>!-0hObs0QTKphoi%*bl*\hX6a!WEDeBWaaHVq'Ne-f77cVDca5iKL2&^*gf)/X0XFYiM0YDeMZdSmr2fK^9#QE1oX)p<pj4NT5G!F,U%O%aX=eLDG!7G'f7R+BIZniKOuK/A%o;<o\-bT:JN[JfR<9cfLqY)GZi3^O"%nMTE*a:D%Z%?CMLq2pSRkZpV8PK!icqT`96J+PB'9Zjc3g+";lpQN9(T$0J$":+0I/7m1!Jb,gPE14G;;jElc&kH6K1oJWWAf%F)OF4^b\K!gbs_k%0I.0MBtY0TL&3O&Z5j2OTB.<OV!ET#!W>]j(D;"$\QPVg7[q8Pc4\$)kap1lBSoU'oe\);:3\#4#.:Zb[!Z,(pN*@"&UiqYBapUDFgdE'/5mLoJ4ESh%BtcBY@FO+13/i:J@La8KUZbj<gJB![#cZ%u?L_)R.6E<r>.07[LMhT$q+@65h,Y/]l'Y4q,;Wj4:irfeDoV;XSaKnha")21cEiO';H97W$_&lsHil)#ioqK(euWeSOg=BQ&?A0Q)X-(R:"@_;b&2#iM:?;$q`hIAYW*^-,X5#_(\P$Z-&W1mh_]F/giDFm?icHblGjkp`/Eo!>#WF7l^g,#NIA^IbsmFq]!ZeTk?3R,ap$+Dc),+cpE!7\/pb1/%HE$9'RE"01P`YjCfCGBQFd*Qdl&:5$:C=>$;[TidAE@X5<T0SW$K_uJT"NrdHpc?jG)ZAG4tit`A`*Y%ck(e!BJ*drJdd_i?7:Y8Y.h-ZCnAjoua1opW=_erm2<k$'_X4nLhXBr%+h5X\dfHj_E;AMModC\Fgr-O]k~>
 endstream
 endobj
 22 0 obj
@@ -167,6 +167,7 @@ endobj
 28 0 R
 29 0 R
 30 0 R
+31 0 R
 ]
 endobj
 24 0 obj
@@ -194,10 +195,10 @@ endobj
 26 0 obj
 << /Type /Annot
 /Subtype /Link
-/Rect [ 108.0 604.0 207.312 592.0 ]
+/Rect [ 148.992 604.0 198.984 592.0 ]
 /C [ 0 0 0 ]
 /Border [ 0 0 0 ]
-/A << /URI (api/overview-summary.html#overview_description)
+/A << /URI (quickstart.html)
 /S /URI >>
 /H /I
 >>
@@ -205,10 +206,10 @@ endobj
 27 0 obj
 << /Type /Annot
 /Subtype /Link
-/Rect [ 108.0 590.8 155.004 578.8 ]
+/Rect [ 108.0 590.8 213.996 578.8 ]
 /C [ 0 0 0 ]
 /Border [ 0 0 0 ]
-/A << /URI (mailing_lists.html)
+/A << /URI (cluster_setup.html)
 /S /URI >>
 /H /I
 >>
@@ -216,10 +217,10 @@ endobj
 28 0 obj
 << /Type /Annot
 /Subtype /Link
-/Rect [ 147.324 498.866 290.652 486.866 ]
+/Rect [ 108.0 577.6 155.004 565.6 ]
 /C [ 0 0 0 ]
 /Border [ 0 0 0 ]
-/A << /URI (http://wiki.apache.org/lucene-hadoop/HowToContribute)
+/A << /URI (mailing_lists.html)
 /S /URI >>
 /H /I
 >>
@@ -227,10 +228,10 @@ endobj
 29 0 obj
 << /Type /Annot
 /Subtype /Link
-/Rect [ 147.996 485.666 191.304 473.666 ]
+/Rect [ 147.324 485.666 290.652 473.666 ]
 /C [ 0 0 0 ]
 /Border [ 0 0 0 ]
-/A << /URI (issue_tracking.html)
+/A << /URI (http://wiki.apache.org/lucene-hadoop/HowToContribute)
 /S /URI >>
 /H /I
 >>
@@ -238,7 +239,18 @@ endobj
 30 0 obj
 << /Type /Annot
 /Subtype /Link
-/Rect [ 148.668 472.466 203.016 460.466 ]
+/Rect [ 147.996 472.466 191.304 460.466 ]
+/C [ 0 0 0 ]
+/Border [ 0 0 0 ]
+/A << /URI (issue_tracking.html)
+/S /URI >>
+/H /I
+>>
+endobj
+31 0 obj
+<< /Type /Annot
+/Subtype /Link
+/Rect [ 148.668 459.266 203.016 447.266 ]
 /C [ 0 0 0 ]
 /Border [ 0 0 0 ]
 /A << /URI (mailing_lists.html)
@@ -246,49 +258,49 @@ endobj
 /H /I
 >>
 endobj
-32 0 obj
+33 0 obj
 <<
  /Title (\376\377\0\61\0\40\0\107\0\145\0\164\0\164\0\151\0\156\0\147\0\40\0\123\0\164\0\141\0\162\0\164\0\145\0\144)
- /Parent 31 0 R
- /Next 33 0 R
+ /Parent 32 0 R
+ /Next 34 0 R
  /A 9 0 R
 >> endobj
-33 0 obj
+34 0 obj
 <<
  /Title (\376\377\0\62\0\40\0\107\0\145\0\164\0\164\0\151\0\156\0\147\0\40\0\111\0\156\0\166\0\157\0\154\0\166\0\145\0\144)
- /Parent 31 0 R
- /Prev 32 0 R
+ /Parent 32 0 R
+ /Prev 33 0 R
  /A 11 0 R
 >> endobj
-34 0 obj
+35 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F3
 /BaseFont /Helvetica-Bold
 /Encoding /WinAnsiEncoding >>
 endobj
-35 0 obj
+36 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F5
 /BaseFont /Times-Roman
 /Encoding /WinAnsiEncoding >>
 endobj
-36 0 obj
+37 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F1
 /BaseFont /Helvetica
 /Encoding /WinAnsiEncoding >>
 endobj
-37 0 obj
+38 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F2
 /BaseFont /Helvetica-Oblique
 /Encoding /WinAnsiEncoding >>
 endobj
-38 0 obj
+39 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F7
@@ -303,13 +315,13 @@ endobj
 2 0 obj
 << /Type /Catalog
 /Pages 1 0 R
- /Outlines 31 0 R
+ /Outlines 32 0 R
  /PageMode /UseOutlines
  >>
 endobj
 3 0 obj
 << 
-/Font << /F3 34 0 R /F5 35 0 R /F1 36 0 R /F2 37 0 R /F7 38 0 R >> 
+/Font << /F3 35 0 R /F5 36 0 R /F1 37 0 R /F2 38 0 R /F7 39 0 R >> 
 /ProcSet [ /PDF /ImageC /Text ] /XObject <</Im1 13 0 R 
  >>
 >> 
@@ -323,28 +335,28 @@ endobj
 11 0 obj
 <<
 /S /GoTo
-/D [22 0 R /XYZ 85.0 571.8 null]
+/D [22 0 R /XYZ 85.0 558.6 null]
 >>
 endobj
-31 0 obj
+32 0 obj
 <<
- /First 32 0 R
- /Last 33 0 R
+ /First 33 0 R
+ /Last 34 0 R
 >> endobj
 xref
-0 39
+0 40
 0000000000 65535 f 
-0000026195 00000 n 
-0000026267 00000 n 
-0000026359 00000 n 
+0000026379 00000 n 
+0000026451 00000 n 
+0000026543 00000 n 
 0000000015 00000 n 
 0000000071 00000 n 
 0000000572 00000 n 
 0000000692 00000 n 
 0000000724 00000 n 
-0000026510 00000 n 
+0000026694 00000 n 
 0000000859 00000 n 
-0000026573 00000 n 
+0000026757 00000 n 
 0000000995 00000 n 
 0000002849 00000 n 
 0000021027 00000 n 
@@ -355,29 +367,30 @@ xref
 0000021745 00000 n 
 0000021924 00000 n 
 0000022111 00000 n 
-0000023842 00000 n 
-0000023965 00000 n 
-0000024034 00000 n 
-0000024200 00000 n 
-0000024361 00000 n 
-0000024555 00000 n 
-0000024721 00000 n 
-0000024927 00000 n 
-0000025100 00000 n 
-0000026637 00000 n 
-0000025272 00000 n 
-0000025452 00000 n 
-0000025639 00000 n 
-0000025752 00000 n 
-0000025862 00000 n 
-0000025970 00000 n 
-0000026086 00000 n 
+0000023882 00000 n 
+0000024005 00000 n 
+0000024081 00000 n 
+0000024247 00000 n 
+0000024408 00000 n 
+0000024573 00000 n 
+0000024739 00000 n 
+0000024905 00000 n 
+0000025111 00000 n 
+0000025284 00000 n 
+0000026821 00000 n 
+0000025456 00000 n 
+0000025636 00000 n 
+0000025823 00000 n 
+0000025936 00000 n 
+0000026046 00000 n 
+0000026154 00000 n 
+0000026270 00000 n 
 trailer
 <<
-/Size 39
+/Size 40
 /Root 2 0 R
 /Info 4 0 R
 >>
 startxref
-26688
+26872
 %%EOF

+ 3218 - 0
docs/mapred_tutorial.html

@@ -0,0 +1,3218 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+<head>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<meta content="Apache Forrest" name="Generator">
+<meta name="Forrest-version" content="0.8">
+<meta name="Forrest-skin-name" content="pelt">
+<title>Hadoop Map-Reduce Tutorial</title>
+<link type="text/css" href="skin/basic.css" rel="stylesheet">
+<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
+<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
+<link type="text/css" href="skin/profile.css" rel="stylesheet">
+<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
+<link rel="shortcut icon" href="images/favicon.ico">
+</head>
+<body onload="init()">
+<script type="text/javascript">ndeSetTextSize();</script>
+<div id="top">
+<!--+
+    |breadtrail
+    +-->
+<div class="breadtrail">
+<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://lucene.apache.org/">Lucene</a> &gt; <a href="http://lucene.apache.org/hadoop/">Hadoop</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
+</div>
+<!--+
+    |header
+    +-->
+<div class="header">
+<!--+
+    |start group logo
+    +-->
+<div class="grouplogo">
+<a href="http://lucene.apache.org/"><img class="logoImage" alt="Lucene" src="images/lucene_green_150.gif" title="Apache Lucene"></a>
+</div>
+<!--+
+    |end group logo
+    +-->
+<!--+
+    |start Project Logo
+    +-->
+<div class="projectlogo">
+<a href="http://lucene.apache.org/hadoop/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Scalable Computing Platform"></a>
+</div>
+<!--+
+    |end Project Logo
+    +-->
+<!--+
+    |start Search
+    +-->
+<div class="searchbox">
+<form action="http://www.google.com/search" method="get" class="roundtopsmall">
+<input value="lucene.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
+                    <input name="Search" value="Search" type="submit">
+</form>
+</div>
+<!--+
+    |end search
+    +-->
+<!--+
+    |start Tabs
+    +-->
+<ul id="tabs">
+<li class="current">
+<a class="selected" href="index.html">Main</a>
+</li>
+<li>
+<a class="unselected" href="http://wiki.apache.org/lucene-hadoop">Wiki</a>
+</li>
+</ul>
+<!--+
+    |end Tabs
+    +-->
+</div>
+</div>
+<div id="main">
+<div id="publishedStrip">
+<!--+
+    |start Subtabs
+    +-->
+<div id="level2tabs"></div>
+<!--+
+    |end Endtabs
+    +-->
+<script type="text/javascript"><!--
+document.write("Last Published: " + document.lastModified);
+//  --></script>
+</div>
+<!--+
+    |breadtrail
+    +-->
+<div class="breadtrail">
+
+             &nbsp;
+           </div>
+<!--+
+    |start Menu, mainarea
+    +-->
+<!--+
+    |start Menu
+    +-->
+<div id="menu">
+<div onclick="SwitchMenu('menu_1.1', 'skin/')" id="menu_1.1Title" class="menutitle">Project</div>
+<div id="menu_1.1" class="menuitemgroup">
+<div class="menuitem">
+<a href="releases.html">Releases</a>
+</div>
+<div class="menuitem">
+<a href="releases.html#News">News</a>
+</div>
+<div class="menuitem">
+<a href="credits.html">Credits</a>
+</div>
+<div class="menuitem">
+<a href="http://www.cafepress.com/hadoop/">Buy Stuff</a>
+</div>
+</div>
+<div onclick="SwitchMenu('menu_selected_1.2', 'skin/')" id="menu_selected_1.2Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">Documentation</div>
+<div id="menu_selected_1.2" class="selectedmenuitemgroup" style="display: block;">
+<div class="menuitem">
+<a href="documentation.html">Overview</a>
+</div>
+<div class="menuitem">
+<a href="quickstart.html">Quickstart</a>
+</div>
+<div class="menuitem">
+<a href="cluster_setup.html">Cluster Setup</a>
+</div>
+<div class="menuitem">
+<a href="hdfs_design.html">HDFS Architecture</a>
+</div>
+<div class="menupage">
+<div class="menupagetitle">Map-Reduce Tutorial</div>
+</div>
+<div class="menuitem">
+<a href="api/index.html">API Docs</a>
+</div>
+<div class="menuitem">
+<a href="http://wiki.apache.org/lucene-hadoop/">Wiki</a>
+</div>
+<div class="menuitem">
+<a href="http://wiki.apache.org/lucene-hadoop/FAQ">FAQ</a>
+</div>
+<div class="menuitem">
+<a href="mailing_lists.html#Users">Mailing Lists</a>
+</div>
+</div>
+<div onclick="SwitchMenu('menu_1.3', 'skin/')" id="menu_1.3Title" class="menutitle">Developers</div>
+<div id="menu_1.3" class="menuitemgroup">
+<div class="menuitem">
+<a href="mailing_lists.html#Developers">Mailing Lists</a>
+</div>
+<div class="menuitem">
+<a href="issue_tracking.html">Issue Tracking</a>
+</div>
+<div class="menuitem">
+<a href="version_control.html">Version Control</a>
+</div>
+<div class="menuitem">
+<a href="http://lucene.zones.apache.org:8080/hudson/job/Hadoop-Nightly/">Nightly Build</a>
+</div>
+<div class="menuitem">
+<a href="irc.html">IRC Channel</a>
+</div>
+</div>
+<div id="credit"></div>
+<div id="roundbottom">
+<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
+<!--+
+  |alternative credits
+  +-->
+<div id="credit2"></div>
+</div>
+<!--+
+    |end Menu
+    +-->
+<!--+
+    |start content
+    +-->
+<div id="content">
+<div title="Portable Document Format" class="pdflink">
+<a class="dida" href="mapred_tutorial.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
+        PDF</a>
+</div>
+<h1>Hadoop Map-Reduce Tutorial</h1>
+<div id="minitoc-area">
+<ul class="minitoc">
+<li>
+<a href="#Purpose">Purpose</a>
+</li>
+<li>
+<a href="#Pre-requisites">Pre-requisites</a>
+</li>
+<li>
+<a href="#Overview">Overview</a>
+</li>
+<li>
+<a href="#Inputs+and+Outputs">Inputs and Outputs</a>
+</li>
+<li>
+<a href="#Example%3A+WordCount+v1.0">Example: WordCount v1.0</a>
+<ul class="minitoc">
+<li>
+<a href="#Source+Code">Source Code</a>
+</li>
+<li>
+<a href="#Usage">Usage</a>
+</li>
+<li>
+<a href="#Walk-through">Walk-through</a>
+</li>
+</ul>
+</li>
+<li>
+<a href="#Map-Reduce+-+User+Interfaces">Map-Reduce - User Interfaces</a>
+<ul class="minitoc">
+<li>
+<a href="#Payload">Payload</a>
+<ul class="minitoc">
+<li>
+<a href="#Mapper">Mapper</a>
+</li>
+<li>
+<a href="#Reducer">Reducer</a>
+</li>
+<li>
+<a href="#Partitioner">Partitioner</a>
+</li>
+<li>
+<a href="#Reporter">Reporter</a>
+</li>
+<li>
+<a href="#OutputCollector">OutputCollector</a>
+</li>
+</ul>
+</li>
+<li>
+<a href="#Job+Configuration">Job Configuration</a>
+</li>
+<li>
+<a href="#Job+Submission+and+Monitoring">Job Submission and Monitoring</a>
+<ul class="minitoc">
+<li>
+<a href="#Job+Control">Job Control</a>
+</li>
+</ul>
+</li>
+<li>
+<a href="#Job+Input">Job Input</a>
+<ul class="minitoc">
+<li>
+<a href="#InputSplit">InputSplit</a>
+</li>
+<li>
+<a href="#RecordReader">RecordReader</a>
+</li>
+</ul>
+</li>
+<li>
+<a href="#Job+Output">Job Output</a>
+<ul class="minitoc">
+<li>
+<a href="#Task+Side-Effect+Files">Task Side-Effect Files</a>
+</li>
+<li>
+<a href="#RecordWriter">RecordWriter</a>
+</li>
+</ul>
+</li>
+<li>
+<a href="#Other+Useful+Features">Other Useful Features</a>
+<ul class="minitoc">
+<li>
+<a href="#Counters">Counters</a>
+</li>
+<li>
+<a href="#DistributedCache">DistributedCache</a>
+</li>
+<li>
+<a href="#Tool">Tool</a>
+</li>
+<li>
+<a href="#IsolationRunner">IsolationRunner</a>
+</li>
+<li>
+<a href="#JobControl">JobControl</a>
+</li>
+</ul>
+</li>
+</ul>
+</li>
+<li>
+<a href="#Example%3A+WordCount+v2.0">Example: WordCount v2.0</a>
+<ul class="minitoc">
+<li>
+<a href="#Source+Code-N10A91">Source Code</a>
+</li>
+<li>
+<a href="#Sample+Runs">Sample Runs</a>
+</li>
+<li>
+<a href="#Salient+Points">Salient Points</a>
+</li>
+</ul>
+</li>
+</ul>
+</div>
+  
+    
+<a name="N1000C"></a><a name="Purpose"></a>
+<h2 class="h3">Purpose</h2>
+<div class="section">
+<p>This document comprehensively describes all user-facing facets of the 
+      Hadoop Map-Reduce framework and serves as a tutorial.
+      </p>
+</div>
+    
+    
+<a name="N10016"></a><a name="Pre-requisites"></a>
+<h2 class="h3">Pre-requisites</h2>
+<div class="section">
+<p>Ensure that Hadoop is installed, configured and running. More
+      details:</p>
+<ul>
+        
+<li>
+          Hadoop <a href="quickstart.html">Quickstart</a> for first-time users.
+        </li>
+        
+<li>
+          Hadoop <a href="cluster_setup.html">Cluster Setup</a> for large, 
+          distributed clusters.
+        </li>
+      
+</ul>
+</div>
+    
+    
+<a name="N10031"></a><a name="Overview"></a>
+<h2 class="h3">Overview</h2>
+<div class="section">
+<p>Hadoop Map-Reduce is a software framework for easily writing 
+      applications which process vast amounts of data (multi-terabyte data-sets) 
+      in-parallel on large clusters (thousands of nodes) of commodity 
+      hardware in a reliable, fault-tolerant manner.</p>
+<p>A Map-Reduce <em>job</em> usually splits the input data-set into 
+      independent chunks which are processed by the <em>map tasks</em> in a
+      completely parallel manner. The framework sorts the outputs of the maps, 
+      which are then input to the <em>reduce tasks</em>. Typically both the 
+      input and the output of the job are stored in a file-system. The framework 
+      takes care of scheduling tasks, monitoring them and re-executes the failed
+      tasks.</p>
+<p>Typically the compute nodes and the storage nodes are the same, that is, 
+      the Map-Reduce framework and the <a href="hdfs_design.html">Distributed 
+      FileSystem</a> are running on the same set of nodes. This configuration
+      allows the framework to effectively schedule tasks on the nodes where data 
+      is already present, resulting in very high aggregate bandwidth across the 
+      cluster.</p>
+<p>The Map-Reduce framework consists of a single master 
+      <span class="codefrag">JobTracker</span> and one slave <span class="codefrag">TaskTracker</span> per 
+      cluster-node. The master is responsible for scheduling the jobs' component 
+      tasks on the slaves, monitoring them and re-executing the failed tasks. The 
+      slaves execute the tasks as directed by the master.</p>
+<p>Minimally, applications specify the input/output locations and supply
+      <em>map</em> and <em>reduce</em> functions via implementations of
+      appropriate interfaces and/or abstract-classes. These, and other job 
+      parameters, comprise the <em>job configuration</em>. The Hadoop 
+      <em>job client</em> then submits the job (jar/executable etc.) and 
+      configuration to the <span class="codefrag">JobTracker</span> which then assumes the 
+      responsibility of distributing the software/configuration to the slaves, 
+      scheduling tasks and monitoring them, providing status and diagnostic 
+      information to the job-client.</p>
+<p>Although the Hadoop framework is implemented in Java<sup>TM</sup>, 
+      Map-Reduce applications need not be written in Java.</p>
+<ul>
+        
+<li>
+          
+<a href="api/org/apache/hadoop/streaming/package-summary.html">
+          Hadoop Streaming</a> is a utility which allows users to create and run 
+          jobs with any executables (e.g. shell utilities) as the mapper and/or 
+          the reducer. A sketch of a streaming command follows this list.
+        </li>
+        
+<li>
+          
+<a href="api/org/apache/hadoop/mapred/pipes/package-summary.html">
+          Hadoop Pipes</a> is a <a href="http://www.swig.org/">SWIG</a>-
+          compatible <em>C++ API</em> to implement Map-Reduce applications (non 
+          JNI<sup>TM</sup> based).
+        </li>
+      
+</ul>
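+<p>As an illustration, a hypothetical streaming invocation could use standard shell 
+      utilities as the mapper and reducer; the streaming jar name and location vary by 
+      release, so the paths below are placeholders:</p>
+<pre>
+$ bin/hadoop jar /path/to/hadoop-streaming.jar \
+    -input myInputDir \
+    -output myOutputDir \
+    -mapper /bin/cat \
+    -reducer /usr/bin/wc
+</pre>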
+</div>
+    
+    
+<a name="N1008A"></a><a name="Inputs+and+Outputs"></a>
+<h2 class="h3">Inputs and Outputs</h2>
+<div class="section">
+<p>The Map-Reduce framework operates exclusively on 
+      <span class="codefrag">&lt;key, value&gt;</span> pairs, that is, the framework views the 
+      input to the job as a set of <span class="codefrag">&lt;key, value&gt;</span> pairs and 
+      produces a set of <span class="codefrag">&lt;key, value&gt;</span> pairs as the output of 
+      the job, conceivably of different types.</p>
+<p>The <span class="codefrag">key</span> and <span class="codefrag">value</span> classes have to be 
+      serializable by the framework and hence need to implement the 
+      <a href="api/org/apache/hadoop/io/Writable.html">Writable</a> 
+      interface. Additionally, the <span class="codefrag">key</span> classes have to implement the
+      <a href="api/org/apache/hadoop/io/WritableComparable.html">
+      WritableComparable</a> interface to facilitate sorting by the framework.
+      </p>
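+<p>As a concrete illustration, a minimal custom key might look roughly like the
+      following sketch (the class name <span class="codefrag">YearWritable</span> is
+      purely illustrative and not part of Hadoop):</p>
+<p>
+<span class="codefrag">import java.io.*;</span><br>
+<span class="codefrag">import org.apache.hadoop.io.*;</span><br>
+<br>
+<span class="codefrag">// a key holding a single int; write/readFields handle serialization,</span><br>
+<span class="codefrag">// compareTo supplies the sort order used by the framework</span><br>
+<span class="codefrag">public class YearWritable implements WritableComparable {</span><br>
+<span class="codefrag">&nbsp;&nbsp;private int year;</span><br>
+<span class="codefrag">&nbsp;&nbsp;public void write(DataOutput out) throws IOException { out.writeInt(year); }</span><br>
+<span class="codefrag">&nbsp;&nbsp;public void readFields(DataInput in) throws IOException { year = in.readInt(); }</span><br>
+<span class="codefrag">&nbsp;&nbsp;public int compareTo(Object o) { return year - ((YearWritable) o).year; }</span><br>
+<span class="codefrag">}</span>
+</p>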
+<p>Input and Output types of a Map-Reduce job:</p>
+<p>
+        (input) <span class="codefrag">&lt;k1, v1&gt;</span> 
+        -&gt; 
+        <strong>map</strong> 
+        -&gt; 
+        <span class="codefrag">&lt;k2, v2&gt;</span> 
+        -&gt; 
+        <strong>combine</strong> 
+        -&gt; 
+        <span class="codefrag">&lt;k2, v2&gt;</span> 
+        -&gt; 
+        <strong>reduce</strong> 
+        -&gt; 
+        <span class="codefrag">&lt;k3, v3&gt;</span> (output)
+      </p>
+</div>
+
+    
+<a name="N100CC"></a><a name="Example%3A+WordCount+v1.0"></a>
+<h2 class="h3">Example: WordCount v1.0</h2>
+<div class="section">
+<p>Before we jump into the details, let's walk through an example Map-Reduce 
+      application to get a flavour for how such applications work.</p>
+<p>
+<span class="codefrag">WordCount</span> is a simple application that counts the number of
+      occurrences of each word in a given input set.</p>
+<a name="N100DA"></a><a name="Source+Code"></a>
+<h3 class="h4">Source Code</h3>
+<table class="ForrestTable" cellspacing="1" cellpadding="4">
+          
+<tr>
+            
+<th colspan="1" rowspan="1"></th>
+            <th colspan="1" rowspan="1">WordCount.java</th>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">1.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">package org.myorg;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">2.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">3.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import java.io.IOException;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">4.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import java.util.*;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">5.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">6.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import org.apache.hadoop.fs.Path;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">7.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import org.apache.hadoop.conf.*;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">8.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import org.apache.hadoop.io.*;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">9.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import org.apache.hadoop.mapred.*;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">10.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import org.apache.hadoop.util.*;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">11.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">12.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">public class WordCount {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">13.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">14.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">
+                public static class MapClass extends MapReduceBase 
+                implements Mapper&lt;LongWritable, Text, Text, IntWritable&gt; {
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">15.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                private final static IntWritable one = new IntWritable(1);
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">16.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">private Text word = new Text();</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">17.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">18.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                public void map(LongWritable key, Text value, 
+                OutputCollector&lt;Text, IntWritable&gt; output, 
+                Reporter reporter) throws IOException {
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">19.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">String line = value.toString();</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">20.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">StringTokenizer tokenizer = new StringTokenizer(line);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">21.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">while (tokenizer.hasMoreTokens()) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">22.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">word.set(tokenizer.nextToken());</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">23.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">output.collect(word, one);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">24.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">25.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">26.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">27.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">28.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">
+                public static class Reduce extends MapReduceBase implements 
+                Reducer&lt;Text, IntWritable, Text, IntWritable&gt; {
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">29.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                public void reduce(Text key, Iterator&lt;IntWritable&gt; values,
+                OutputCollector&lt;Text, IntWritable&gt; output, 
+                Reporter reporter) throws IOException {
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">30.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">int sum = 0;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">31.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">while (values.hasNext()) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">32.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">sum += values.next().get();</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">33.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">34.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">output.collect(key, new IntWritable(sum));</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">35.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">36.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">37.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">38.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">
+                public static void main(String[] args) throws Exception {
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">39.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                JobConf conf = new JobConf(WordCount.class);
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">40.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setJobName("wordcount");</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">41.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">42.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setOutputKeyClass(Text.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">43.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setOutputValueClass(IntWritable.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">44.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">45.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setMapperClass(MapClass.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">46.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setCombinerClass(Reduce.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">47.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setReducerClass(Reduce.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">48.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">49.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setInputFormat(TextInputFormat.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">50.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setOutputFormat(TextOutputFormat.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">51.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">52.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setInputPath(new Path(args[0]));</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">53.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setOutputPath(new Path(args[1]));</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">54.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">55.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">JobClient.runJob(conf);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">56.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">57.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">59.</td>
+<td colspan="1" rowspan="1">58.</td>
+          
+</tr>
+        
+</table>
+<a name="N1045C"></a><a name="Usage"></a>
+<h3 class="h4">Usage</h3>
+<p>Assuming <span class="codefrag">HADOOP_HOME</span> is the root of the installation and 
+        <span class="codefrag">HADOOP_VERSION</span> is the Hadoop version installed, compile 
+        <span class="codefrag">WordCount.java</span> and create a jar:</p>
+<p>
+          
+<span class="codefrag">
+            $ javac -classpath ${HADOOP_HOME}/hadoop-${HADOOP_VERSION}-core.jar 
+              WordCount.java
+          </span>
+<br>
+          
+<span class="codefrag">$ jar -cvf /usr/joe/wordcount.jar WordCount*.class</span> 
+        
+</p>
+<p>Assuming that:</p>
+<ul>
+          
+<li>
+            
+<span class="codefrag">/usr/joe/wordcount/input</span>  - input directory in HDFS
+          </li>
+          
+<li>
+            
+<span class="codefrag">/usr/joe/wordcount/output</span> - output directory in HDFS
+          </li>
+        
+</ul>
+<p>Sample text-files as input:</p>
+<p>
+          
+<span class="codefrag">$ bin/hadoop dfs -ls /usr/joe/wordcount/input/</span>
+<br>
+          
+<span class="codefrag">/usr/joe/wordcount/input/file01</span>
+<br>
+          
+<span class="codefrag">/usr/joe/wordcount/input/file02</span>
+<br>
+          
+<br>
+          
+<span class="codefrag">$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01</span>
+<br>
+          
+<span class="codefrag">Hello World Bye World</span>
+<br>
+          
+<br>
+          
+<span class="codefrag">$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02</span>
+<br>
+          
+<span class="codefrag">Hello Hadoop Goodbye Hadoop</span>
+        
+</p>
+<p>Run the application:</p>
+<p>
+          
+<span class="codefrag">
+            $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount 
+              /usr/joe/wordcount/input /usr/joe/wordcount/output 
+          </span>
+        
+</p>
+<p>Output:</p>
+<p>
+          
+<span class="codefrag">
+            $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
+          </span>
+          
+<br>
+          
+<span class="codefrag">Bye    1</span>
+<br>
+          
+<span class="codefrag">Goodbye    1</span>
+<br>
+          
+<span class="codefrag">Hadoop    2</span>
+<br>
+          
+<span class="codefrag">Hello    2</span>
+<br>
+          
+<span class="codefrag">World    2</span>
+<br>
+        
+</p>
+<a name="N104D8"></a><a name="Walk-through"></a>
+<h3 class="h4">Walk-through</h3>
+<p>The <span class="codefrag">WordCount</span> application is quite straight-forward.</p>
+<p>The <span class="codefrag">Mapper</span> implementation (lines 14-26), via the 
+        <span class="codefrag">map</span> method (lines 18-25), processes one line at a time,
+        as provided by the specified <span class="codefrag">TextInputFormat</span> (line 49). 
+        It then splits the line into tokens separated by whitespace, via the 
+        <span class="codefrag">StringTokenizer</span>, and emits a key-value pair of 
+        <span class="codefrag">&lt; &lt;word&gt;, 1&gt;</span>.</p>
+<p>
+          For the given sample input the first map emits:<br>
+          
+<span class="codefrag">&lt; Hello, 1&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; World, 1&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; Bye, 1&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; World, 1&gt;</span>
+<br>
+        
+</p>
+<p>
+          The second map emits:<br>
+          
+<span class="codefrag">&lt; Hello, 1&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; Hadoop, 1&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; Goodbye, 1&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; Hadoop, 1&gt;</span>
+<br>
+        
+</p>
+<p>We'll learn more about the number of maps spawned for a given job, and
+        how to control them in a fine-grained manner, a bit later in the 
+        tutorial.</p>
+<p>
+<span class="codefrag">WordCount</span> also specifies a <span class="codefrag">combiner</span> (line 
+        46). Hence, the output of each map is passed through the local combiner 
+        (which is the same as the <span class="codefrag">Reducer</span>, as per the job 
+        configuration) for local aggregation, after being sorted on the 
+        <em>key</em>s.</p>
+<p>
+          The output of the first map:<br>
+          
+<span class="codefrag">&lt; Bye, 1&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; Hello, 1&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; World, 2&gt;</span>
+<br>
+        
+</p>
+<p>
+          The output of the second map:<br>
+          
+<span class="codefrag">&lt; Goodbye, 1&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; Hadoop, 2&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; Hello, 1&gt;</span>
+<br>
+        
+</p>
+<p>The <span class="codefrag">Reducer</span> implementation (lines 28-36), via the
+        <span class="codefrag">reduce</span> method (lines 29-35), just sums up the values,
+        which are the occurrence counts for each key (i.e. the words in this example).
+        </p>
+<p>
+          Thus the output of the job is:<br>
+          
+<span class="codefrag">&lt; Bye, 1&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; Goodbye, 1&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; Hadoop, 2&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; Hello, 2&gt;</span>
+<br>
+          
+<span class="codefrag">&lt; World, 2&gt;</span>
+<br>
+        
+</p>
+<p>The <span class="codefrag">main</span> method specifies various facets of the job, such 
+        as the input/output paths (passed via the command line), key/value 
+        types, input/output formats etc., in the <span class="codefrag">JobConf</span>.
+        It then calls <span class="codefrag">JobClient.runJob</span> (line 55) to submit the job
+        and monitor its progress.</p>
+<p>We'll learn more about <span class="codefrag">JobConf</span>, <span class="codefrag">JobClient</span>,
+        <span class="codefrag">Tool</span> and other interfaces and classes a bit later in the 
+        tutorial.</p>
+</div>
+    
+    
+<a name="N1058F"></a><a name="Map-Reduce+-+User+Interfaces"></a>
+<h2 class="h3">Map-Reduce - User Interfaces</h2>
+<div class="section">
+<p>This section provides a reasonable amount of detail on every user-facing 
+      aspect of the Map-Reduce framework. This should help users implement, 
+      configure and tune their jobs in a fine-grained manner. However, please 
+      note that the javadoc for each class/interface remains the most 
+      comprehensive documentation available; this is only meant to be a tutorial.
+      </p>
+<p>Let us first take the <span class="codefrag">Mapper</span> and <span class="codefrag">Reducer</span> 
+      interfaces. Applications typically implement them to provide the 
+      <span class="codefrag">map</span> and <span class="codefrag">reduce</span> methods.</p>
+<p>We will then discuss other core interfaces including 
+      <span class="codefrag">JobConf</span>, <span class="codefrag">JobClient</span>, <span class="codefrag">Partitioner</span>, 
+      <span class="codefrag">OutputCollector</span>, <span class="codefrag">Reporter</span>, 
+      <span class="codefrag">InputFormat</span>, <span class="codefrag">OutputFormat</span> and others.</p>
+<p>Finally, we will wrap up by discussing some useful features of the
+      framework such as the <span class="codefrag">DistributedCache</span>, 
+      <span class="codefrag">IsolationRunner</span> etc.</p>
+<a name="N105C8"></a><a name="Payload"></a>
+<h3 class="h4">Payload</h3>
+<p>Applications typically implement the <span class="codefrag">Mapper</span> and 
+        <span class="codefrag">Reducer</span> interfaces to provide the <span class="codefrag">map</span> and 
+        <span class="codefrag">reduce</span> methods. These form the core of the job.</p>
+<a name="N105DD"></a><a name="Mapper"></a>
+<h4>Mapper</h4>
+<p>
+<a href="api/org/apache/hadoop/mapred/Mapper.html">
+          Mapper</a> maps input key/value pairs to a set of intermediate 
+          key/value pairs.</p>
+<p>Maps are the individual tasks that transform input records into 
+          intermediate records. The transformed intermediate records do not need
+          to be of the same type as the input records. A given input pair may 
+          map to zero or many output pairs.</p>
+<p>The Hadoop Map-Reduce framework spawns one map task for each 
+          <span class="codefrag">InputSplit</span> generated by the <span class="codefrag">InputFormat</span> for 
+          the job.</p>
+<p>Overall, <span class="codefrag">Mapper</span> implementations are passed the 
+          <span class="codefrag">JobConf</span> for the job via the 
+          <a href="api/org/apache/hadoop/mapred/JobConfigurable.html#configure(org.apache.hadoop.mapred.JobConf)">
+          JobConfigurable.configure(JobConf)</a> method and override it to 
+          initialize themselves. The framework then calls 
+          <a href="api/org/apache/hadoop/mapred/Mapper.html#map(K1, V1, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)">
+          map(WritableComparable, Writable, OutputCollector, Reporter)</a> for 
+          each key/value pair in the <span class="codefrag">InputSplit</span> for that task.        
+          Applications can then override the
+          <a href="api/org/apache/hadoop/io/Closeable.html#close()">
+          Closeable.close()</a> method to perform any required cleanup.</p>
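+<p>A rough sketch of this lifecycle, using the WordCount-style types (the class name 
+          <span class="codefrag">UpperCaseMapper</span> and the 
+          <span class="codefrag">myorg.uppercase</span> parameter are illustrative, not 
+          standard Hadoop names):</p>
+<p>
+<span class="codefrag">import java.io.IOException;</span><br>
+<span class="codefrag">import org.apache.hadoop.io.*;</span><br>
+<span class="codefrag">import org.apache.hadoop.mapred.*;</span><br>
+<br>
+<span class="codefrag">public class UpperCaseMapper extends MapReduceBase</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;implements Mapper&lt;LongWritable, Text, Text, IntWritable&gt; {</span><br>
+<span class="codefrag">&nbsp;&nbsp;private boolean toUpper;</span><br>
+<span class="codefrag">&nbsp;&nbsp;// called once per task, before any calls to map()</span><br>
+<span class="codefrag">&nbsp;&nbsp;public void configure(JobConf job) { toUpper = job.getBoolean("myorg.uppercase", false); }</span><br>
+<span class="codefrag">&nbsp;&nbsp;public void map(LongWritable key, Text value,</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;OutputCollector&lt;Text, IntWritable&gt; output, Reporter reporter) throws IOException {</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;String line = toUpper ? value.toString().toUpperCase() : value.toString();</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;output.collect(new Text(line), new IntWritable(1));</span><br>
+<span class="codefrag">&nbsp;&nbsp;}</span><br>
+<span class="codefrag">&nbsp;&nbsp;// called once after the last map(); release any per-task resources here</span><br>
+<span class="codefrag">&nbsp;&nbsp;public void close() throws IOException { }</span><br>
+<span class="codefrag">}</span>
+</p>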
+<p>Output pairs do not need to be of the same types as input pairs. A 
+          given input pair may map to zero or many output pairs.  Output pairs 
+          are collected with calls to 
+          <a href="api/org/apache/hadoop/mapred/OutputCollector.html#collect(K, V)">
+          OutputCollector.collect(WritableComparable,Writable)</a>.</p>
+<p>Applications can use the <span class="codefrag">Reporter</span> to report 
+          progress, set application-level status messages and update 
+          <span class="codefrag">Counters</span>, or just indicate that they are alive.</p>
+<p>All intermediate values associated with a given output key are 
+          subsequently grouped by the framework, and passed to the
+          <span class="codefrag">Reducer</span>(s) to  determine the final output. Users can 
+          control the grouping by specifying a <span class="codefrag">Comparator</span> via 
+          <a href="api/org/apache/hadoop/mapred/JobConf.html#setOutputKeyComparatorClass(java.lang.Class)">
+          JobConf.setOutputKeyComparatorClass(Class)</a>.</p>
+<p>The <span class="codefrag">Mapper</span> outputs are sorted and then 
+          partitioned per <span class="codefrag">Reducer</span>. The total number of partitions is 
+          the same as the number of reduce tasks for the job. Users can control 
+          which keys (and hence records) go to which <span class="codefrag">Reducer</span> by 
+          implementing a custom <span class="codefrag">Partitioner</span>.</p>
+<p>Users can optionally specify a <span class="codefrag">combiner</span>, via 
+          <a href="api/org/apache/hadoop/mapred/JobConf.html#setCombinerClass(java.lang.Class)">
+          JobConf.setCombinerClass(Class)</a>, to perform local aggregation of 
+          the intermediate outputs, which helps to cut down the amount of data 
+          transferred from the <span class="codefrag">Mapper</span> to the <span class="codefrag">Reducer</span>.
+          </p>
+<p>The intermediate, sorted outputs are always stored in files of 
+          <a href="api/org/apache/hadoop/io/SequenceFile.html">
+          SequenceFile</a> format. Applications can control if, and how, the 
+          intermediate outputs are to be compressed and the 
+          <a href="api/org/apache/hadoop/io/compress/CompressionCodec.html">
+          CompressionCodec</a> to be used via the <span class="codefrag">JobConf</span>.
+          </p>
+<a name="N10657"></a><a name="How+Many+Maps%3F"></a>
+<h5>How Many Maps?</h5>
+<p>The number of maps is usually driven by the total size of the 
+            inputs, that is, the total number of blocks of the input files.</p>
+<p>The right level of parallelism for maps seems to be around 10-100 
+            maps per-node, although it has been set up to 300 maps for very 
+            cpu-light map tasks. Task setup takes a while, so it is best if the 
+            maps take at least a minute to execute.</p>
+<p>Thus, if you expect 10TB of input data and have a blocksize of 
+            <span class="codefrag">128MB</span>, you'll end up with 82,000 maps, unless 
+            <a href="api/org/apache/hadoop/mapred/JobConf.html#setNumMapTasks(int)">
+            setNumMapTasks(int)</a> (which only provides a hint to the framework) 
+            is used to set it even higher.</p>
+<a name="N1066F"></a><a name="Reducer"></a>
+<h4>Reducer</h4>
+<p>
+<a href="api/org/apache/hadoop/mapred/Reducer.html">
+          Reducer</a> reduces a set of intermediate values which share a key to
+          a smaller set of values.</p>
+<p>The number of reduces for the job is set by the user 
+          via <a href="api/org/apache/hadoop/mapred/JobConf.html#setNumReduceTasks(int)">
+          JobConf.setNumReduceTasks(int)</a>.</p>
+<p>Overall, <span class="codefrag">Reducer</span> implementations are passed the 
+          <span class="codefrag">JobConf</span> for the job via the 
+          <a href="api/org/apache/hadoop/mapred/JobConfigurable.html#configure(org.apache.hadoop.mapred.JobConf)">
+          JobConfigurable.configure(JobConf)</a> method and can override it to 
+          initialize themselves. The framework then calls   
+          <a href="api/org/apache/hadoop/mapred/Reducer.html#reduce(K2, java.util.Iterator, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)">
+          reduce(WritableComparable, Iterator, OutputCollector, Reporter)</a>
+          method for each <span class="codefrag">&lt;key, (list of values)&gt;</span> 
+          pair in the grouped inputs. Applications can then override the           
+          <a href="api/org/apache/hadoop/io/Closeable.html#close()">
+          Closeable.close()</a> method to perform any required cleanup.</p>
+<p>
+<span class="codefrag">Reducer</span> has 3 primary phases: shuffle, sort and reduce.
+          </p>
+<a name="N1069F"></a><a name="Shuffle"></a>
+<h5>Shuffle</h5>
+<p>Input to the <span class="codefrag">Reducer</span> is the sorted output of the
+            mappers. In this phase the framework fetches the relevant partition 
+            of the output of all the mappers, via HTTP.</p>
+<a name="N106AC"></a><a name="Sort"></a>
+<h5>Sort</h5>
+<p>The framework groups <span class="codefrag">Reducer</span> inputs by keys (since 
+            different mappers may have output the same key) in this stage.</p>
+<p>The shuffle and sort phases occur simultaneously; while 
+            map-outputs are being fetched they are merged.</p>
+<a name="N106BB"></a><a name="Secondary+Sort"></a>
+<h5>Secondary Sort</h5>
+<p>If equivalence rules for grouping the intermediate keys are 
+              required to be different from those for grouping keys before 
+              reduction, then one may specify a <span class="codefrag">Comparator</span> via 
+              <a href="api/org/apache/hadoop/mapred/JobConf.html#setOutputValueGroupingComparator(java.lang.Class)">
+              JobConf.setOutputValueGroupingComparator(Class)</a>. Since 
+              <a href="api/org/apache/hadoop/mapred/JobConf.html#setOutputKeyComparatorClass(java.lang.Class)">
+              JobConf.setOutputKeyComparatorClass(Class)</a> can be used to 
+              control how intermediate keys are sorted, these can be used in 
+              conjunction to simulate <em>secondary sort on values</em>.</p>
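+<p>A short, hedged sketch of how the two comparators might be wired up 
+              (<span class="codefrag">MyJob</span>, <span class="codefrag">FullKeyComparator</span> 
+              and <span class="codefrag">NaturalKeyComparator</span> stand in for 
+              user-supplied classes):</p>
+<p>
+<span class="codefrag">JobConf conf = new JobConf(MyJob.class);</span><br>
+<span class="codefrag">// full comparator: defines the sort order of intermediate keys</span><br>
+<span class="codefrag">conf.setOutputKeyComparatorClass(FullKeyComparator.class);</span><br>
+<span class="codefrag">// grouping comparator: defines which keys are presented to a single reduce() call</span><br>
+<span class="codefrag">conf.setOutputValueGroupingComparator(NaturalKeyComparator.class);</span>
+</p>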
+<a name="N106D4"></a><a name="Reduce"></a>
+<h5>Reduce</h5>
+<p>In this phase the 
+            <a href="api/org/apache/hadoop/mapred/Reducer.html#reduce(K2, java.util.Iterator, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)">
+            reduce(WritableComparable, Iterator, OutputCollector, Reporter)</a>
+            method is called for each <span class="codefrag">&lt;key, (list of values)&gt;</span> 
+            pair in the grouped inputs.</p>
+<p>The output of the reduce task is typically written to the 
+            <a href="api/org/apache/hadoop/fs/FileSystem.html">
+            FileSystem</a> via 
+            <a href="api/org/apache/hadoop/mapred/OutputCollector.html#collect(K, V)">
+            OutputCollector.collect(WritableComparable, Writable)</a>.</p>
+<p>Applications can use the <span class="codefrag">Reporter</span> to report 
+            progress, set application-level status messages and update 
+            <span class="codefrag">Counters</span>, or just indicate that they are alive.</p>
+<p>The output of the <span class="codefrag">Reducer</span> is <em>not sorted</em>.</p>
+<a name="N10702"></a><a name="How+Many+Reduces%3F"></a>
+<h5>How Many Reduces?</h5>
+<p>The right number of reduces seems to be <span class="codefrag">0.95</span> or 
+            <span class="codefrag">1.75</span> multiplied by (&lt;<em>no. of nodes</em>&gt; * 
+            <span class="codefrag">mapred.tasktracker.tasks.maximum</span>).</p>
+<p>With <span class="codefrag">0.95</span> all of the reduces can launch immediately 
+            and start transferring map outputs as the maps finish. With 
+            <span class="codefrag">1.75</span> the faster nodes will finish their first round of 
+            reduces and launch a second wave of reduces doing a much better job 
+            of load balancing.</p>
+<p>Increasing the number of reduces increases the framework overhead, 
+            but improves load balancing and lowers the cost of failures.</p>
+<p>The scaling factors above are slightly less than whole numbers to 
+            reserve a few reduce slots in the framework for speculative-tasks and
+            failed tasks.</p>
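+<p>As a worked example, on a hypothetical 20-node cluster with 
+            <span class="codefrag">mapred.tasktracker.tasks.maximum</span> set to 2, the 
+            factors give 0.95 * 20 * 2 = 38 or 1.75 * 20 * 2 = 70 reduces, which would 
+            then be set in the job configuration as, say:</p>
+<p>
+<span class="codefrag">conf.setNumReduceTasks(38);  // 0.95 * 20 nodes * 2 reduce slots per node</span>
+</p>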
+<a name="N10727"></a><a name="Reducer+NONE"></a>
+<h5>Reducer NONE</h5>
+<p>It is legal to set the number of reduce-tasks to <em>zero</em> if 
+            no reduction is desired.</p>
+<p>In this case the outputs of the map-tasks go directly to the
+            <span class="codefrag">FileSystem</span>, into the output path set by 
+            <a href="api/org/apache/hadoop/mapred/JobConf.html#setOutputPath(org.apache.hadoop.fs.Path)">
+            setOutputPath(Path)</a>. The framework does not sort the 
+            map-outputs before writing them out to the <span class="codefrag">FileSystem</span>.
+            </p>
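+<p>For example, a map-only job would be configured with:</p>
+<p>
+<span class="codefrag">conf.setNumReduceTasks(0);  // no reduces: map outputs go straight to the output path</span>
+</p>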
+<a name="N10742"></a><a name="Partitioner"></a>
+<h4>Partitioner</h4>
+<p>
+<a href="api/org/apache/hadoop/mapred/Partitioner.html">
+          Partitioner</a> partitions the key space.</p>
+<p>Partitioner controls the partitioning of the keys of the 
+          intermediate map-outputs. The key (or a subset of the key) is used to 
+          derive the partition, typically by a <em>hash function</em>. The total 
+          number of partitions is the same as the number of reduce tasks for the 
+          job. Hence this controls which of the <span class="codefrag">m</span> reduce tasks the 
+          intermediate key (and hence the record) is sent to for reduction.</p>
+<p>
+<a href="api/org/apache/hadoop/mapred/lib/HashPartitioner.html">
+          HashPartitioner</a> is the default <span class="codefrag">Partitioner</span>.</p>
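+<p>A minimal sketch of a custom partitioner for the WordCount types (the class name 
+          and the first-letter routing rule are purely illustrative):</p>
+<p>
+<span class="codefrag">import org.apache.hadoop.io.*;</span><br>
+<span class="codefrag">import org.apache.hadoop.mapred.*;</span><br>
+<br>
+<span class="codefrag">public class FirstLetterPartitioner implements Partitioner&lt;Text, IntWritable&gt; {</span><br>
+<span class="codefrag">&nbsp;&nbsp;public void configure(JobConf job) { }  // no per-job setup needed</span><br>
+<span class="codefrag">&nbsp;&nbsp;public int getPartition(Text key, IntWritable value, int numPartitions) {</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;// route all words starting with the same character to the same reduce</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;String s = key.toString();</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;int c = (s.length() == 0) ? 0 : s.charAt(0);</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;return (c &amp; Integer.MAX_VALUE) % numPartitions;</span><br>
+<span class="codefrag">&nbsp;&nbsp;}</span><br>
+<span class="codefrag">}</span>
+</p>
+<p>It would then be plugged in via 
+          <span class="codefrag">conf.setPartitionerClass(FirstLetterPartitioner.class)</span>.</p>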
+<a name="N10761"></a><a name="Reporter"></a>
+<h4>Reporter</h4>
+<p>
+<a href="api/org/apache/hadoop/mapred/Reporter.html">
+          Reporter</a> is a facility for Map-Reduce applications to report 
+          progress, set application-level status messages and update 
+          <span class="codefrag">Counters</span>.</p>
+<p>
+<span class="codefrag">Mapper</span> and <span class="codefrag">Reducer</span> implementations can use 
+          the <span class="codefrag">Reporter</span> to report progress or just indicate 
+          that they are alive. In scenarios where the application takes a
+          significant amount of time to process individual key/value pairs, 
+          this is crucial since the framework might assume that the task has 
+          timed-out and kill that task. Another way to avoid this is to 
+          set the configuration parameter <span class="codefrag">mapred.task.timeout</span> to a
+          high-enough value (or even set it to <em>zero</em> for no time-outs).
+          </p>
+<p>Applications can also update <span class="codefrag">Counters</span> using the 
+          <span class="codefrag">Reporter</span>.</p>
+<a name="N1078B"></a><a name="OutputCollector"></a>
+<h4>OutputCollector</h4>
+<p>
+<a href="api/org/apache/hadoop/mapred/OutputCollector.html">
+          OutputCollector</a> is a generalization of the facility provided by
+          the Map-Reduce framework to collect data output by the 
+          <span class="codefrag">Mapper</span> or the <span class="codefrag">Reducer</span> (either the 
+          intermediate outputs or the output of the job).</p>
+<p>Hadoop Map-Reduce comes bundled with a 
+        <a href="api/org/apache/hadoop/mapred/lib/package-summary.html">
+        library</a> of generally useful mappers, reducers, and partitioners.</p>
+<a name="N107A6"></a><a name="Job+Configuration"></a>
+<h3 class="h4">Job Configuration</h3>
+<p>
+<a href="api/org/apache/hadoop/mapred/JobConf.html">
+        JobConf</a> represents a Map-Reduce job configuration.</p>
+<p>
+<span class="codefrag">JobConf</span> is the primary interface for a user to describe
+        a map-reduce job to the Hadoop framework for execution. The framework 
+        tries to faithfully execute the job as described by <span class="codefrag">JobConf</span>, 
+        however:</p>
+<ul>
+          
+<li>
+            Some configuration parameters may have been marked as 
+            <a href="api/org/apache/hadoop/conf/Configuration.html#FinalParams">
+            final</a> by administrators and hence cannot be altered.
+          </li>
+          
+<li>
+            While some job parameters are straight-forward to set (e.g. 
+            <a href="api/org/apache/hadoop/mapred/JobConf.html#setNumReduceTasks(int)">
+            setNumReduceTasks(int)</a>), other parameters interact subtly with 
+            the rest of the framework and/or job configuration and are 
+            more complex to set (e.g. 
+            <a href="api/org/apache/hadoop/mapred/JobConf.html#setNumMapTasks(int)">
+            setNumMapTasks(int)</a>).
+          </li>
+        
+</ul>
+<p>
+<span class="codefrag">JobConf</span> is typically used to specify the 
+        <span class="codefrag">Mapper</span>, combiner (if any), <span class="codefrag">Partitioner</span>, 
+        <span class="codefrag">Reducer</span>, <span class="codefrag">InputFormat</span> and 
+        <span class="codefrag">OutputFormat</span> implementations. <span class="codefrag">JobConf</span> also 
+        indicates the set of input files 
+        (<a href="api/org/apache/hadoop/mapred/JobConf.html#setInputPath(org.apache.hadoop.fs.Path)">setInputPath(Path)</a>/<a href="api/org/apache/hadoop/mapred/JobConf.html#addInputPath(org.apache.hadoop.fs.Path)">addInputPath(Path)</a>)
+        and where the output files should be written
+        (<a href="api/org/apache/hadoop/mapred/JobConf.html#setOutputPath(org.apache.hadoop.fs.Path)">setOutputPath(Path)</a>).</p>
+<p>Optionally, <span class="codefrag">JobConf</span> is used to specify other advanced 
+        facets of the job such as the <span class="codefrag">Comparator</span> to be used, files 
+        to be put in the <span class="codefrag">DistributedCache</span>, whether intermediate 
+        and/or job outputs are to be compressed (and how), debugging via 
+        user-provided scripts
+        (<a href="api/org/apache/hadoop/mapred/JobConf.html#setMapDebugScript(java.lang.String)">setMapDebugScript(String)</a>/<a href="api/org/apache/hadoop/mapred/JobConf.html#setReduceDebugScript(java.lang.String)">setReduceDebugScript(String)</a>) 
+        , whether job tasks can be executed in a <em>speculative</em> manner 
+        (<a href="api/org/apache/hadoop/mapred/JobConf.html#setSpeculativeExecution(boolean)">setSpeculativeExecution(boolean)</a>)
+        , maximum number of attempts per task
+        (<a href="api/org/apache/hadoop/mapred/JobConf.html#setMaxMapAttempts(int)">setMaxMapAttempts(int)</a>/<a href="api/org/apache/hadoop/mapred/JobConf.html#setMaxReduceAttempts(int)">setMaxReduceAttempts(int)</a>) 
+        , the percentage of task failures that can be tolerated by the job
+        (<a href="api/org/apache/hadoop/mapred/JobConf.html#setMaxMapTaskFailuresPercent(int)">setMaxMapTaskFailuresPercent(int)</a>/<a href="api/org/apache/hadoop/mapred/JobConf.html#setMaxReduceTaskFailuresPercent(int)">setMaxReduceTaskFailuresPercent(int)</a>) 
+        etc.</p>
+<p>Of course, users can use 
+        <a href="api/org/apache/hadoop/conf/Configuration.html#set(java.lang.String, java.lang.String)">set(String, String)</a>/<a href="api/org/apache/hadoop/conf/Configuration.html#get(java.lang.String, java.lang.String)">get(String, String)</a>
+        to set/get arbitrary parameters needed by applications. However, use the 
+        <span class="codefrag">DistributedCache</span> for large amounts of (read-only) data.</p>
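+<p>As a small, hedged illustration (all class and parameter names here are made up), 
+        a driver might configure a job along these lines:</p>
+<p>
+<span class="codefrag">JobConf conf = new JobConf(MyAnalysis.class);</span><br>
+<span class="codefrag">conf.setJobName("my-analysis");</span><br>
+<span class="codefrag">conf.setMapperClass(MyMapper.class);</span><br>
+<span class="codefrag">conf.setCombinerClass(MyReducer.class);</span><br>
+<span class="codefrag">conf.setReducerClass(MyReducer.class);</span><br>
+<span class="codefrag">conf.setNumReduceTasks(10);</span><br>
+<span class="codefrag">// an arbitrary application parameter, read back in configure() via conf.get("myorg.threshold")</span><br>
+<span class="codefrag">conf.set("myorg.threshold", "42");</span>
+</p>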
+<a name="N1082C"></a><a name="Job+Submission+and+Monitoring"></a>
+<h3 class="h4">Job Submission and Monitoring</h3>
+<p>
+<a href="api/org/apache/hadoop/mapred/JobClient.html">
+        JobClient</a> is the primary interface by which user-job interacts
+        with the <span class="codefrag">JobTracker</span>.</p>
+<p>
+<span class="codefrag">JobClient</span> provides facilities to submit jobs, track their 
+        progress, access component-tasks' reports/logs, get the Map-Reduce 
+        cluster's status information and so on.</p>
+<p>The job submission process involves:</p>
+<ol>
+          
+<li>Checking the input and output specifications of the job.</li>
+          
+<li>Computing the <span class="codefrag">InputSplit</span> values for the job.</li>
+          
+<li>
+            Setting up the requisite accounting information for the 
+            <span class="codefrag">DistributedCache</span> of the job, if necessary.
+          </li>
+          
+<li>
+            Copying the job's jar and configuration to the map-reduce system 
+            directory on the <span class="codefrag">FileSystem</span>.
+          </li>
+          
+<li>
+            Submitting the job to the <span class="codefrag">JobTracker</span> and optionally 
+            monitoring its status.
+          </li>
+        
+</ol>
+<p>Normally the user creates the application, describes various facets 
+        of the job via <span class="codefrag">JobConf</span>, and then uses the 
+        <span class="codefrag">JobClient</span> to submit the job and monitor its progress.</p>
+<a name="N1086A"></a><a name="Job+Control"></a>
+<h4>Job Control</h4>
+<p>Users may need to chain map-reduce jobs to accomplish complex
+          tasks which cannot be done via a single map-reduce job. This is fairly
+          easy since the output of the job typically goes to the distributed 
+          file-system, and the output, in turn, can be used as the input for the 
+          next job.</p>
+<p>However, this also means that the onus of ensuring that jobs are 
+          complete (success/failure) lies squarely on the clients. In such 
+          cases, the various job-control options are:</p>
+<ul>
+            
+<li>
+              
+<a href="api/org/apache/hadoop/mapred/JobClient.html#runJob(org.apache.hadoop.mapred.JobConf)">
+              runJob(JobConf)</a> : Submits the job and returns only after the 
+              job has completed.
+            </li>
+            
+<li>
+              
+<a href="api/org/apache/hadoop/mapred/JobClient.html#submitJob(org.apache.hadoop.mapred.JobConf)">
+              submitJob(JobConf)</a> : Only submits the job; the client can then poll the 
+              returned handle to the 
+              <a href="api/org/apache/hadoop/mapred/RunningJob.html">
+              RunningJob</a> to query status and make scheduling decisions 
+              (see the sketch after this list).
+            </li>
+            
+<li>
+              
+<a href="api/org/apache/hadoop/mapred/JobConf.html#setJobEndNotificationURI(java.lang.String)">
+              JobConf.setJobEndNotificationURI(String)</a> : Sets up a 
+              notification upon job-completion, thus avoiding polling.
+            </li>
+          
+</ul>
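+<p>A rough sketch of the polling approach, assuming two already-configured 
+          <span class="codefrag">JobConf</span> objects, <span class="codefrag">firstJob</span> 
+          and <span class="codefrag">secondJob</span>, inside a driver method declared to 
+          throw <span class="codefrag">Exception</span>:</p>
+<p>
+<span class="codefrag">JobClient client = new JobClient(firstJob);</span><br>
+<span class="codefrag">RunningJob running = client.submitJob(firstJob);  // returns immediately</span><br>
+<span class="codefrag">while (!running.isComplete()) {</span><br>
+<span class="codefrag">&nbsp;&nbsp;Thread.sleep(5000);  // poll every five seconds</span><br>
+<span class="codefrag">}</span><br>
+<span class="codefrag">if (running.isSuccessful()) {</span><br>
+<span class="codefrag">&nbsp;&nbsp;JobClient.runJob(secondJob);  // chain the dependent job</span><br>
+<span class="codefrag">}</span>
+</p>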
+<a name="N10894"></a><a name="Job+Input"></a>
+<h3 class="h4">Job Input</h3>
+<p>
+<a href="api/org/apache/hadoop/mapred/InputFormat.html">
+        InputFormat</a> describes the input-specification for a Map-Reduce job.
+        </p>
+<p>The Map-Reduce framework relies on the <span class="codefrag">InputFormat</span> of 
+        the job to:</p>
+<ol>
+          
+<li>Validate the input-specification of the job.</li>
+          
+<li>
+            Split-up the input file(s) into logical <span class="codefrag">InputSplit</span> 
+            instances, each of which is then assigned to an individual 
+            <span class="codefrag">Mapper</span>.
+          </li>
+          
+<li>
+            Provide the <span class="codefrag">RecordReader</span> implementation used to
+            glean input records from the logical <span class="codefrag">InputSplit</span> for 
+            processing by the <span class="codefrag">Mapper</span>.
+          </li>
+        
+</ol>
+<p>The default behavior of file-based <span class="codefrag">InputFormat</span>
+        implementations, typically sub-classes of 
+        <a href="api/org/apache/hadoop/mapred/FileInputFormat.html">
+        FileInputFormat</a>, is to split the input into <em>logical</em> 
+        <span class="codefrag">InputSplit</span> instances based on the total size, in bytes, of 
+        the input files. However, the <span class="codefrag">FileSystem</span> blocksize of the 
+        input files is treated as an upper bound for input splits. A lower bound
+        on the split size can be set via <span class="codefrag">mapred.min.split.size</span>.</p>
+<p>Clearly, logical splits based on input-size are insufficient for many
+        applications since record boundaries must be respected. In such cases, 
+        the application should implement a <span class="codefrag">RecordReader</span>, which is 
+        responsible for respecting record-boundaries and presents a 
+        record-oriented view of the logical <span class="codefrag">InputSplit</span> to the 
+        individual task.</p>
+<p>
+<a href="api/org/apache/hadoop/mapred/TextInputFormat.html">
+        TextInputFormat</a> is the default <span class="codefrag">InputFormat</span>.
+        </p>
+<a name="N108E9"></a><a name="InputSplit"></a>
+<h4>InputSplit</h4>
+<p>
+<a href="api/org/apache/hadoop/mapred/InputSplit.html">
+          InputSplit</a> represents the data to be processed by an individual 
+          <span class="codefrag">Mapper</span>.</p>
+<p>Typically <span class="codefrag">InputSplit</span> presents a byte-oriented view of
+          the input, and it is the responsibility of <span class="codefrag">RecordReader</span>
+          to process and present a record-oriented view.</p>
+<p>
+<a href="api/org/apache/hadoop/mapred/FileSplit.html">
+          FileSplit</a> is the default <span class="codefrag">InputSplit</span>. It sets 
+          <span class="codefrag">map.input.file</span> to the path of the input file for the
+          logical split.</p>
+<a name="N1090E"></a><a name="RecordReader"></a>
+<h4>RecordReader</h4>
+<p>
+<a href="api/org/apache/hadoop/mapred/RecordReader.html">
+          RecordReader</a> reads <span class="codefrag">&lt;key, value&gt;</span> pairs from an 
+          <span class="codefrag">InputSplit</span>.</p>
+<p>Typically the <span class="codefrag">RecordReader</span> converts the byte-oriented 
+          view of the input, provided by the <span class="codefrag">InputSplit</span>, and 
+          presents a record-oriented view to the <span class="codefrag">Mapper</span> implementations 
+          for processing. <span class="codefrag">RecordReader</span> thus assumes the 
+          responsibility of processing record boundaries and presents the tasks 
+          with keys and values.</p>
+<a name="N10931"></a><a name="Job+Output"></a>
+<h3 class="h4">Job Output</h3>
+<p>
+<a href="api/org/apache/hadoop/mapred/OutputFormat.html">
+        OutputFormat</a> describes the output-specification for a Map-Reduce 
+        job.</p>
+<p>The Map-Reduce framework relies on the <span class="codefrag">OutputFormat</span> of 
+        the job to:</p>
+<ol>
+          
+<li>
+            Validate the output-specification of the job; for example, check that 
+            the output directory doesn't already exist.
+          </li>
+          
+<li>
+            Provide the <span class="codefrag">RecordWriter</span> implementation used to 
+            write the output files of the job. Output files are stored in a 
+            <span class="codefrag">FileSystem</span>.
+          </li>
+        
+</ol>
+<p>
+<span class="codefrag">TextOutputFormat</span> is the default 
+        <span class="codefrag">OutputFormat</span>.</p>
+<a name="N1095A"></a><a name="Task+Side-Effect+Files"></a>
+<h4>Task Side-Effect Files</h4>
+<p>In some applications, component tasks need to create and/or write to
+          side-files, which differ from the actual job-output files.</p>
+<p>In such cases there could be issues with two instances of the same 
+          <span class="codefrag">Mapper</span> or <span class="codefrag">Reducer</span> running simultaneously (for
+          example, speculative tasks) trying to open and/or write to the same 
+          file (path) on the <span class="codefrag">FileSystem</span>. Hence the 
+          application-writer will have to pick unique names per task-attempt 
+          (using the taskid, say <span class="codefrag">task_200709221812_0001_m_000000_0</span>), 
+          not just per task.</p>
+<p>To avoid these issues the Map-Reduce framework maintains a special 
+          <span class="codefrag">${mapred.output.dir}/_${taskid}</span> sub-directory for each 
+          task-attempt on the <span class="codefrag">FileSystem</span> where the output of the 
+          task-attempt is stored. On successful completion of the task-attempt, 
+          the files in the <span class="codefrag">${mapred.output.dir}/_${taskid}</span> (only) 
+          are <em>promoted</em> to <span class="codefrag">${mapred.output.dir}</span>. Of course, 
+          the framework discards the sub-directory of unsuccessful task-attempts. 
+          This process is completely transparent to the application.</p>
+<p>The application-writer can take advantage of this feature by 
+          creating any side-files required in <span class="codefrag">${mapred.output.dir}</span> 
+          during execution of a task via 
+          <a href="api/org/apache/hadoop/mapred/JobConf.html#getOutputPath()">
+          JobConf.getOutputPath()</a>, and the framework will promote them 
+          similarly for successful task-attempts, thus eliminating the need to 
+          pick unique paths per task-attempt.</p>
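+<p>For example, a task could create such a side-file roughly as follows, where 
+          <span class="codefrag">job</span> is the task's <span class="codefrag">JobConf</span> 
+          and the file name is illustrative:</p>
+<p>
+<span class="codefrag">Path sideFile = new Path(job.getOutputPath(), "side-data.txt");</span><br>
+<span class="codefrag">FSDataOutputStream out = sideFile.getFileSystem(job).create(sideFile);</span><br>
+<span class="codefrag">// ... write to the stream and close it; the framework promotes the file on success</span>
+</p>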
+<a name="N1098F"></a><a name="RecordWriter"></a>
+<h4>RecordWriter</h4>
+<p>
+<a href="api/org/apache/hadoop/mapred/RecordWriter.html">
+          RecordWriter</a> writes the output <span class="codefrag">&lt;key, value&gt;</span> 
+          pairs to an output file.</p>
+<p>RecordWriter implementations write the job outputs to the 
+          <span class="codefrag">FileSystem</span>.</p>
+<a name="N109A6"></a><a name="Other+Useful+Features"></a>
+<h3 class="h4">Other Useful Features</h3>
+<a name="N109AC"></a><a name="Counters"></a>
+<h4>Counters</h4>
+<p>
+<span class="codefrag">Counters</span> represent global counters, defined either by 
+          the Map-Reduce framework or applications. Each <span class="codefrag">Counter</span> can 
+          be of any <span class="codefrag">Enum</span> type. Counters of a particular 
+          <span class="codefrag">Enum</span> are bunched into groups of type 
+          <span class="codefrag">Counters.Group</span>.</p>
+<p>Applications can define arbitrary <span class="codefrag">Counters</span> (of type 
+          <span class="codefrag">Enum</span>) and update them via 
+          <a href="api/org/apache/hadoop/mapred/Reporter.html#incrCounter(java.lang.Enum, long)">
+          Reporter.incrCounter(Enum, long)</a> in the <span class="codefrag">map</span> and/or 
+          <span class="codefrag">reduce</span> methods. These counters are then globally 
+          aggregated by the framework.</p>
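+<p>A hedged sketch: the enum below and the empty-line check are illustrative, and the 
+          fragment is assumed to sit inside a <span class="codefrag">map</span> method with 
+          <span class="codefrag">reporter</span> being the <span class="codefrag">Reporter</span> 
+          passed to it:</p>
+<p>
+<span class="codefrag">// declared in the job class:</span><br>
+<span class="codefrag">public static enum LineCounters { NON_EMPTY, EMPTY }</span><br>
+<br>
+<span class="codefrag">// inside map(), counting empty vs. non-empty input lines:</span><br>
+<span class="codefrag">if (value.toString().trim().length() == 0) {</span><br>
+<span class="codefrag">&nbsp;&nbsp;reporter.incrCounter(LineCounters.EMPTY, 1);</span><br>
+<span class="codefrag">} else {</span><br>
+<span class="codefrag">&nbsp;&nbsp;reporter.incrCounter(LineCounters.NON_EMPTY, 1);</span><br>
+<span class="codefrag">}</span>
+</p>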
+<a name="N109D7"></a><a name="DistributedCache"></a>
+<h4>DistributedCache</h4>
+<p>
+<a href="api/org/apache/hadoop/filecache/DistributedCache.html">
+          DistributedCache</a> distributes application-specific, large, read-only 
+          files efficiently.</p>
+<p>
+<span class="codefrag">DistributedCache</span> is a facility provided by the 
+          Map-Reduce framework to cache files (text, archives, jars and so on) 
+          needed by applications.</p>
+<p>Applications specify the files to be cached via URLs (hdfs:// or 
+          http://) in the <span class="codefrag">JobConf</span>. The <span class="codefrag">DistributedCache</span> 
+          assumes that the files specified via hdfs:// URLs are already present 
+          on the <span class="codefrag">FileSystem</span>.</p>
+<p>The framework will copy the necessary files to the slave node 
+          before any tasks for the job are executed on that node. Its 
+          efficiency stems from the fact that the files are only copied once 
+          per job, and from the ability to cache archives, which are un-archived on 
+          the slaves.</p>
+<p>
+<span class="codefrag">DistributedCache</span> can be used to distribute simple, 
+          read-only data/text files and more complex types such as archives and
+          jars. Archives (zip files) are <em>un-archived</em> at the slave nodes.
+          Jars may optionally be added to the classpath of the tasks, providing a
+          rudimentary <em>software distribution</em> mechanism.  Files have 
+          <em>execution permissions</em> set. Optionally users can also direct the
+          <span class="codefrag">DistributedCache</span> to <em>symlink</em> the cached file(s) 
+          into the working directory of the task.</p>
+<p>
+<span class="codefrag">DistributedCache</span> tracks the modification timestamps of 
+          the cached files. Clearly the cache files should not be modified by 
+          the application or externally while the job is executing.</p>
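+<p>A rough sketch of typical usage (the HDFS path is illustrative; note that 
+          <span class="codefrag">getLocalCacheFiles</span> throws 
+          <span class="codefrag">IOException</span>, so a real 
+          <span class="codefrag">configure</span> implementation would wrap the call in a 
+          try/catch):</p>
+<p>
+<span class="codefrag">// in the job driver, before submission (conf is the job's JobConf):</span><br>
+<span class="codefrag">DistributedCache.addCacheFile(new URI("/myapp/lookup.dat"), conf);</span><br>
+<br>
+<span class="codefrag">// in Mapper.configure(JobConf job), to locate the node-local copies:</span><br>
+<span class="codefrag">Path[] cached = DistributedCache.getLocalCacheFiles(job);</span>
+</p>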
+<a name="N10A11"></a><a name="Tool"></a>
+<h4>Tool</h4>
+<p>The <a href="api/org/apache/hadoop/util/Tool.html">Tool</a> 
+          interface supports the handling of generic Hadoop command-line options.
+          </p>
+<p>
+<span class="codefrag">Tool</span> is the standard for any Map-Reduce tool or 
+          application. The application should delegate the handling of 
+          standard command-line options to 
+          <a href="api/org/apache/hadoop/util/GenericOptionsParser.html">
+          GenericOptionsParser</a> via          
+          <a href="api/org/apache/hadoop/util/ToolRunner.html#run(org.apache.hadoop.util.Tool, java.lang.String[])">
+          ToolRunner.run(Tool, String[])</a> and only handle its custom 
+          arguments.</p>
+<p>
+            The generic Hadoop command-line options are:<br>
+            
+<span class="codefrag">
+              -conf &lt;configuration file&gt;
+            </span>
+            
+<br>
+            
+<span class="codefrag">
+              -D &lt;property=value&gt;
+            </span>
+            
+<br>
+            
+<span class="codefrag">
+              -fs &lt;local|namenode:port&gt;
+            </span>
+            
+<br>
+            
+<span class="codefrag">
+              -jt &lt;local|jobtracker:port&gt;
+            </span>
+          
+</p>
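+<p>A minimal, hedged sketch of this pattern (the class name 
+          <span class="codefrag">MyJob</span> is illustrative):</p>
+<p>
+<span class="codefrag">import org.apache.hadoop.conf.Configured;</span><br>
+<span class="codefrag">import org.apache.hadoop.mapred.*;</span><br>
+<span class="codefrag">import org.apache.hadoop.util.*;</span><br>
+<br>
+<span class="codefrag">public class MyJob extends Configured implements Tool {</span><br>
+<span class="codefrag">&nbsp;&nbsp;public int run(String[] args) throws Exception {</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;JobConf conf = new JobConf(getConf(), MyJob.class);  // picks up -conf/-D/-fs/-jt settings</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;conf.setJobName("myjob");</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;// ... set mapper/reducer and input/output paths from the remaining args ...</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;JobClient.runJob(conf);</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;return 0;</span><br>
+<span class="codefrag">&nbsp;&nbsp;}</span><br>
+<span class="codefrag">&nbsp;&nbsp;public static void main(String[] args) throws Exception {</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;// ToolRunner strips the generic options before calling run()</span><br>
+<span class="codefrag">&nbsp;&nbsp;&nbsp;&nbsp;System.exit(ToolRunner.run(new MyJob(), args));</span><br>
+<span class="codefrag">&nbsp;&nbsp;}</span><br>
+<span class="codefrag">}</span>
+</p>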
+<a name="N10A43"></a><a name="IsolationRunner"></a>
+<h4>IsolationRunner</h4>
+<p>
+<a href="api/org/apache/hadoop/mapred/IsolationRunner.html">
+          IsolationRunner</a> is a utility to help debug Map-Reduce programs.</p>
+<p>To use the <span class="codefrag">IsolationRunner</span>, first set 
+          <span class="codefrag">keep.failed.tasks.files</span> to <span class="codefrag">true</span> 
+          (also see <span class="codefrag">keep.tasks.files.pattern</span>).</p>
+<p>
+            Next, go to the node on which the failed task ran and go to the 
+            <span class="codefrag">TaskTracker</span>'s local directory and run the 
+            <span class="codefrag">IsolationRunner</span>:<br>
+            
+<span class="codefrag">$ cd &lt;local path&gt;/taskTracker/${taskid}/work</span>
+<br>
+            
+<span class="codefrag">
+              $ bin/hadoop org.apache.hadoop.mapred.IsolationRunner ../job.xml
+            </span>
+          
+</p>
+<p>
+<span class="codefrag">IsolationRunner</span> will run the failed task in a single 
+          jvm, which can be in the debugger, over precisely the same input.</p>
+<a name="N10A76"></a><a name="JobControl"></a>
+<h4>JobControl</h4>
+<p>
+<a href="api/org/apache/hadoop/mapred/jobcontrol/package-summary.html">
+          JobControl</a> is a utility which encapsulates a set of Map-Reduce jobs
+          and their dependencies.</p>
+</div>
+
+    
+<a name="N10A85"></a><a name="Example%3A+WordCount+v2.0"></a>
+<h2 class="h3">Example: WordCount v2.0</h2>
+<div class="section">
+<p>Here is a more complete <span class="codefrag">WordCount</span> which uses many of the
+      features provided by the Map-Reduce framework that we have discussed so far:</p>
+<a name="N10A91"></a><a name="Source+Code-N10A91"></a>
+<h3 class="h4">Source Code</h3>
+<table class="ForrestTable" cellspacing="1" cellpadding="4">
+          
+<tr>
+            
+<th colspan="1" rowspan="1"></th>
+            <th colspan="1" rowspan="1">WordCount.java</th>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">1.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">package org.myorg;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">2.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">3.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import java.io.*;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">4.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import java.util.*;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">5.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">6.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import org.apache.hadoop.fs.Path;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">7.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import org.apache.hadoop.filecache.DistributedCache;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">8.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import org.apache.hadoop.conf.*;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">9.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import org.apache.hadoop.io.*;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">10.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import org.apache.hadoop.mapred.*;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">11.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">import org.apache.hadoop.util.*;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">12.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">13.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">public class WordCount extends Configured implements Tool {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">14.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">15.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">
+                public static class MapClass extends MapReduceBase 
+                implements Mapper&lt;LongWritable, Text, Text, IntWritable&gt; {
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">16.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">17.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                static enum Counters { INPUT_WORDS }
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">18.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">19.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                private final static IntWritable one = new IntWritable(1);
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">20.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">private Text word = new Text();</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">21.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">22.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">private boolean caseSensitive = true;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">23.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">private Set&lt;String&gt; patternsToSkip = new HashSet&lt;String&gt;();</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">24.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">25.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">private long numRecords = 0;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">26.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">private String inputFile;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">27.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">28.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">public void configure(JobConf job) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">29.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                caseSensitive = job.getBoolean("wordcount.case.sensitive", true);
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">30.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">inputFile = job.get("map.input.file");</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">31.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">32.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">Path[] patternsFiles = new Path[0];</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">33.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">try {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">34.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                patternsFiles = DistributedCache.getLocalCacheFiles(job);
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">35.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">} catch (IOException ioe) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">36.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                System.err.println("Caught exception while getting cached files: " 
+                + StringUtils.stringifyException(ioe));
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">37.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">38.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">for (Path patternsFile : patternsFiles) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">39.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">parseSkipFile(patternsFile);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">40.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">41.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">42.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">43.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">private void parseSkipFile(Path patternsFile) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">44.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">try {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">45.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                BufferedReader fis = 
+                  new BufferedReader(new FileReader(patternsFile.toString()));
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">46.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">String pattern = null;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">47.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">while ((pattern = fis.readLine()) != null) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">48.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">patternsToSkip.add(pattern);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">49.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">50.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">} catch (IOException ioe) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">51.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                System.err.println("Caught exception while parsing the cached file '" +
+                                   patternsFile + "' : " + 
+                                   StringUtils.stringifyException(ioe));
+                
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">52.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">53.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">54.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">55.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                public void map(LongWritable key, Text value, 
+                OutputCollector&lt;Text, IntWritable&gt; output, 
+                Reporter reporter) throws IOException {
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">56.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                String line = 
+                  (caseSensitive) ? value.toString() : 
+                                    value.toString().toLowerCase();
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">57.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">58.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">for (String pattern : patternsToSkip) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">59.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">line = line.replaceAll(pattern, "");</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">60.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">61.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">62.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">StringTokenizer tokenizer = new StringTokenizer(line);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">63.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">while (tokenizer.hasMoreTokens()) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">64.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">word.set(tokenizer.nextToken());</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">65.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">output.collect(word, one);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">66.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">reporter.incrCounter(Counters.INPUT_WORDS, 1);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">67.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">68.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">69.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">if ((++numRecords % 100) == 0) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">70.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                reporter.setStatus("Finished processing " + numRecords + 
+                                   " records " + "from the input file: " + 
+                                   inputFile);
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">71.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">72.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">73.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">74.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">75.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">
+                public static class Reduce extends MapReduceBase implements 
+                Reducer&lt;Text, IntWritable, Text, IntWritable&gt; {
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">76.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                public void reduce(Text key, Iterator&lt;IntWritable&gt; values,
+                OutputCollector&lt;Text, IntWritable&gt; output, 
+                Reporter reporter) throws IOException {
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">77.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">int sum = 0;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">78.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">while (values.hasNext()) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">79.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">sum += values.next().get();</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">80.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">81.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">output.collect(key, new IntWritable(sum));</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">82.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">83.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">84.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">85.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">public int run(String[] args) throws Exception {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">86.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                JobConf conf = new JobConf(getConf(), WordCount.class);
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">87.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setJobName("wordcount");</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">88.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">89.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setOutputKeyClass(Text.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">90.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setOutputValueClass(IntWritable.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">91.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">92.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setMapperClass(MapClass.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">93.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setCombinerClass(Reduce.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">94.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setReducerClass(Reduce.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">95.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">96.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setInputFormat(TextInputFormat.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">97.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setOutputFormat(TextOutputFormat.class);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">98.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">99.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                List&lt;String&gt; other_args = new ArrayList&lt;String&gt;();
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">100.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">for (int i=0; i &lt; args.length; ++i) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">101.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">if ("-skip".equals(args[i]) {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">102.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf);
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">103.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">} else {</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">104.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">other_args.add(args[i]);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">105.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">106.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">107.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">108.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setInputPath(new Path(other_args[0]));</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">109.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">conf.setOutputPath(new Path(other_args[1]));</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">110.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">111.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">JobClient.runJob(conf);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">112.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">return 0;</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">113.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">114.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">115.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">
+                public static void main(String[] args) throws Exception {
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">116.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">
+                int res = ToolRunner.run(new Configuration(), new WordCount(), 
+                                         args);
+              </span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">117.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <span class="codefrag">System.exit(res);</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">118.</td>
+            <td colspan="1" rowspan="1">
+              &nbsp;&nbsp;
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">119.</td>
+            <td colspan="1" rowspan="1">
+              <span class="codefrag">}</span>
+            </td>
+          
+</tr>
+          
+<tr>
+            
+<td colspan="1" rowspan="1">120.</td>
+            <td colspan="1" rowspan="1"></td>
+          
+</tr>
+        
+</table>
+<a name="N111C3"></a><a name="Sample+Runs"></a>
+<h3 class="h4">Sample Runs</h3>
+<p>Sample text-files as input:</p>
+<p>
+          
+<span class="codefrag">$ bin/hadoop dfs -ls /usr/joe/wordcount/input/</span>
+<br>
+          
+<span class="codefrag">/usr/joe/wordcount/input/file01</span>
+<br>
+          
+<span class="codefrag">/usr/joe/wordcount/input/file02</span>
+<br>
+          
+<br>
+          
+<span class="codefrag">$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01</span>
+<br>
+          
+<span class="codefrag">Hello World, Bye World!</span>
+<br>
+          
+<br>
+          
+<span class="codefrag">$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02</span>
+<br>
+          
+<span class="codefrag">Hello Hadoop, Goodbye the Hadoop.</span>
+        
+</p>
+<p>Run the application:</p>
+<p>
+          
+<span class="codefrag">
+            $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount 
+              /usr/joe/wordcount/input /usr/joe/wordcount/output 
+          </span>
+        
+</p>
+<p>Output:</p>
+<p>
+          
+<span class="codefrag">
+            $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
+          </span>
+          
+<br>
+          
+<span class="codefrag">Bye    1</span>
+<br>
+          
+<span class="codefrag">Goodbye    1</span>
+<br>
+          
+<span class="codefrag">Hadoop,    1</span>
+<br>
+          
+<span class="codefrag">Hadoop.    1</span>
+<br>
+          
+<span class="codefrag">Hello    2</span>
+<br>
+          
+<span class="codefrag">World!    1</span>
+<br>
+          
+<span class="codefrag">World,    1</span>
+<br>
+          
+<span class="codefrag">the    1</span>
+<br>
+        
+</p>
+<p>Notice that the inputs differ from the first version we looked at, 
+        and how they affect the outputs.</p>
+<p>Now, let's plug in a pattern-file which lists the word-patterns to be 
+        ignored, via the <span class="codefrag">DistributedCache</span>.</p>
+<p>
+          
+<span class="codefrag">$ hadoop dfs -cat /user/joe/wordcount/patterns.txt</span>
+<br>
+          
+<span class="codefrag">\.</span>
+<br>
+          
+<span class="codefrag">\,</span>
+<br>
+          
+<span class="codefrag">\!</span>
+<br>
+          
+<span class="codefrag">the</span>
+<br>
+        
+</p>
+<p>Run it again, this time with more options:</p>
+<p>
+          
+<span class="codefrag">
+            $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount 
+              -Dwordcount.case.sensitive=true /usr/joe/wordcount/input 
+              /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt
+          </span>
+        
+</p>
+<p>As expected, the output:</p>
+<p>
+          
+<span class="codefrag">
+            $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
+          </span>
+          
+<br>
+          
+<span class="codefrag">Bye    1</span>
+<br>
+          
+<span class="codefrag">Goodbye    1</span>
+<br>
+          
+<span class="codefrag">Hadoop    2</span>
+<br>
+          
+<span class="codefrag">Hello    2</span>
+<br>
+          
+<span class="codefrag">World    2</span>
+<br>
+        
+</p>
+<p>Run it once more, this time switching off case-sensitivity:</p>
+<p>
+          
+<span class="codefrag">
+            $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount 
+              -Dwordcount.case.sensitive=false /usr/joe/wordcount/input 
+              /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt
+          </span>
+        
+</p>
+<p>Sure enough, the output:</p>
+<p>
+          
+<span class="codefrag">
+            $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
+          </span>
+          
+<br>
+          
+<span class="codefrag">bye    1</span>
+<br>
+          
+<span class="codefrag">goodbye    1</span>
+<br>
+          
+<span class="codefrag">hadoop    2</span>
+<br>
+          
+<span class="codefrag">hello    2</span>
+<br>
+          
+<span class="codefrag">world    2</span>
+<br>
+        
+</p>
+<a name="N11293"></a><a name="Salient+Points"></a>
+<h3 class="h4">Salient Points</h3>
+<p>The second version of <span class="codefrag">WordCount</span> improves upon the 
+        previous one by using some features offered by the Map-Reduce framework:
+        </p>
+<ul>
+          
+<li>
+            Demonstrates how applications can access configuration parameters
+            in the <span class="codefrag">configure</span> method of the <span class="codefrag">Mapper</span> (and
+            <span class="codefrag">Reducer</span>) implementations (lines 28-41).
+          </li>
+          
+<li>
+            Demonstrates how the <span class="codefrag">DistributedCache</span> can be used to 
+            distribute read-only data needed by the jobs. Here it allows the user 
+            to specify word-patterns to skip while counting (line 102).
+          </li>
+          
+<li>
+            Demonstrates the utility of the <span class="codefrag">Tool</span> interface and the
+            <span class="codefrag">GenericOptionsParser</span> to handle generic Hadoop 
+            command-line options (lines 85-86, 116).
+          </li>
+          
+<li>
+            Demonstrates how applications can use <span class="codefrag">Counters</span> (line 66)
+            and how they can set application-specific status information via 
+            the <span class="codefrag">Reporter</span> instance passed to the <span class="codefrag">map</span> (and
+            <span class="codefrag">reduce</span>) method (line 70).
+          </li>
+        
+</ul>
+</div>
+
+    
+<p>
+      
+<em>Java and JNI are trademarks or registered trademarks of 
+      Sun Microsystems, Inc. in the United States and other countries.</em>
+    
+</p>
+    
+  
+</div>
+<!--+
+    |end content
+    +-->
+<div class="clearboth">&nbsp;</div>
+</div>
+<div id="footer">
+<!--+
+    |start bottomstrip
+    +-->
+<div class="lastmodified">
+<script type="text/javascript"><!--
+document.write("Last Published: " + document.lastModified);
+//  --></script>
+</div>
+<div class="copyright">
+        Copyright &copy;
+         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
+</div>
+<!--+
+    |end bottomstrip
+    +-->
+</div>
+</body>
+</html>

File diff suppressed because it is too large
+ 239 - 0
docs/mapred_tutorial.pdf


+ 573 - 0
docs/quickstart.html

@@ -0,0 +1,573 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+<head>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<meta content="Apache Forrest" name="Generator">
+<meta name="Forrest-version" content="0.8">
+<meta name="Forrest-skin-name" content="pelt">
+<title>Hadoop Quickstart</title>
+<link type="text/css" href="skin/basic.css" rel="stylesheet">
+<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
+<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
+<link type="text/css" href="skin/profile.css" rel="stylesheet">
+<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="skin/fontsize.js" language="javascript" type="text/javascript"></script>
+<link rel="shortcut icon" href="images/favicon.ico">
+</head>
+<body onload="init()">
+<script type="text/javascript">ndeSetTextSize();</script>
+<div id="top">
+<!--+
+    |breadtrail
+    +-->
+<div class="breadtrail">
+<a href="http://www.apache.org/">Apache</a> &gt; <a href="http://lucene.apache.org/">Lucene</a> &gt; <a href="http://lucene.apache.org/hadoop/">Hadoop</a><script src="skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
+</div>
+<!--+
+    |header
+    +-->
+<div class="header">
+<!--+
+    |start group logo
+    +-->
+<div class="grouplogo">
+<a href="http://lucene.apache.org/"><img class="logoImage" alt="Lucene" src="images/lucene_green_150.gif" title="Apache Lucene"></a>
+</div>
+<!--+
+    |end group logo
+    +-->
+<!--+
+    |start Project Logo
+    +-->
+<div class="projectlogo">
+<a href="http://lucene.apache.org/hadoop/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Scalable Computing Platform"></a>
+</div>
+<!--+
+    |end Project Logo
+    +-->
+<!--+
+    |start Search
+    +-->
+<div class="searchbox">
+<form action="http://www.google.com/search" method="get" class="roundtopsmall">
+<input value="lucene.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">&nbsp; 
+                    <input name="Search" value="Search" type="submit">
+</form>
+</div>
+<!--+
+    |end search
+    +-->
+<!--+
+    |start Tabs
+    +-->
+<ul id="tabs">
+<li class="current">
+<a class="selected" href="index.html">Main</a>
+</li>
+<li>
+<a class="unselected" href="http://wiki.apache.org/lucene-hadoop">Wiki</a>
+</li>
+</ul>
+<!--+
+    |end Tabs
+    +-->
+</div>
+</div>
+<div id="main">
+<div id="publishedStrip">
+<!--+
+    |start Subtabs
+    +-->
+<div id="level2tabs"></div>
+<!--+
+    |end Endtabs
+    +-->
+<script type="text/javascript"><!--
+document.write("Last Published: " + document.lastModified);
+//  --></script>
+</div>
+<!--+
+    |breadtrail
+    +-->
+<div class="breadtrail">
+
+             &nbsp;
+           </div>
+<!--+
+    |start Menu, mainarea
+    +-->
+<!--+
+    |start Menu
+    +-->
+<div id="menu">
+<div onclick="SwitchMenu('menu_1.1', 'skin/')" id="menu_1.1Title" class="menutitle">Project</div>
+<div id="menu_1.1" class="menuitemgroup">
+<div class="menuitem">
+<a href="releases.html">Releases</a>
+</div>
+<div class="menuitem">
+<a href="releases.html#News">News</a>
+</div>
+<div class="menuitem">
+<a href="credits.html">Credits</a>
+</div>
+<div class="menuitem">
+<a href="http://www.cafepress.com/hadoop/">Buy Stuff</a>
+</div>
+</div>
+<div onclick="SwitchMenu('menu_selected_1.2', 'skin/')" id="menu_selected_1.2Title" class="menutitle" style="background-image: url('skin/images/chapter_open.gif');">Documentation</div>
+<div id="menu_selected_1.2" class="selectedmenuitemgroup" style="display: block;">
+<div class="menuitem">
+<a href="documentation.html">Overview</a>
+</div>
+<div class="menupage">
+<div class="menupagetitle">Quickstart</div>
+</div>
+<div class="menuitem">
+<a href="cluster_setup.html">Cluster Setup</a>
+</div>
+<div class="menuitem">
+<a href="hdfs_design.html">HDFS Architecture</a>
+</div>
+<div class="menuitem">
+<a href="mapred_tutorial.html">Map-Reduce Tutorial</a>
+</div>
+<div class="menuitem">
+<a href="api/index.html">API Docs</a>
+</div>
+<div class="menuitem">
+<a href="http://wiki.apache.org/lucene-hadoop/">Wiki</a>
+</div>
+<div class="menuitem">
+<a href="http://wiki.apache.org/lucene-hadoop/FAQ">FAQ</a>
+</div>
+<div class="menuitem">
+<a href="mailing_lists.html#Users">Mailing Lists</a>
+</div>
+</div>
+<div onclick="SwitchMenu('menu_1.3', 'skin/')" id="menu_1.3Title" class="menutitle">Developers</div>
+<div id="menu_1.3" class="menuitemgroup">
+<div class="menuitem">
+<a href="mailing_lists.html#Developers">Mailing Lists</a>
+</div>
+<div class="menuitem">
+<a href="issue_tracking.html">Issue Tracking</a>
+</div>
+<div class="menuitem">
+<a href="version_control.html">Version Control</a>
+</div>
+<div class="menuitem">
+<a href="http://lucene.zones.apache.org:8080/hudson/job/Hadoop-Nightly/">Nightly Build</a>
+</div>
+<div class="menuitem">
+<a href="irc.html">IRC Channel</a>
+</div>
+</div>
+<div id="credit"></div>
+<div id="roundbottom">
+<img style="display: none" class="corner" height="15" width="15" alt="" src="skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
+<!--+
+  |alternative credits
+  +-->
+<div id="credit2"></div>
+</div>
+<!--+
+    |end Menu
+    +-->
+<!--+
+    |start content
+    +-->
+<div id="content">
+<div title="Portable Document Format" class="pdflink">
+<a class="dida" href="quickstart.pdf"><img alt="PDF -icon" src="skin/images/pdfdoc.gif" class="skin"><br>
+        PDF</a>
+</div>
+<h1>Hadoop Quickstart</h1>
+<div id="minitoc-area">
+<ul class="minitoc">
+<li>
+<a href="#Purpose">Purpose</a>
+</li>
+<li>
+<a href="#PreReqs">Pre-requisites</a>
+<ul class="minitoc">
+<li>
+<a href="#Supported+Platforms">Supported Platforms</a>
+</li>
+<li>
+<a href="#Required+Software">Required Software</a>
+<ul class="minitoc">
+<li>
+<a href="#Additional+requirements+for+Windows">Additional requirements for Windows</a>
+</li>
+</ul>
+</li>
+<li>
+<a href="#Installing+Software">Installing Software</a>
+</li>
+</ul>
+</li>
+<li>
+<a href="#Download">Download</a>
+</li>
+<li>
+<a href="#Standalone+Operation">Standalone Operation</a>
+</li>
+<li>
+<a href="#SingleNodeSetup">Pseudo-Distributed Operation</a>
+<ul class="minitoc">
+<li>
+<a href="#Configuration">Configuration</a>
+</li>
+<li>
+<a href="#Setup+passphraseless">Setup passphraseless ssh</a>
+</li>
+<li>
+<a href="#Execution">Execution</a>
+</li>
+</ul>
+</li>
+<li>
+<a href="#Fully-Distributed+Operation">Fully-Distributed Operation</a>
+</li>
+</ul>
+</div>
+  
+    
+<a name="N1000C"></a><a name="Purpose"></a>
+<h2 class="h3">Purpose</h2>
+<div class="section">
+<p>The purpose of this document is to help users get a single-node Hadoop 
+      installation up and running very quickly so that they can get a flavour 
+      of the <a href="hdfs_design.html">Hadoop Distributed File System 
+      (<acronym title="Hadoop Distributed File System">HDFS</acronym>)</a> and 
+      the Map-Reduce framework, i.e. perform simple operations on HDFS, run 
+      example jobs, etc.</p>
+</div>
+    
+    
+<a name="N1001E"></a><a name="PreReqs"></a>
+<h2 class="h3">Pre-requisites</h2>
+<div class="section">
+<a name="N10024"></a><a name="Supported+Platforms"></a>
+<h3 class="h4">Supported Platforms</h3>
+<ul>
+          
+<li>
+            Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.
+          </li>
+          
+<li>
+            Win32 is supported as a <em>development platform</em>. Distributed 
+            operation has not been well tested on Win32, so this is not a 
+            <em>production platform</em>.
+          </li>
+        
+</ul>
+<a name="N1003A"></a><a name="Required+Software"></a>
+<h3 class="h4">Required Software</h3>
+<ol>
+          
+<li>
+            Java<sup>TM</sup> 1.5.x, preferably from Sun, must be installed. Set 
+            <span class="codefrag">JAVA_HOME</span> to the root of your Java installation.
+          </li>
+          
+<li>
+            
+<strong>ssh</strong> must be installed and <strong>sshd</strong> must 
+            be running to use the Hadoop scripts that manage remote Hadoop 
+            daemons.
+          </li>
+        
+</ol>
+<a name="N10055"></a><a name="Additional+requirements+for+Windows"></a>
+<h4>Additional requirements for Windows</h4>
+<ol>
+            
+<li>
+              
+<a href="http://www.cygwin.com/">Cygwin</a> - Required for shell 
+              support in addition to the required software above. 
+            </li>
+          
+</ol>
+<a name="N10067"></a><a name="Installing+Software"></a>
+<h3 class="h4">Installing Software</h3>
+<p>If your cluster doesn't have the requisite software, you will need to
+        install it.</p>
+<p>For example on Ubuntu Linux:</p>
+<p>
+          
+<span class="codefrag">$ sudo apt-get install ssh</span>
+<br>
+          
+<span class="codefrag">$ sudo apt-get install rsync</span>
+        
+</p>
+<p>On Windows, if you did not install the required software when you 
+        installed Cygwin, start the Cygwin installer and select the packages:</p>
+<ul>
+          
+<li>openssh - the <em>Net</em> category</li>
+        
+</ul>
+</div>
+    
+    
+<a name="N1008B"></a><a name="Download"></a>
+<h2 class="h3">Download</h2>
+<div class="section">
+<p>
+        First, you need to get a Hadoop distribution: download a recent 
+        <a href="releases.html">stable release</a> and unpack it.
+      </p>
+<p>
+        Once done, edit the file 
+        <span class="codefrag">conf/hadoop-env.sh</span> in the distribution to define at least <span class="codefrag">JAVA_HOME</span>.
+      </p>
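+<p>
+        For example, you might add a line such as the following (the path shown 
+        is illustrative; point it at your own Java installation):<br>
+        
+<span class="codefrag"># path below is illustrative</span>
+<br>
+        
+<span class="codefrag">export JAVA_HOME=/usr/java/jdk1.5.0</span>
+      
+</p>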
+<p>
+	    Try the following command:<br>
+        
+<span class="codefrag">$ bin/hadoop</span>
+<br>
+        This will display the usage documentation for the <strong>hadoop</strong> 
+        script.
+      </p>
+</div>
+    
+    
+<a name="N100AE"></a><a name="Standalone+Operation"></a>
+<h2 class="h3">Standalone Operation</h2>
+<div class="section">
+<p>By default, Hadoop is configured to run in a non-distributed 
+      mode, as a single Java process. This is useful for debugging.</p>
+<p>
+        The following example copies the unpacked <span class="codefrag">conf</span> directory to 
+        use as input and then finds and displays every match of the given regular 
+        expression. Output is written to the given <span class="codefrag">output</span> directory.
+        <br>
+        
+<span class="codefrag">$ mkdir input</span>
+<br>
+        
+<span class="codefrag">$ cp conf/*.xml input</span>
+<br>
+        
+<span class="codefrag">
+          $ bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'
+        </span>
+<br>
+        
+<span class="codefrag">$ cat output/*</span>
+      
+</p>
+</div>
+    
+    
+<a name="N100D2"></a><a name="SingleNodeSetup"></a>
+<h2 class="h3">Pseudo-Distributed Operation</h2>
+<div class="section">
+<p>Hadoop can also be run on a single node in a pseudo-distributed mode 
+	  where each Hadoop daemon runs in a separate Java process.</p>
+<a name="N100DB"></a><a name="Configuration"></a>
+<h3 class="h4">Configuration</h3>
+<p>Use the following <span class="codefrag">conf/hadoop-site.xml</span>:</p>
+<table class="ForrestTable" cellspacing="1" cellpadding="4">
+        
+<tr>
+<td colspan="1" rowspan="1">&lt;configuration&gt;</td>
+</tr>
+
+          
+<tr>
+<td colspan="1" rowspan="1">&nbsp;&nbsp;&lt;property&gt;</td>
+</tr>
+            
+<tr>
+<td colspan="1" rowspan="1">&nbsp;&nbsp;&nbsp;&nbsp;&lt;name&gt;fs.default.name&lt;/name&gt;</td>
+</tr>
+            
+<tr>
+<td colspan="1" rowspan="1">&nbsp;&nbsp;&nbsp;&nbsp;&lt;value&gt;localhost:9000&lt;/value&gt;</td>
+</tr>
+          
+<tr>
+<td colspan="1" rowspan="1">&nbsp;&nbsp;&lt;/property&gt;</td>
+</tr>
+
+          
+<tr>
+<td colspan="1" rowspan="1">&nbsp;&nbsp;&lt;property&gt;</td>
+</tr>
+            
+<tr>
+<td colspan="1" rowspan="1">&nbsp;&nbsp;&nbsp;&nbsp;&lt;name&gt;mapred.job.tracker&lt;/name&gt;</td>
+</tr>
+            
+<tr>
+<td colspan="1" rowspan="1">&nbsp;&nbsp;&nbsp;&nbsp;&lt;value&gt;localhost:9001&lt;/value&gt;</td>
+</tr>
+          
+<tr>
+<td colspan="1" rowspan="1">&nbsp;&nbsp;&lt;/property&gt;</td>
+</tr>
+
+          
+<tr>
+<td colspan="1" rowspan="1">&nbsp;&nbsp;&lt;property&gt;</td>
+</tr>
+            
+<tr>
+<td colspan="1" rowspan="1">&nbsp;&nbsp;&nbsp;&nbsp;&lt;name&gt;dfs.replication&lt;/name&gt;</td>
+</tr>
+            
+<tr>
+<td colspan="1" rowspan="1">&nbsp;&nbsp;&nbsp;&nbsp;&lt;value&gt;1&lt;/value&gt;</td>
+</tr>
+          
+<tr>
+<td colspan="1" rowspan="1">&nbsp;&nbsp;&lt;/property&gt;</td>
+</tr>
+
+        
+<tr>
+<td colspan="1" rowspan="1">&lt;/configuration&gt;</td>
+</tr>
+        
+</table>
+<a name="N1013F"></a><a name="Setup+passphraseless"></a>
+<h3 class="h4">Setup passphraseless ssh</h3>
+<p>
+          Now check that you can ssh to localhost without a passphrase:<br>
+          
+<span class="codefrag">$ ssh localhost</span>
+        
+</p>
+<p>
+          If you cannot ssh to localhost without a passphrase, execute the 
+          following commands:<br>
+   		  
+<span class="codefrag">$ ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa</span>
+<br>
+		  
+<span class="codefrag">$ cat ~/.ssh/id_dsa.pub &gt;&gt; ~/.ssh/authorized_keys</span>
+		
+</p>
+<a name="N1015C"></a><a name="Execution"></a>
+<h3 class="h4">Execution</h3>
+<p>
+          Format a new distributed-filesystem:<br>
+          
+<span class="codefrag">$ bin/hadoop namenode -format</span>
+        
+</p>
+<p>
+		  Start the Hadoop daemons:<br>
+          
+<span class="codefrag">$ bin/start-all.sh</span>
+        
+</p>
+<p>The Hadoop daemon log output is written to the 
+        <span class="codefrag">${HADOOP_LOG_DIR}</span> directory (defaults to 
+        <span class="codefrag">${HADOOP_HOME}/logs</span>).</p>
+<p>Browse the web-interfaces for the NameNode and the JobTracker; by
+        default they are available at:</p>
+<ul>
+          
+<li>
+            
+<span class="codefrag">NameNode</span> - 
+            <a href="http://localhost:50070/">http://localhost:50070/</a>
+          
+</li>
+          
+<li>
+            
+<span class="codefrag">JobTracker</span> - 
+            <a href="http://localhost:50030/">http://localhost:50030/</a>
+          
+</li>
+        
+</ul>
+<p>
+          Copy the input files into the distributed filesystem:<br>
+		  
+<span class="codefrag">$ bin/hadoop dfs -put conf input</span>
+		
+</p>
+<p>
+          Run some of the examples provided:<br>
+          
+<span class="codefrag">
+            $ bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'
+          </span>
+        
+</p>
+<p>Examine the output files:</p>
+<p>
+          Copy the output files from the distributed filesystem to the local 
+          filesystem and examine them:<br>
+          
+<span class="codefrag">$ bin/hadoop dfs -get output output</span>
+<br>
+          
+<span class="codefrag">$ cat output/*</span>
+        
+</p>
+<p> or </p>
+<p>
+          View the output files on the distributed filesystem:<br>
+          
+<span class="codefrag">$ bin/hadoop dfs -cat output/*</span>
+        
+</p>
+<p>
+		  When you're done, stop the daemons with:<br>
+		  
+<span class="codefrag">$ bin/stop-all.sh</span>
+		
+</p>
+</div>
+    
+    
+<a name="N101C9"></a><a name="Fully-Distributed+Operation"></a>
+<h2 class="h3">Fully-Distributed Operation</h2>
+<div class="section">
+<p>Information on setting up fully-distributed non-trivial clusters
+	  can be found <a href="cluster_setup.html">here</a>.</p>
+</div>
+    
+    
+<p>
+      
+<em>Java and JNI are trademarks or registered trademarks of 
+      Sun Microsystems, Inc. in the United States and other countries.</em>
+    
+</p>
+    
+  
+</div>
+<!--+
+    |end content
+    +-->
+<div class="clearboth">&nbsp;</div>
+</div>
+<div id="footer">
+<!--+
+    |start bottomstrip
+    +-->
+<div class="lastmodified">
+<script type="text/javascript"><!--
+document.write("Last Published: " + document.lastModified);
+//  --></script>
+</div>
+<div class="copyright">
+        Copyright &copy;
+         2007 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
+</div>
+<!--+
+    |end bottomstrip
+    +-->
+</div>
+</body>
+</html>

File diff suppressed because it is too large
+ 162 - 0
docs/quickstart.pdf


+ 414 - 0
src/docs/src/documentation/content/xdocs/cluster_setup.xml

@@ -0,0 +1,414 @@
+<?xml version="1.0"?>
+
+<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd">
+
+<document>
+  
+  <header>
+    <title>Hadoop Cluster Setup</title>
+  </header>
+  
+  <body>
+  
+    <section>
+      <title>Purpose</title>
+      
+      <p>This document describes how to install, configure and manage non-trivial
+      Hadoop clusters ranging from a few nodes to extremely large clusters with 
+      thousands of nodes.</p>
+      
+      <p>If you are looking to install Hadoop on a single machine to play
+      with it, you can find relevant details <a href="quickstart.html">here</a>.
+      </p>
+    </section>
+    
+    <section>
+      <title>Pre-requisites</title>
+      
+      <ol>
+        <li>
+          Make sure all <a href="quickstart.html#PreReqs">requisite</a> software 
+          is installed on all nodes in your cluster.
+        </li>
+        <li>
+          <a href="quickstart.html#GetHadoop">Get</a> the Hadoop software.
+        </li>
+      </ol>
+    </section>
+    
+    <section>
+      <title>Installation</title>
+      
+      <p>Installing a Hadoop cluster typically involves unpacking the software 
+      on all the machines in the cluster.</p>
+      
+      <p>Typically one machine in the cluster is designated as the 
+      <code>NameNode</code> and another machine as the <code>JobTracker</code>,
+      exclusively. These are the <em>masters</em>. The rest of the machines in 
+      the cluster act as both <code>DataNode</code> <em>and</em> 
+      <code>TaskTracker</code>. These are the <em>slaves</em>.</p>
+      
+      <p>The root of the distribution is referred to as 
+      <code>HADOOP_HOME</code>. All machines in the cluster usually have the same 
+      <code>HADOOP_HOME</code> path.</p>
+    </section>
+    
+    <section>
+      <title>Configuration</title>
+      
+      <p>The following sections describe how to configure a Hadoop cluster.</p>
+      
+      <section>
+        <title>Configuration Files</title>
+        
+        <p>Hadoop configuration is driven by two important configuration files
+        found in the <code>conf/</code> directory of the distribution:</p>
+        <ol>
+          <li>
+            <a href="ext:hadoop-default">hadoop-default.xml</a> - Read-only 
+            default configuration.
+          </li>
+          <li>
+            <em>hadoop-site.xml</em> - Site-specific configuration.
+          </li>
+        </ol>
+      
+        <p>To learn more about how the Hadoop framework is controlled by these 
+        configuration files, look <a href="ext:configuration">here</a>.</p>
+      
+        <p>Additionally, you can control the Hadoop scripts found in the 
+        <code>bin/</code> directory of the distribution, by setting site-specific 
+        values via the <code>conf/hadoop-env.sh</code>.</p>
+      </section>
+      
+      <section>
+        <title>Site Configuration</title>
+        
+        <p>To configure the Hadoop cluster you will need to configure the
+        <em>environment</em> in which the Hadoop daemons execute as well as
+        the <em>configuration parameters</em> for the Hadoop daemons.</p>
+        
+        <p>The Hadoop daemons are <code>NameNode</code>/<code>DataNode</code> 
+        and <code>JobTracker</code>/<code>TaskTracker</code>.</p>
+        
+        <section>
+          <title>Configuring the Environment of the Hadoop Daemons</title>
+
+          <p>Administrators should use the <code>conf/hadoop-env.sh</code> script
+          to do site-specific customization of the Hadoop daemons' process 
+          environment.</p> 
+          
+          <p>At the very least you should specify the
+          <code>JAVA_HOME</code> so that it is correctly defined on each
+          remote node.</p>
+          
+          <p>Other useful configuration parameters that you can customize 
+          include:</p>
+          <ul>
+            <li>
+              <code>HADOOP_LOG_DIR</code> - The directory where the daemons'
+              log files are stored. It is created automatically if it does not
+              exist.
+            </li>
+            <li>
+              <code>HADOOP_HEAPSIZE</code> - The maximum heap size to use, 
+              in MB; e.g. a value of <code>2000</code> means 2000MB.
+            </li>
+          </ul>
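+          
+          <p>As a minimal, illustrative sketch (the paths shown are examples, 
+          not defaults), a site-specific <code>conf/hadoop-env.sh</code> might 
+          contain:</p>
+<source>
+# Illustrative site-specific settings; adjust the paths for your installation.
+export JAVA_HOME=/usr/java/jdk1.6       # must resolve correctly on every node
+export HADOOP_LOG_DIR=/var/log/hadoop   # where the daemons write their logs
+export HADOOP_HEAPSIZE=2000             # daemon heap size, in MB
+</source>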
+        </section>
+        
+        <section>
+          <title>Configuring the Hadoop Daemons</title>
+          
+          <p>This section deals with important parameters to be specified in the
+          <code>conf/hadoop-site.xml</code> for the Hadoop cluster.</p>
+
+		  <table>
+  		    <tr>
+		      <th>Parameter</th>
+		      <th>Value</th> 
+		      <th>Notes</th>
+		    </tr>
+  		    <tr>
+		      <td>fs.default.name</td>
+  		      <td>Hostname or IP address of <code>NameNode</code>.</td>
+		      <td><em>host:port</em> pair.</td>
+		    </tr>
+		    <tr>
+		      <td>mapred.job.tracker</td>
+		      <td>Hostname or IP address of <code>JobTracker</code>.</td>
+		      <td><em>host:port</em> pair.</td>
+		    </tr>
+		    <tr>
+		      <td>dfs.name.dir</td>
+		      <td>
+		        Path on the local filesystem where the <code>NameNode</code> 
+		        stores the namespace and transactions logs persistently.</td>
+		      <td>
+		        If this is a comma-delimited list of directories then the name 
+		        table is replicated in all of the directories, for redundancy.
+		      </td>
+		    </tr>
+		    <tr>
+		      <td>dfs.data.dir</td>
+		      <td>
+		        Comma separated list of paths on the local filesystem of a 
+		        <code>DataNode</code> where it should store its blocks.
+		      </td>
+		      <td>
+		        If this is a comma-delimited list of directories, then data will 
+		        be stored in all named directories, typically on different 
+		        devices.
+		      </td>
+		    </tr>
+		    <tr>
+		      <td>mapred.system.dir</td>
+		      <td>
+		        Path on HDFS where the Map-Reduce framework stores 
+		        system files e.g. <code>/hadoop/mapred/system/</code>.
+		      </td>
+		      <td>
+		        This is in the default filesystem (HDFS) and must be accessible 
+		        from both the server and client machines.
+		      </td>
+		    </tr>
+		    <tr>
+		      <td>mapred.local.dir</td>
+		      <td>
+		        Comma-separated list of paths on the local filesystem where 
+		        temporary Map-Reduce data is written.
+		      </td>
+		      <td>Multiple paths help spread disk i/o.</td>
+		    </tr>
+		    <tr>
+		      <td>mapred.tasktracker.tasks.maximum</td>
+		      <td>
+		        The maximum number of map tasks and reduce tasks run 
+		        simultaneously on a given <code>TaskTracker</code>, counted 
+		        separately for maps and reduces.
+		      </td>
+		      <td>
+		        Defaults to 2 (2 maps and 2 reduces), but vary it depending on 
+		        your hardware.
+		      </td>
+		    </tr>
+		    <tr>
+		      <td>dfs.hosts/dfs.hosts.exclude</td>
+		      <td>List of permitted/excluded DataNodes.</td>
+		      <td>
+		        If necessary, use these files to control the list of allowable 
+		        datanodes.
+		      </td>
+		    </tr>
+		    <tr>
+		      <td>mapred.hosts/mapred.hosts.exclude</td>
+		      <td>List of permitted/excluded TaskTrackers.</td>
+		      <td>
+		        If necessary, use these files to control the list of allowable 
+		        tasktrackers.
+		      </td>
+  		    </tr>
+		  </table>
+
+          <p>Typically all the above parameters are marked as 
+          <a href="api/index.html?org/apache/hadoop/conf/Configuration.html#FinalParameters">
+          final</a> to ensure that they cannot be overridden by user-applications.
+          </p>
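+
+          <p>As an illustration (the hostnames, ports and paths below are 
+          hypothetical), the corresponding <code>conf/hadoop-site.xml</code> 
+          entries might look like:</p>
+<source>
+&lt;configuration&gt;
+  &lt;property&gt;
+    &lt;name&gt;fs.default.name&lt;/name&gt;
+    &lt;value&gt;namenode.example.com:9000&lt;/value&gt;
+    &lt;final&gt;true&lt;/final&gt;
+  &lt;/property&gt;
+  &lt;property&gt;
+    &lt;name&gt;mapred.job.tracker&lt;/name&gt;
+    &lt;value&gt;jobtracker.example.com:9001&lt;/value&gt;
+    &lt;final&gt;true&lt;/final&gt;
+  &lt;/property&gt;
+  &lt;property&gt;
+    &lt;name&gt;dfs.data.dir&lt;/name&gt;
+    &lt;value&gt;/disk1/dfs/data,/disk2/dfs/data&lt;/value&gt;
+    &lt;final&gt;true&lt;/final&gt;
+  &lt;/property&gt;
+&lt;/configuration&gt;
+</source>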
+
+          <section>
+            <title>Real-World Cluster Configurations</title>
+            
+            <p>This section lists some non-default configuration parameters which 
+            have been used to run the <em>sort</em> benchmark on very large 
+            clusters.</p>
+            
+            <ul>
+              <li>
+                <p>Some non-default configuration values used to run sort900,
+                that is 9TB of data sorted on a cluster with 900 nodes:</p>
+                <table>
+  		          <tr>
+		            <th>Parameter</th>
+		            <th>Value</th> 
+		            <th>Notes</th>
+		          </tr>
+                  <tr>
+                    <td>dfs.block.size</td>
+                    <td>134217728</td>
+                    <td>HDFS blocksize of 128MB for large file-systems.</td>
+                  </tr>
+                  <tr>
+                    <td>dfs.namenode.handler.count</td>
+                    <td>40</td>
+                    <td>
+                      More NameNode server threads to handle RPCs from large 
+                      number of DataNodes.
+                    </td>
+                  </tr>
+                  <tr>
+                    <td>mapred.reduce.parallel.copies</td>
+                    <td>20</td>
+                    <td>
+                      Higher number of parallel copies run by reduces to fetch
+                      outputs from very large number of maps.
+                    </td>
+                  </tr>
+                  <tr>
+                    <td>mapred.child.java.opts</td>
+                    <td>-Xmx512M</td>
+                    <td>
+                      Larger heap-size for child jvms of maps/reduces.
+                    </td>
+                  </tr>
+                  <tr>
+                    <td>fs.inmemory.size.mb</td>
+                    <td>200</td>
+                    <td>
+                      Larger amount of memory allocated for the in-memory 
+                      file-system used to merge map-outputs at the reduces.
+                    </td>
+                  </tr>
+                  <tr>
+                    <td>io.sort.factor</td>
+                    <td>100</td>
+                    <td>More streams merged at once while sorting files.</td>
+                  </tr>
+                  <tr>
+                    <td>io.sort.mb</td>
+                    <td>200</td>
+                    <td>Higher memory-limit while sorting data.</td>
+                  </tr>
+                  <tr>
+                    <td>io.file.buffer.size</td>
+                    <td>131072</td>
+                    <td>Size of read/write buffer used in SequenceFiles.</td>
+                  </tr>
+                </table>
+              </li>
+              <li>
+                <p>Updates to some configuration values to run sort1400 and 
+                sort2000, that is 14TB of data sorted on 1400 nodes and 20TB of
+                data sorted on 2000 nodes:</p>
+                <table>
+  		          <tr>
+		            <th>Parameter</th>
+		            <th>Value</th> 
+		            <th>Notes</th>
+		          </tr>
+                  <tr>
+                    <td>mapred.job.tracker.handler.count</td>
+                    <td>60</td>
+                    <td>
+                      More JobTracker server threads to handle RPCs from large 
+                      number of TaskTrackers.
+                    </td>
+                  </tr>
+                  <tr>
+                    <td>mapred.reduce.parallel.copies</td>
+                    <td>50</td>
+                    <td></td>
+                  </tr>
+                  <tr>
+                    <td>tasktracker.http.threads</td>
+                    <td>50</td>
+                    <td>
+                      More worker threads for the TaskTracker's http server. The
+                      http server is used by reduces to fetch intermediate 
+                      map-outputs.
+                    </td>
+                  </tr>
+                  <tr>
+                    <td>mapred.child.java.opts</td>
+                    <td>-Xmx1024M</td>
+                    <td></td>
+                  </tr>
+                </table>
+              </li>
+            </ul>
+          </section>
+          
+        </section>
+        
+        <section>
+          <title>Slaves</title>
+          
+          <p>Typically you choose one machine in the cluster to act as the 
+          <code>NameNode</code> and one machine to act as the 
+          <code>JobTracker</code>, exclusively. The rest of the machines act as 
+          both a <code>DataNode</code> and <code>TaskTracker</code> and are 
+          referred to as <em>slaves</em>.</p>
+          
+          <p>List all slave hostnames or IP addresses in your 
+          <code>conf/slaves</code> file, one per line.</p>
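+          
+          <p>For example, a <code>conf/slaves</code> file for a small, 
+          hypothetical cluster might simply read:</p>
+<source>
+slave01.example.com
+slave02.example.com
+slave03.example.com
+slave04.example.com
+</source>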
+        </section>
+        
+        <section>
+          <title>Logging</title>
+          
+          <p>Hadoop uses the <a href="http://logging.apache.org/log4j/">Apache 
+          log4j</a> via the <a href="http://commons.apache.org/logging/">Apache 
+          Commons Logging</a> framework for logging. Edit the 
+          <code>conf/log4j.properties</code> file to customize the Hadoop 
+          daemons' logging configuration (log-formats and so on).</p>
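+          
+          <p>For instance, a single (illustrative) override using standard 
+          log4j property syntax raises the verbosity of just the Map-Reduce 
+          classes:</p>
+<source>
+# Illustrative log4j override: more detailed logging for the Map-Reduce classes.
+log4j.logger.org.apache.hadoop.mapred=DEBUG
+</source>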
+        </section>
+      </section>
+      
+      <p>Once all the necessary configuration is complete, distribute the files
+      to the <code>HADOOP_CONF_DIR</code> directory on all the machines, 
+      typically <code>${HADOOP_HOME}/conf</code>.</p>
+    </section>
+    
+    <section>
+      <title>Hadoop Startup</title>
+      
+      <p>To start a Hadoop cluster you will need to start both the HDFS and 
+      Map-Reduce cluster.</p>
+
+      <p>
+        Format a new distributed filesystem:<br/>
+        <code>$ bin/hadoop namenode -format</code>
+      </p>
+      
+      <p>
+        Start the HDFS with the following command, run on the designated
+        <code>NameNode</code>:<br/>
+        <code>$ bin/start-dfs.sh</code>
+      </p>
+      <p>The <code>bin/start-dfs.sh</code> script also consults the 
+      <code>${HADOOP_CONF_DIR}/slaves</code> file on the <code>NameNode</code> 
+      and starts the <code>DataNode</code> daemon on all the listed slaves.</p>
+      
+      <p>
+        Start Map-Reduce with the following command, run on the designated
+        <code>JobTracker</code>:<br/>
+        <code>$ bin/start-mapred.sh</code>
+      </p>
+      <p>The <code>bin/start-mapred.sh</code> script also consults the 
+      <code>${HADOOP_CONF_DIR}/slaves</code> file on the <code>JobTracker</code> 
+      and starts the <code>TaskTracker</code> daemon on all the listed slaves.
+      </p>
+    </section>
+    
+    <section>
+      <title>Hadoop Shutdown</title>
+      
+      <p>
+        Stop HDFS with the following command, run on the designated 
+        <code>NameNode</code>:<br/>
+        <code>$ bin/stop-dfs.sh</code>
+      </p>
+      <p>The <code>bin/stop-dfs.sh</code> script also consults the 
+      <code>${HADOOP_CONF_DIR}/slaves</code> file on the <code>NameNode</code> 
+      and stops the <code>DataNode</code> daemon on all the listed slaves.</p>
+      
+      <p>
+        Stop Map-Reduce with the following command, run on the designated
+        <code>JobTracker</code>:<br/>
+        <code>$ bin/stop-mapred.sh</code><br/>
+      </p>
+      <p>The <code>bin/stop-mapred.sh</code> script also consults the 
+      <code>${HADOOP_CONF_DIR}/slaves</code> file on the <code>JobTracker</code> 
+      and stops the <code>TaskTracker</code> daemon on all the listed slaves.</p>
+    </section>
+  </body>
+  
+</document>

+ 8 - 5
src/docs/src/documentation/content/xdocs/documentation.xml

@@ -10,13 +10,16 @@
   
   <body>
     <p>
-    The following documents provide concepts and procedures that will help you get started using Hadoop.
-    If you have more questions, you can ask the <a href="mailing_lists.html">mailing list</a> or browse the archives.
+    The following documents provide concepts and procedures that will help you 
+    get started using Hadoop. If you have more questions, you can ask the 
+    <a href="mailing_lists.html">mailing list</a> or browse the archives.
     </p>
     <ul>
-      <li><a href="hdfs_design.html">Hadoop Distributed File System (<acronym title="Hadoop Distributed File System">HDFS</acronym>)</a></li>
-      <li><a href="ext:overview">Install and configure</a></li>
-      <li><a href="ext:api">API Docs</a></li>
+      <li><a href="quickstart.html">Hadoop Quickstart</a></li>
+      <li><a href="cluster_setup.html">Hadoop Cluster Setup</a></li>
+      <li><a href="hdfs_design.html">Hadoop Distributed File System</a></li>
+      <li><a href="mapred_tutorial.html">Hadoop Map-Reduce Tutorial</a></li>
+      <li><a href="ext:api/index">API Docs</a></li>
       <li><a href="ext:wiki">Wiki</a></li>
       <li><a href="ext:faq">FAQ</a></li>
     </ul>

+ 2 - 1
src/docs/src/documentation/content/xdocs/index.xml

@@ -64,7 +64,8 @@
       <ol>
         <li><a href="documentation.html">Learn about</a> Hadoop by reading the documentation.</li>
         <li><a href="releases.html">Download</a> Hadoop from the release page.</li>
-        <li><a href="ext:overview">Install and configure</a> Hadoop. Scroll down the page.</li>
+        <li>Hadoop <a href="quickstart.html">Quickstart</a>.</li>
+        <li><a href="cluster_setup.html">Hadoop Cluster Setup</a>.</li>
         <li><a href="mailing_lists.html">Discuss it</a> on the mailing list.</li>
       </ol>
     </section>

+ 2304 - 0
src/docs/src/documentation/content/xdocs/mapred_tutorial.xml

@@ -0,0 +1,2304 @@
+<?xml version="1.0"?>
+
+<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd">
+
+<document>
+  
+  <header>
+    <title>Hadoop Map-Reduce Tutorial</title>
+  </header>
+  
+  <body>
+  
+    <section>
+      <title>Purpose</title>
+      
+      <p>This document comprehensively describes all user-facing facets of the 
+      Hadoop Map-Reduce framework and serves as a tutorial.
+      </p>
+    </section>
+    
+    <section>
+      <title>Pre-requisites</title>
+      
+      <p>Ensure that Hadoop is installed, configured and is running. More
+      details:</p> 
+      <ul>
+        <li>
+          Hadoop <a href="quickstart.html">Quickstart</a> for first-time users.
+        </li>
+        <li>
+          Hadoop <a href="cluster_setup.html">Cluster Setup</a> for large, 
+          distributed clusters.
+        </li>
+      </ul>
+    </section>
+    
+    <section>
+      <title>Overview</title>
+      
+      <p>Hadoop Map-Reduce is a software framework for easily writing 
+      applications which process vast amounts of data (multi-terabyte data-sets) 
+      in-parallel on large clusters (thousands of nodes) of commodity 
+      hardware in a reliable, fault-tolerant manner.</p>
+      
+      <p>A Map-Reduce <em>job</em> usually splits the input data-set into 
+      independent chunks which are processed by the <em>map tasks</em> in a
+      completely parallel manner. The framework sorts the outputs of the maps, 
+      which are then input to the <em>reduce tasks</em>. Typically both the 
+      input and the output of the job are stored in a file-system. The framework 
+      takes care of scheduling tasks, monitoring them and re-executing the failed
+      tasks.</p>
+      
+      <p>Typically the compute nodes and the storage nodes are the same, that is, 
+      the Map-Reduce framework and the <a href="hdfs_design.html">Distributed 
+      FileSystem</a> are running on the same set of nodes. This configuration
+      allows the framework to effectively schedule tasks on the nodes where data 
+      is already present, resulting in very high aggregate bandwidth across the 
+      cluster.</p>
+      
+      <p>The Map-Reduce framework consists of a single master 
+      <code>JobTracker</code> and one slave <code>TaskTracker</code> per 
+      cluster-node. The master is responsible for scheduling the jobs' component 
+      tasks on the slaves, monitoring them and re-executing the failed tasks. The 
+      slaves execute the tasks as directed by the master.</p>
+      
+      <p>Minimally, applications specify the input/output locations and supply
+      <em>map</em> and <em>reduce</em> functions via implementations of
+      appropriate interfaces and/or abstract-classes. These, and other job 
+      parameters, comprise the <em>job configuration</em>. The Hadoop 
+      <em>job client</em> then submits the job (jar/executable etc.) and 
+      configuration to the <code>JobTracker</code> which then assumes the 
+      responsibility of distributing the software/configuration to the slaves, 
+      scheduling tasks and monitoring them, providing status and diagnostic 
+      information to the job-client.</p>
+      
+      <p>Although the Hadoop framework is implemented in Java<sup>TM</sup>, 
+      Map-Reduce applications need not be written in Java.</p>
+      <ul>
+        <li>
+          <a href="ext:api/org/apache/hadoop/streaming/package-summary">
+          Hadoop Streaming</a> is a utility which allows users to create and run 
+          jobs with any executables (e.g. shell utilities) as the mapper and/or 
+          the reducer.
+        </li>
+        <li>
+          <a href="ext:api/org/apache/hadoop/mapred/pipes/package-summary">
+          Hadoop Pipes</a> is a <a href="http://www.swig.org/">SWIG</a>-
+          compatible <em>C++ API</em> to implement Map-Reduce applications (non 
+          JNI<sup>TM</sup> based).
+        </li>
+      </ul>
+    </section>
+    
+    <section>
+      <title>Inputs and Outputs</title>
+
+      <p>The Map-Reduce framework operates exclusively on 
+      <code>&lt;key, value&gt;</code> pairs, that is, the framework views the 
+      input to the job as a set of <code>&lt;key, value&gt;</code> pairs and 
+      produces a set of <code>&lt;key, value&gt;</code> pairs as the output of 
+      the job, conceivably of different types.</p> 
+      
+      <p>The <code>key</code> and <code>value</code> classes have to be 
+      serializable by the framework and hence need to implement the 
+      <a href="ext:api/org/apache/hadoop/io/writable">Writable</a> 
+      interface. Additionally, the <code>key</code> classes have to implement the
+      <a href="ext:api/org/apache/hadoop/io/writablecomparable">
+      WritableComparable</a> interface to facilitate sorting by the framework.
+      </p>
+
+      <p>Input and Output types of a Map-Reduce job:</p>
+      <p>
+        (input) <code>&lt;k1, v1&gt;</code> 
+        -&gt; 
+        <strong>map</strong> 
+        -&gt; 
+        <code>&lt;k2, v2&gt;</code> 
+        -&gt; 
+        <strong>combine</strong> 
+        -&gt; 
+        <code>&lt;k2, v2&gt;</code> 
+        -&gt; 
+        <strong>reduce</strong> 
+        -&gt; 
+        <code>&lt;k3, v3&gt;</code> (output)
+      </p>
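+      
+      <p>As a rough sketch of what the serialization requirement means in 
+      practice (the class and field names below are made up for illustration), 
+      a custom key type implements <code>WritableComparable</code>:</p>
+<source>
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.WritableComparable;
+
+// Hypothetical key type: a year, serialized as a single int.
+public class YearKey implements WritableComparable {
+  private int year;
+
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(year);                      // serialize the key
+  }
+
+  public void readFields(DataInput in) throws IOException {
+    year = in.readInt();                     // deserialize the key
+  }
+
+  public int compareTo(Object other) {
+    return year - ((YearKey) other).year;    // defines the sort order
+  }
+}
+</source>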
+    </section>
+
+    <section>
+      <title>Example: WordCount v1.0</title>
+      
+      <p>Before we jump into the details, let's walk through an example Map-Reduce 
+      application to get a flavour for how they work.</p>
+      
+      <p><code>WordCount</code> is a simple application that counts the number of
+      occurrences of each word in a given input set.</p>
+      
+      <section>
+        <title>Source Code</title>
+        
+        <table>
+          <tr>
+            <th></th>
+            <th>WordCount.java</th>
+          </tr>
+          <tr>
+            <td>1.</td>
+            <td>
+              <code>package org.myorg;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>2.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>3.</td>
+            <td>
+              <code>import java.io.IOException;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>4.</td>
+            <td>
+              <code>import java.util.*;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>5.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>6.</td>
+            <td>
+              <code>import org.apache.hadoop.fs.Path;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>7.</td>
+            <td>
+              <code>import org.apache.hadoop.conf.*;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>8.</td>
+            <td>
+              <code>import org.apache.hadoop.io.*;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>9.</td>
+            <td>
+              <code>import org.apache.hadoop.mapred.*;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>10.</td>
+            <td>
+              <code>import org.apache.hadoop.util.*;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>11.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>12.</td>
+            <td>
+              <code>public class WordCount {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>13.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>14.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>
+                public static class MapClass extends MapReduceBase 
+                implements Mapper&lt;LongWritable, Text, Text, IntWritable&gt; {
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>15.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                private final static IntWritable one = new IntWritable(1);
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>16.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>private Text word = new Text();</code>
+            </td>
+          </tr>
+          <tr>
+            <td>17.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>18.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                public void map(LongWritable key, Text value, 
+                OutputCollector&lt;Text, IntWritable&gt; output, 
+                Reporter reporter) throws IOException {
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>19.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>String line = value.toString();</code>
+            </td>
+          </tr>
+          <tr>
+            <td>20.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>StringTokenizer tokenizer = new StringTokenizer(line);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>21.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>while (tokenizer.hasMoreTokens()) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>22.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>word.set(tokenizer.nextToken());</code>
+            </td>
+          </tr>
+          <tr>
+            <td>23.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>output.collect(word, one);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>24.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>25.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>26.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>27.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>28.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>
+                public static class Reduce extends MapReduceBase implements 
+                Reducer&lt;Text, IntWritable, Text, IntWritable&gt; {
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>29.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                public void reduce(Text key, Iterator&lt;IntWritable&gt; values,
+                OutputCollector&lt;Text, IntWritable&gt; output, 
+                Reporter reporter) throws IOException {
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>30.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>int sum = 0;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>31.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>while (values.hasNext()) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>32.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>sum += values.next().get();</code>
+            </td>
+          </tr>
+          <tr>
+            <td>33.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>34.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>output.collect(key, new IntWritable(sum));</code>
+            </td>
+          </tr>
+          <tr>
+            <td>35.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>36.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>37.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>38.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>
+                public static void main(String[] args) throws Exception {
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>39.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                JobConf conf = new JobConf(WordCount.class);
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>40.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setJobName("wordcount");</code>
+            </td>
+          </tr>
+          <tr>
+            <td>41.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>42.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setOutputKeyClass(Text.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>43.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setOutputValueClass(IntWritable.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>44.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>45.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setMapperClass(MapClass.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>46.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setCombinerClass(Reduce.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>47.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setReducerClass(Reduce.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>48.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>49.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setInputFormat(TextInputFormat.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>50.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setOutputFormat(TextOutputFormat.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>51.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>52.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setInputPath(new Path(args[0]));</code>
+            </td>
+          </tr>
+          <tr>
+            <td>53.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setOutputPath(new Path(args[1]));</code>
+            </td>
+          </tr>
+          <tr>
+            <td>54.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>55.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>JobClient.runJob(conf);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>56.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>57.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>58.</td>
+            <td>
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>59.</td>
+            <td></td>
+          </tr>
+        </table>
+      </section>
+        
+      <section>
+        <title>Usage</title>
+        
+        <p>Assuming <code>HADOOP_HOME</code> is the root of the installation and 
+        <code>HADOOP_VERSION</code> is the Hadoop version installed, compile 
+        <code>WordCount.java</code> and create a jar:</p>
+        <p>
+          <code>$ mkdir wordcount_classes</code><br/>
+          <code>
+            $ javac -classpath ${HADOOP_HOME}/hadoop-${HADOOP_VERSION}-core.jar 
+              -d wordcount_classes WordCount.java
+          </code><br/>
+          <code>$ jar -cvf /usr/joe/wordcount.jar -C wordcount_classes/ .</code> 
+        </p>
+        
+        <p>Assuming that:</p>
+        <ul>
+          <li>
+            <code>/usr/joe/wordcount/input</code>  - input directory in HDFS
+          </li>
+          <li>
+            <code>/usr/joe/wordcount/output</code> - output directory in HDFS
+          </li>
+        </ul>
+        
+        <p>Sample text-files as input:</p>
+        <p>
+          <code>$ bin/hadoop dfs -ls /usr/joe/wordcount/input/</code><br/>
+          <code>/usr/joe/wordcount/input/file01</code><br/>
+          <code>/usr/joe/wordcount/input/file02</code><br/>
+          <br/>
+          <code>$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01</code><br/>
+          <code>Hello World Bye World</code><br/>
+          <br/>
+          <code>$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02</code><br/>
+          <code>Hello Hadoop Goodbye Hadoop</code>
+        </p>
+
+        <p>Run the application:</p>
+        <p>
+          <code>
+            $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount 
+              /usr/joe/wordcount/input /usr/joe/wordcount/output 
+          </code>
+        </p>
+
+        <p>Output:</p>
+        <p>
+          <code>
+            $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
+          </code>
+          <br/>
+          <code>Bye    1</code><br/>
+          <code>Goodbye    1</code><br/>
+          <code>Hadoop    2</code><br/>
+          <code>Hello    2</code><br/>
+          <code>World    2</code><br/>
+        </p>
+      </section>
+      
+      <section>
+        <title>Walk-through</title>
+        
+        <p>The <code>WordCount</code> application is quite straight-forward.</p>
+        
+        <p>The <code>Mapper</code> implementation (lines 14-26), via the 
+        <code>map</code> method (lines 18-25), processes one line at a time,
+        as provided by the specified <code>TextInputFormat</code> (line 49). 
+        It then splits the line into tokens separated by whitespaces, via the 
+        <code>StringTokenizer</code>, and emits a key-value pair of 
+        <code>&lt; &lt;word&gt;, 1&gt;</code>.</p>
+        
+        <p>
+          For the given sample input the first map emits:<br/>
+          <code>&lt; Hello, 1&gt;</code><br/>
+          <code>&lt; World, 1&gt;</code><br/>
+          <code>&lt; Bye, 1&gt;</code><br/>
+          <code>&lt; World, 1&gt;</code><br/>
+        </p>
+        
+        <p>
+          The second map emits:<br/>
+          <code>&lt; Hello, 1&gt;</code><br/>
+          <code>&lt; Hadoop, 1&gt;</code><br/>
+          <code>&lt; Goodbye, 1&gt;</code><br/>
+          <code>&lt; Hadoop, 1&gt;</code><br/>
+        </p>
+        
+        <p>We'll learn more about the number of maps spawned for a given job, and
+        how to control them in a fine-grained manner, a bit later in the 
+        tutorial.</p>
+        
+        <p><code>WordCount</code> also specifies a <code>combiner</code> (line 
+        46). Hence, the output of each map is passed through the local combiner 
+        (which is the same as the <code>Reducer</code> as per the job 
+        configuration) for local aggregation, after being sorted on the 
+        <em>key</em>s.</p>
+
+        <p>
+          The output of the first map:<br/>
+          <code>&lt; Bye, 1&gt;</code><br/>
+          <code>&lt; Hello, 1&gt;</code><br/>
+          <code>&lt; World, 2&gt;</code><br/>
+        </p>
+        
+        <p>
+          The output of the second map:<br/>
+          <code>&lt; Goodbye, 1&gt;</code><br/>
+          <code>&lt; Hadoop, 2&gt;</code><br/>
+          <code>&lt; Hello, 1&gt;</code><br/>
+        </p>
+
+        <p>The <code>Reducer</code> implementation (lines 28-36), via the
+        <code>reduce</code> method (lines 29-35) just sums up the values,
+        which are the occurrence counts for each key (i.e. words in this example).
+        </p>
+        
+        <p>
+          Thus the output of the job is:<br/>
+          <code>&lt; Bye, 1&gt;</code><br/>
+          <code>&lt; Goodbye, 1&gt;</code><br/>
+          <code>&lt; Hadoop, 2&gt;</code><br/>
+          <code>&lt; Hello, 2&gt;</code><br/>
+          <code>&lt; World, 2&gt;</code><br/>
+        </p>
+        
+        <p>The <code>main</code> method specifies various facets of the job, such 
+        as the input/output paths (passed via the command line), key/value 
+        types, input/output formats etc., in the <code>JobConf</code>.
+        It then calls <code>JobClient.runJob</code> (line 55) to submit the job
+        and monitor its progress.</p>
+
+        <p>We'll learn more about <code>JobConf</code>, <code>JobClient</code>,
+        <code>Tool</code> and other interfaces and classes a bit later in the 
+        tutorial.</p>
+      </section>
+    </section>
+    
+    <section>
+      <title>Map-Reduce - User Interfaces</title>
+      
+      <p>This section provides a reasonable amount of detail on every user-facing 
+      aspect of the Map-Reduce framework. This should help users implement, 
+      configure and tune their jobs in a fine-grained manner. However, please 
+      note that the javadoc for each class/interface remains the most 
+      comprehensive documentation available; this is only meant to be a tutorial.
+      </p>
+      
+      <p>Let us first take the <code>Mapper</code> and <code>Reducer</code> 
+      interfaces. Applications typically implement them to provide the 
+      <code>map</code> and <code>reduce</code> methods.</p>
+      
+      <p>We will then discuss other core interfaces including 
+      <code>JobConf</code>, <code>JobClient</code>, <code>Partitioner</code>, 
+      <code>OutputCollector</code>, <code>Reporter</code>, 
+      <code>InputFormat</code>, <code>OutputFormat</code> and others.</p>
+      
+      <p>Finally, we will wrap up by discussing some useful features of the
+      framework such as the <code>DistributedCache</code>, 
+      <code>IsolationRunner</code> etc.</p>
+
+      <section>
+        <title>Payload</title>
+        
+        <p>Applications typically implement the <code>Mapper</code> and 
+        <code>Reducer</code> interfaces to provide the <code>map</code> and 
+        <code>reduce</code> methods. These form the core of the job.</p>
+        
+        <section>
+          <title>Mapper</title>
+
+          <p><a href="ext:api/org/apache/hadoop/mapred/mapper">
+          Mapper</a> maps input key/value pairs to a set of intermediate 
+          key/value pairs.</p>
+ 
+          <p>Maps are the individual tasks that transform input records into 
+          intermediate records. The transformed intermediate records do not need
+          to be of the same type as the input records. A given input pair may 
+          map to zero or many output pairs.</p> 
+ 
+          <p>The Hadoop Map-Reduce framework spawns one map task for each 
+          <code>InputSplit</code> generated by the <code>InputFormat</code> for 
+          the job.</p>
+          
+          <p>Overall, <code>Mapper</code> implementations are passed the 
+          <code>JobConf</code> for the job via the 
+          <a href="ext:api/org/apache/hadoop/mapred/jobconfigurable/configure">
+          JobConfigurable.configure(JobConf)</a> method and can override it to 
+          initialize themselves. The framework then calls 
+          <a href="ext:api/org/apache/hadoop/mapred/mapper/map">
+          map(WritableComparable, Writable, OutputCollector, Reporter)</a> for 
+          each key/value pair in the <code>InputSplit</code> for that task.        
+          Applications can then override the
+          <a href="ext:api/org/apache/hadoop/io/closeable/close">
+          Closeable.close()</a> method to perform any required cleanup.</p>
+ 
+
+          <p>Output pairs do not need to be of the same types as input pairs. A 
+          given input pair may map to zero or many output pairs.  Output pairs 
+          are collected with calls to 
+          <a href="ext:api/org/apache/hadoop/mapred/outputcollector/collect">
+          OutputCollector.collect(WritableComparable,Writable)</a>.</p>
+
+          <p>Applications can use the <code>Reporter</code> to report 
+          progress, set application-level status messages and update 
+          <code>Counters</code>, or just indicate that they are alive.</p>
+ 
+          <p>All intermediate values associated with a given output key are 
+          subsequently grouped by the framework, and passed to the
+          <code>Reducer</code>(s) to  determine the final output. Users can 
+          control the grouping by specifying a <code>Comparator</code> via 
+          <a href="ext:api/org/apache/hadoop/mapred/jobconf/setoutputkeycomparatorclass">
+          JobConf.setOutputKeyComparatorClass(Class)</a>.</p>
+
+          <p>The <code>Mapper</code> outputs are sorted and then 
+          partitioned per <code>Reducer</code>. The total number of partitions is 
+          the same as the number of reduce tasks for the job. Users can control 
+          which keys (and hence records) go to which <code>Reducer</code> by 
+          implementing a custom <code>Partitioner</code>.</p>
+ 
+          <p>Users can optionally specify a <code>combiner</code>, via 
+          <a href="ext:api/org/apache/hadoop/mapred/jobconf/setcombinerclass">
+          JobConf.setCombinerClass(Class)</a>, to perform local aggregation of 
+          the intermediate outputs, which helps to cut down the amount of data 
+          transferred from the <code>Mapper</code> to the <code>Reducer</code>.
+          </p>
+ 
+          <p>The intermediate, sorted outputs are always stored in files of 
+          <a href="ext:api/org/apache/hadoop/io/sequencefile">
+          SequenceFile</a> format. Applications can control if, and how, the 
+          intermediate outputs are to be compressed and the 
+          <a href="ext:api/org/apache/hadoop/io/compress/compressioncodec">
+          CompressionCodec</a> to be used via the <code>JobConf</code>.
+          </p>
+          
+          <section>
+            <title>How Many Maps?</title>
+             
+            <p>The number of maps is usually driven by the total size of the 
+            inputs, that is, the total number of blocks of the input files.</p>
+  
+            <p>The right level of parallelism for maps seems to be around 10-100 
+            maps per-node, although it has been set up to 300 maps for very 
+            cpu-light map tasks. Task setup takes a while, so it is best if the 
+            maps take at least a minute to execute.</p>
+ 
+            <p>Thus, if you expect 10TB of input data and have a blocksize of 
+            <code>128MB</code>, you'll end up with 82,000 maps, unless 
+            <a href="ext:api/org/apache/hadoop/mapred/jobconf/setnummaptasks">
+            setNumMapTasks(int)</a> (which only provides a hint to the framework) 
+            is used to set it even higher.</p>
+          </section>
+        </section>
+        
+        <section>
+          <title>Reducer</title>
+          
+          <p><a href="ext:api/org/apache/hadoop/mapred/reducer">
+          Reducer</a> reduces a set of intermediate values which share a key to
+          a smaller set of values.</p>
+          
+          <p>The number of reduces for the job is set by the user 
+          via <a href="ext:api/org/apache/hadoop/mapred/jobconf/setnumreducetasks">
+          JobConf.setNumReduceTasks(int)</a>.</p>
+          
+          <p>Overall, <code>Reducer</code> implementations are passed the 
+          <code>JobConf</code> for the job via the 
+          <a href="ext:api/org/apache/hadoop/mapred/jobconfigurable/configure">
+          JobConfigurable.configure(JobConf)</a> method and can override it to 
+          initialize themselves. The framework then calls   
+          <a href="ext:api/org/apache/hadoop/mapred/reducer/reduce">
+          reduce(WritableComparable, Iterator, OutputCollector, Reporter)</a>
+          method for each <code>&lt;key, (list of values)&gt;</code> 
+          pair in the grouped inputs. Applications can then override the           
+          <a href="ext:api/org/apache/hadoop/io/closeable/close">
+          Closeable.close()</a> method to perform any required cleanup.</p>
+
+          <p><code>Reducer</code> has 3 primary phases: shuffle, sort and reduce.
+          </p>
+          
+          <section>
+            <title>Shuffle</title>
+   
+            <p>Input to the <code>Reducer</code> is the sorted output of the
+            mappers. In this phase the framework fetches the relevant partition 
+            of the output of all the mappers, via HTTP.</p>
+          </section>
+   
+          <section>
+            <title>Sort</title>
+   
+            <p>The framework groups <code>Reducer</code> inputs by keys (since 
+            different mappers may have output the same key) in this stage.</p>
+   
+            <p>The shuffle and sort phases occur simultaneously; while 
+            map-outputs are being fetched they are merged.</p>
+      
+            <section>
+              <title>Secondary Sort</title>
+   
+              <p>If equivalence rules for grouping the intermediate keys are 
+              required to be different from those for grouping keys before 
+              reduction, then one may specify a <code>Comparator</code> via 
+              <a href="ext:api/org/apache/hadoop/mapred/jobconf/setoutputvaluegroupingcomparator">
+              JobConf.setOutputValueGroupingComparator(Class)</a>. Since 
+              <a href="ext:api/org/apache/hadoop/mapred/jobconf/setoutputkeycomparatorclass">
+              JobConf.setOutputKeyComparatorClass(Class)</a> can be used to 
+              control how intermediate keys are grouped, these can be used in 
+              conjunction to simulate <em>secondary sort on values</em>.</p>
+            </section>
+          </section>
+   
+          <section>   
+            <title>Reduce</title>
+   
+            <p>In this phase the 
+            <a href="ext:api/org/apache/hadoop/mapred/reducer/reduce">
+            reduce(WritableComparable, Iterator, OutputCollector, Reporter)</a>
+            method is called for each <code>&lt;key, (list of values)&gt;</code> 
+            pair in the grouped inputs.</p>
+            
+            <p>The output of the reduce task is typically written to the 
+            <a href="ext:api/org/apache/hadoop/fs/filesystem">
+            FileSystem</a> via 
+            <a href="ext:api/org/apache/hadoop/mapred/outputcollector/collect">
+            OutputCollector.collect(WritableComparable, Writable)</a>.</p>
+   
+            <p>Applications can use the <code>Reporter</code> to report 
+            progress, set application-level status messages and update 
+            <code>Counters</code>, or just indicate that they are alive.</p>
+ 
+           <p>The output of the <code>Reducer</code> is <em>not sorted</em>.</p>
+          </section>
+          
+          <section>
+            <title>How Many Reduces?</title>
+ 
+            <p>The right number of reduces seems to be <code>0.95</code> or 
+            <code>1.75</code> multiplied by (&lt;<em>no. of nodes</em>&gt; * 
+            <code>mapred.tasktracker.tasks.maximum</code>).</p>
+ 
+            <p>With <code>0.95</code> all of the reduces can launch immediately 
+            and start transferring map outputs as the maps finish. With 
+            <code>1.75</code> the faster nodes will finish their first round of 
+            reduces and launch a second wave of reduces doing a much better job 
+            of load balancing.</p>
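+ 
+            <p>For example, on a hypothetical cluster of 100 nodes with 
+            <code>mapred.tasktracker.tasks.maximum</code> left at its default 
+            of 2, this rule of thumb suggests roughly 0.95 * 200 = 190 reduces 
+            for a single wave, or 1.75 * 200 = 350 reduces for the two-wave 
+            approach.</p>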
+ 
+            <p>Increasing the number of reduces increases the framework overhead, 
+            but improves load balancing and lowers the cost of failures.</p>
+ 
+            <p>The scaling factors above are slightly less than whole numbers to 
+            reserve a few reduce slots in the framework for speculative-tasks and
+            failed tasks.</p>
+          </section>
+          
+          <section>
+            <title>Reducer NONE</title>
+            
+            <p>It is legal to set the number of reduce-tasks to <em>zero</em> if 
+            no reduction is desired.</p>
+ 
+            <p>In this case the outputs of the map-tasks go directly to the
+            <code>FileSystem</code>, into the output path set by 
+            <a href="ext:api/org/apache/hadoop/mapred/jobconf/setoutputpath">
+            setOutputPath(Path)</a>. The framework does not sort the 
+            map-outputs before writing them out to the <code>FileSystem</code>.
+            </p>
+          </section>
+        </section>
+        
+        <section>
+          <title>Partitioner</title>
+          
+          <p><a href="ext:api/org/apache/hadoop/mapred/partitioner">
+          Partitioner</a> partitions the key space.</p>
+
+          <p>Partitioner controls the partitioning of the keys of the 
+          intermediate map-outputs. The key (or a subset of the key) is used to 
+          derive the partition, typically by a <em>hash function</em>. The total 
+          number of partitions is the same as the number of reduce tasks for the 
+          job. Hence this controls which of the <code>m</code> reduce tasks the 
+          intermediate key (and hence the record) is sent to for reduction.</p>
+          
+          <p><a href="ext:api/org/apache/hadoop/mapred/lib/hashpartitioner">
+          HashPartitioner</a> is the default <code>Partitioner</code>.</p>
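+          
+          <p>As a brief sketch (the class name is hypothetical, and the key/value 
+          types are chosen to match the earlier <code>WordCount</code> example), 
+          a custom <code>Partitioner</code> might look like:</p>
+<source>
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.Partitioner;
+
+// Hypothetical partitioner: routes each word to a reduce by hashing the key,
+// much like the default HashPartitioner does.
+public class WordPartitioner implements Partitioner&lt;Text, IntWritable&gt; {
+
+  public void configure(JobConf job) {
+    // No site-specific setup needed for this example.
+  }
+
+  public int getPartition(Text key, IntWritable value, int numReduceTasks) {
+    // Mask the sign bit so the partition index is always non-negative.
+    return (key.hashCode() &amp; Integer.MAX_VALUE) % numReduceTasks;
+  }
+}
+</source>
+          <p>The job would then select it via 
+          <code>conf.setPartitionerClass(WordPartitioner.class)</code>.</p>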
+        </section>
+        
+        <section>
+          <title>Reporter</title>
+        
+          <p><a href="ext:api/org/apache/hadoop/mapred/reporter">
+          Reporter</a> is a facility for Map-Reduce applications to report 
+          progress, set application-level status messages and update 
+          <code>Counters</code>.</p>
+ 
+          <p><code>Mapper</code> and <code>Reducer</code> implementations can use 
+          the <code>Reporter</code> to report progress or just indicate 
+          that they are alive. In scenarios where the application takes a
+          significant amount of time to process individual key/value pairs, 
+          this is crucial since the framework might assume that the task has 
+          timed-out and kill that task. Another way to avoid this is to 
+          set the configuration parameter <code>mapred.task.timeout</code> to a
+          high-enough value (or even set it to <em>zero</em> for no time-outs).
+          </p>
+
+          <p>Applications can also update <code>Counters</code> using the 
+          <code>Reporter</code>.</p>
+        </section>
+      
+        <section>
+          <title>OutputCollector</title>
+        
+          <p><a href="ext:api/org/apache/hadoop/mapred/outputcollector">
+          OutputCollector</a> is a generalization of the facility provided by
+          the Map-Reduce framework to collect data output by the 
+          <code>Mapper</code> or the <code>Reducer</code> (either the 
+          intermediate outputs or the output of the job).</p>
+        </section>
+      
+        <p>Hadoop Map-Reduce comes bundled with a 
+        <a href="ext:api/org/apache/hadoop/mapred/lib/package-summary">
+        library</a> of generally useful mappers, reducers, and partitioners.</p>
+      </section>
+      
+      <section>
+        <title>Job Configuration</title>
+        
+        <p><a href="ext:api/org/apache/hadoop/mapred/jobconf">
+        JobConf</a> represents a Map-Reduce job configuration.</p>
+ 
+        <p><code>JobConf</code> is the primary interface for a user to describe
+        a map-reduce job to the Hadoop framework for execution. The framework 
+        tries to faithfully execute the job as described by <code>JobConf</code>, 
+        however:</p> 
+        <ul>
+          <li>
+            Some configuration parameters may have been marked as 
+            <a href="ext:api/org/apache/hadoop/conf/configuration/final">
+            final</a> by administrators and hence cannot be altered.
+          </li>
+          <li>
+            While some job parameters are straight-forward to set (e.g. 
+            <a href="ext:api/org/apache/hadoop/mapred/jobconf/setnumreducetasks">
+            setNumReduceTasks(int)</a>), other parameters interact subtly with 
+            the rest of the framework and/or job configuration and are 
+            more complex to set (e.g. 
+            <a href="ext:api/org/apache/hadoop/mapred/jobconf/setnummaptasks">
+            setNumMapTasks(int)</a>).
+          </li>
+        </ul>
+ 
+        <p><code>JobConf</code> is typically used to specify the 
+        <code>Mapper</code>, combiner (if any), <code>Partitioner</code>, 
+        <code>Reducer</code>, <code>InputFormat</code> and 
+        <code>OutputFormat</code> implementations. <code>JobConf</code> also 
+        indicates the set of input files 
+        (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setinputpath">setInputPath(Path)</a>/<a href="ext:api/org/apache/hadoop/mapred/jobconf/addinputpath">addInputPath(Path)</a>)
+        and where the output files should be written
+        (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setoutputpath">setOutputPath(Path)</a>).</p>
+
+        <p>Optionally, <code>JobConf</code> is used to specify other advanced 
+        facets of the job such as the <code>Comparator</code> to be used, files 
+        to be put in the <code>DistributedCache</code>, whether intermediate 
+        and/or job outputs are to be compressed (and how), debugging via 
+        user-provided scripts
+        (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmapdebugscript">setMapDebugScript(String)</a>/<a href="ext:api/org/apache/hadoop/mapred/jobconf/setreducedebugscript">setReduceDebugScript(String)</a>) 
+        , whether job tasks can be executed in a <em>speculative</em> manner 
+        (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setspeculativeexecution">setSpeculativeExecution(boolean)</a>)
+        , maximum number of attempts per task
+        (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmaxmapattempts">setMaxMapAttempts(int)</a>/<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmaxreduceattempts">setMaxReduceAttempts(int)</a>) 
+        , the percentage of task failures that can be tolerated by the job
+        (<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmaxmaptaskfailurespercent">setMaxMapTaskFailuresPercent(int)</a>/<a href="ext:api/org/apache/hadoop/mapred/jobconf/setmaxreducetaskfailurespercent">setMaxReduceTaskFailuresPercent(int)</a>) 
+        etc.</p>
+        
+        <p>Of course, users can use 
+        <a href="ext:api/org/apache/hadoop/conf/configuration/set">set(String, String)</a>/<a href="ext:api/org/apache/hadoop/conf/configuration/get">get(String, String)</a>
+        to set/get arbitrary parameters needed by applications. However, use the 
+        <code>DistributedCache</code> for large amounts of (read-only) data.</p>
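+        
+        <p>A small sketch of this pattern (the parameter name 
+        <code>myorg.case.sensitive</code> is made up for illustration): the job 
+        driver sets the value, and the <code>Mapper</code> reads it back in 
+        <code>configure(JobConf)</code>:</p>
+<source>
+// In the job driver (WordCount's main method, for instance):
+JobConf conf = new JobConf(WordCount.class);
+conf.set("myorg.case.sensitive", "false");   // arbitrary application parameter
+
+// In the Mapper implementation (which extends MapReduceBase):
+private boolean caseSensitive;
+
+public void configure(JobConf job) {
+  // Read the parameter back; the second argument is the default value.
+  caseSensitive = job.getBoolean("myorg.case.sensitive", true);
+}
+</source>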
+      </section>
+
+      <section>
+        <title>Job Submission and Monitoring</title>
+        
+        <p><a href="ext:api/org/apache/hadoop/mapred/jobclient">
+        JobClient</a> is the primary interface by which user-job interacts
+        with the <code>JobTracker</code>.</p>
+ 
+        <p><code>JobClient</code> provides facilities to submit jobs, track their 
+        progress, access component-tasks' reports/logs, get the Map-Reduce 
+        cluster's status information and so on.</p>
+ 
+        <p>The job submission process involves:</p>
+        <ol>
+          <li>Checking the input and output specifications of the job.</li>
+          <li>Computing the <code>InputSplit</code> values for the job.</li>
+          <li>
+            Setting up the requisite accounting information for the 
+            <code>DistributedCache</code> of the job, if necessary.
+          </li>
+          <li>
+            Copying the job's jar and configuration to the map-reduce system 
+            directory on the <code>FileSystem</code>.
+          </li>
+          <li>
+            Submitting the job to the <code>JobTracker</code> and optionally 
+            monitoring its status.
+          </li>
+        </ol>
+  
+        <p>Normally the user creates the application, describes various facets 
+        of the job via <code>JobConf</code>, and then uses the 
+        <code>JobClient</code> to submit the job and monitor its progress.</p>
+
+        <section>
+          <title>Job Control</title>
+ 
+          <p>Users may need to chain map-reduce jobs to accomplish complex
+          tasks which cannot be done via a single map-reduce job. This is fairly
+          easy since the output of a job typically goes to the distributed 
+          file-system, and that output, in turn, can be used as the input for the 
+          next job.</p>
+ 
+          <p>However, this also means that the onus on ensuring jobs are 
+          complete (success/failure) lies squarely on the clients. In such 
+          cases, the various job-control options are:</p>
+          <ul>
+            <li>
+              <a href="ext:api/org/apache/hadoop/mapred/jobclient/runjob">
+              runJob(JobConf)</a> : Submits the job and returns only after the 
+              job has completed.
+            </li>
+            <li>
+              <a href="ext:api/org/apache/hadoop/mapred/jobclient/submitjob">
+              submitJob(JobConf)</a> : Only submits the job; the application can 
+              then poll the returned handle to the 
+              <a href="ext:api/org/apache/hadoop/mapred/runningjob">
+              RunningJob</a> to query status and make scheduling decisions, as 
+              shown in the sketch after this list.
+            </li>
+            <li>
+              <a href="ext:api/org/apache/hadoop/mapred/jobconf/setjobendnotificationuri">
+              JobConf.setJobEndNotificationURI(String)</a> : Sets up a 
+              notification upon job-completion, thus avoiding polling.
+            </li>
+          </ul>
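+
+          <p>A rough sketch of the polling approach; the sleep interval and the 
+          reaction to failure are illustrative, and exception handling is 
+          omitted:</p>
+          <p>
+            <code>JobClient jobClient = new JobClient(conf);</code><br/>
+            <code>RunningJob running = jobClient.submitJob(conf);</code><br/>
+            <code>while (!running.isComplete()) {</code><br/>
+            <code>&nbsp;&nbsp;Thread.sleep(5000); // poll periodically</code><br/>
+            <code>}</code><br/>
+            <code>if (!running.isSuccessful()) { /* react to the failure */ }</code>
+          </p>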
+        </section>
+      </section>
+
+      <section>
+        <title>Job Input</title>
+        
+        <p><a href="ext:api/org/apache/hadoop/mapred/inputformat">
+        InputFormat</a> describes the input-specification for a Map-Reduce job.
+        </p> 
+ 
+        <p>The Map-Reduce framework relies on the <code>InputFormat</code> of 
+        the job to:</p>
+        <ol>
+          <li>Validate the input-specification of the job.</li>
+          <li>
+            Split-up the input file(s) into logical <code>InputSplit</code> 
+            instances, each of which is then assigned to an individual 
+            <code>Mapper</code>.
+          </li>
+          <li>
+            Provide the <code>RecordReader</code> implementation used to
+            glean input records from the logical <code>InputSplit</code> for 
+            processing by the <code>Mapper</code>.
+          </li>
+        </ol>
+ 
+        <p>The default behavior of file-based <code>InputFormat</code>
+        implementations, typically sub-classes of 
+        <a href="ext:api/org/apache/hadoop/mapred/fileinputformat">
+        FileInputFormat</a>, is to split the input into <em>logical</em> 
+        <code>InputSplit</code> instances based on the total size, in bytes, of 
+        the input files. However, the <code>FileSystem</code> blocksize of the 
+        input files is treated as an upper bound for input splits. A lower bound
+        on the split size can be set via <code>mapred.min.split.size</code>.</p>
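+
+        <p>For example, a lower bound of 64MB could be requested as follows 
+        (the value is arbitrary):<br/>
+        <code>conf.set("mapred.min.split.size", "67108864");</code></p>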
+ 
+        <p>Clearly, logical splits based on input-size are insufficient for many
+        applications since record boundaries must be respected. In such cases, 
+        the application should implement a <code>RecordReader</code>, which is 
+        responsible for respecting record-boundaries and presenting a 
+        record-oriented view of the logical <code>InputSplit</code> to the 
+        individual task.</p>
+
+        <p><a href="ext:api/org/apache/hadoop/mapred/textinputformat">
+        TextInputFormat</a> is the default <code>InputFormat</code>.
+        </p>
+        
+        <section>
+          <title>InputSplit</title>
+          
+          <p><a href="ext:api/org/apache/hadoop/mapred/inputsplit">
+          InputSplit</a> represents the data to be processed by an individual 
+          <code>Mapper</code>.</p>
+
+          <p>Typically <code>InputSplit</code> presents a byte-oriented view of
+          the input, and it is the responsibility of <code>RecordReader</code>
+          to process and present a record-oriented view.</p>
+          
+          <p><a href="ext:api/org/apache/hadoop/mapred/filesplit">
+          FileSplit</a> is the default <code>InputSplit</code>. It sets 
+          <code>map.input.file</code> to the path of the input file for the
+          logical split.</p>
+        </section>
+        
+        <section>
+          <title>RecordReader</title>
+          
+          <p><a href="ext:api/org/apache/hadoop/mapred/recordreader">
+          RecordReader</a> reads <code>&lt;key, value&gt;</code> pairs from an 
+          <code>InputSplit</code>.</p>
+
+          <p>Typically the <code>RecordReader</code> converts the byte-oriented 
+          view of the input, provided by the <code>InputSplit</code>, and 
+          presents a record-oriented view to the <code>Mapper</code> 
+          implementations for processing. <code>RecordReader</code> thus assumes 
+          the responsibility of processing record boundaries and presents the 
+          tasks with keys and values.</p>
+        </section>
+      </section>
+
+      <section>
+        <title>Job Output</title>
+        
+        <p><a href="ext:api/org/apache/hadoop/mapred/outputformat">
+        OutputFormat</a> describes the output-specification for a Map-Reduce 
+        job.</p>
+
+        <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of 
+        the job to:</p>
+        <ol>
+          <li>
+            Validate the output-specification of the job; for example, check that 
+            the output directory doesn't already exist.
+          </li>
+          <li>
+            Provide the <code>RecordWriter</code> implementation used to 
+            write the output files of the job. Output files are stored in a 
+            <code>FileSystem</code>.
+          </li>
+        </ol>
+ 
+        <p><code>TextOutputFormat</code> is the default 
+        <code>OutputFormat</code>.</p>
+ 
+        <section>
+          <title>Task Side-Effect Files</title>
+ 
+          <p>In some applications, component tasks need to create and/or write to
+          side-files, which differ from the actual job-output files.</p>
+ 
+          <p>In such cases there could be issues with two instances of the same 
+          <code>Mapper</code> or <code>Reducer</code> running simultaneously (for
+          example, speculative tasks) trying to open and/or write to the same 
+          file (path) on the <code>FileSystem</code>. Hence the 
+          application-writer will have to pick unique names per task-attempt 
+          (using the taskid, say <code>task_200709221812_0001_m_000000_0</code>), 
+          not just per task.</p> 
+ 
+          <p>To avoid these issues the Map-Reduce framework maintains a special 
+          <code>${mapred.output.dir}/_${taskid}</code> sub-directory for each 
+          task-attempt on the <code>FileSystem</code> where the output of the 
+          task-attempt is stored. On successful completion of the task-attempt, 
+          the files in the <code>${mapred.output.dir}/_${taskid}</code> (only) 
+          are <em>promoted</em> to <code>${mapred.output.dir}</code>. Of course, 
+          the framework discards the sub-directory of unsuccessful task-attempts. 
+          This process is completely transparent to the application.</p>
+ 
+          <p>The application-writer can take advantage of this feature by 
+          creating any side-files required in <code>${mapred.output.dir}</code> 
+          during execution of a task via 
+          <a href="ext:api/org/apache/hadoop/mapred/jobconf/getoutputpath">
+          JobConf.getOutputPath()</a>, and the framework will promote them 
+          similarly for successful task-attempts, thus eliminating the need to 
+          pick unique paths per task-attempt.</p>
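+
+          <p>A minimal sketch of creating such a side-file inside a task; the 
+          file name is hypothetical:</p>
+          <p>
+            <code>Path sideFile = new Path(conf.getOutputPath(), "myapp-side-output");</code><br/>
+            <code>FSDataOutputStream out = FileSystem.get(conf).create(sideFile);</code>
+          </p>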
+        </section>
+        
+        <section>
+          <title>RecordWriter</title>
+          
+          <p><a href="ext:api/org/apache/hadoop/mapred/recordwriter">
+          RecordWriter</a> writes the output <code>&lt;key, value&gt;</code> 
+          pairs to an output file.</p>
+
+          <p>RecordWriter implementations write the job outputs to the 
+          <code>FileSystem</code>.</p>
+        </section>
+      </section>
+      
+      <section>
+        <title>Other Useful Features</title>
+ 
+        <section>
+          <title>Counters</title>
+          
+          <p><code>Counters</code> represent global counters, defined either by 
+          the Map-Reduce framework or applications. Each <code>Counter</code> can 
+          be of any <code>Enum</code> type. Counters of a particular 
+          <code>Enum</code> are bunched into groups of type 
+          <code>Counters.Group</code>.</p>
+          
+          <p>Applications can define arbitrary <code>Counters</code> (of type 
+          <code>Enum</code>) and update them via 
+          <a href="ext:api/org/apache/hadoop/mapred/reporter/incrcounter">
+          Reporter.incrCounter(Enum, long)</a> in the <code>map</code> and/or 
+          <code>reduce</code> methods. These counters are then globally 
+          aggregated by the framework.</p>
+        </section>       
+        
+        <section>
+          <title>DistributedCache</title>
+          
+          <p><a href="ext:api/org/apache/hadoop/filecache/distributedcache">
+          DistributedCache</a> distributes application-specific, large, read-only 
+          files efficiently.</p>
+ 
+          <p><code>DistributedCache</code> is a facility provided by the 
+          Map-Reduce framework to cache files (text, archives, jars and so on) 
+          needed by applications.</p>
+ 
+          <p>Applications specify the files to be cached via URLs (hdfs:// or 
+          http://) in the <code>JobConf</code>. The <code>DistributedCache</code> 
+          assumes that the files specified via hdfs:// urls are already present 
+          on the <code>FileSystem</code>.</p>
+
+          <p>The framework will copy the necessary files to the slave node 
+          before any tasks for the job are executed on that node. Its 
+          efficiency stems from the fact that the files are only copied once 
+          per job, and from its ability to cache archives, which are 
+          un-archived on the slaves.</p> 
+
+          <p><code>DistributedCache</code> can be used to distribute simple, 
+          read-only data/text files and more complex types such as archives and
+          jars. Archives (zip files) are <em>un-archived</em> at the slave nodes.
+          Jars may optionally be added to the classpath of the tasks, a
+          rudimentary <em>software distribution</em> mechanism.  Files have 
+          <em>execution permissions</em> set. Optionally users can also direct the
+          <code>DistributedCache</code> to <em>symlink</em> the cached file(s) 
+          into the working directory of the task.</p>
+ 
+          <p><code>DistributedCache</code> tracks the modification timestamps of 
+          the cached files. Clearly the cache files should not be modified by 
+          the application or externally while the job is executing.</p>
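+
+          <p>A brief sketch of typical usage; the file names are illustrative:</p>
+          <p>
+            <code>// at job-submission time</code><br/>
+            <code>DistributedCache.addCacheFile(new URI("/myapp/lookup.dat"), conf);</code><br/>
+            <code>DistributedCache.addCacheArchive(new URI("/myapp/dictionary.zip"), conf);</code><br/>
+            <code>// inside the task, for example in Mapper.configure(JobConf)</code><br/>
+            <code>Path[] localFiles = DistributedCache.getLocalCacheFiles(job);</code>
+          </p>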
+        </section>
+        
+        <section>
+          <title>Tool</title>
+          
+          <p>The <a href="ext:api/org/apache/hadoop/util/tool">Tool</a> 
+          interface supports the handling of generic Hadoop command-line options.
+          </p>
+          
+          <p><code>Tool</code> is the standard for any Map-Reduce tool or 
+          application. The application should delegate the handling of 
+          standard command-line options to 
+          <a href="ext:api/org/apache/hadoop/util/genericoptionsparser">
+          GenericOptionsParser</a> via          
+          <a href="ext:api/org/apache/hadoop/util/toolrunner/run">
+          ToolRunner.run(Tool, String[])</a> and only handle its custom 
+          arguments.</p>
+          
+          <p>
+            The generic Hadoop command-line options are:<br/>
+            <code>
+              -conf &lt;configuration file&gt;
+            </code>
+            <br/>
+            <code>
+              -D &lt;property=value&gt;
+            </code>
+            <br/>
+            <code>
+              -fs &lt;local|namenode:port&gt;
+            </code>
+            <br/>
+            <code>
+              -jt &lt;local|jobtracker:port&gt;
+            </code>
+          </p>
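+
+          <p>
+            For example, assuming the application uses <code>ToolRunner</code>, 
+            a job might be launched as follows; the jar and class names are 
+            illustrative:<br/>
+            <code>
+              $ bin/hadoop jar myjob.jar org.myorg.MyJob -Dmapred.reduce.tasks=2 input output
+            </code>
+          </p>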
+        </section>
+        
+        <section>
+          <title>IsolationRunner</title>
+          
+          <p><a href="ext:api/org/apache/hadoop/mapred/isolationrunner">
+          IsolationRunner</a> is a utility to help debug Map-Reduce programs.</p>
+          
+          <p>To use the <code>IsolationRunner</code>, first set 
+          <code>keep.failed.tasks.files</code> to <code>true</code> 
+          (also see <code>keep.tasks.files.pattern</code>).</p>
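+
+          <p>For example, this could be set in the job configuration 
+          (a sketch):<br/>
+          <code>conf.set("keep.failed.tasks.files", "true");</code></p>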
+          
+          <p>
+            Next, go to the node on which the failed task ran, change to the 
+            <code>TaskTracker</code>'s local directory and run the 
+            <code>IsolationRunner</code>:<br/>
+            <code>$ cd &lt;local path&gt;/taskTracker/${taskid}/work</code><br/>
+            <code>
+              $ bin/hadoop org.apache.hadoop.mapred.IsolationRunner ../job.xml
+            </code>
+          </p>
+          
+          <p><code>IsolationRunner</code> will run the failed task in a single 
+          JVM, which can be run in a debugger, over precisely the same input.</p>
+        </section>
+        
+        <section>
+          <title>JobControl</title>
+          
+          <p><a href="ext:api/org/apache/hadoop/mapred/jobcontrol/package-summary">
+          JobControl</a> is a utility which encapsulates a set of Map-Reduce jobs
+          and their dependencies.</p>
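+
+          <p>A rough sketch of chaining two dependent jobs with 
+          <code>JobControl</code>; consult the API docs for the exact 
+          signatures, and note that <code>firstConf</code> and 
+          <code>secondConf</code> are hypothetical <code>JobConf</code> 
+          instances:</p>
+          <p>
+            <code>Job first = new Job(firstConf, null); // no dependencies</code><br/>
+            <code>Job second = new Job(secondConf, null);</code><br/>
+            <code>second.addDependingJob(first); // second runs only after first succeeds</code><br/>
+            <code>JobControl control = new JobControl("my-job-chain");</code><br/>
+            <code>control.addJob(first);</code><br/>
+            <code>control.addJob(second);</code><br/>
+            <code>new Thread(control).start(); // JobControl implements Runnable</code>
+          </p>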
+        </section>
+      </section>
+    </section>
+
+    <section>
+      <title>Example: WordCount v2.0</title>
+      
+      <p>Here is a more complete <code>WordCount</code> which uses many of the
+      features provided by the Map-Reduce framework that we have discussed so far:</p>
+      
+      <section>
+        <title>Source Code</title>
+        
+        <table>
+          <tr>
+            <th></th>
+            <th>WordCount.java</th>
+          </tr>
+          <tr>
+            <td>1.</td>
+            <td>
+              <code>package org.myorg;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>2.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>3.</td>
+            <td>
+              <code>import java.io.*;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>4.</td>
+            <td>
+              <code>import java.util.*;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>5.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>6.</td>
+            <td>
+              <code>import org.apache.hadoop.fs.Path;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>7.</td>
+            <td>
+              <code>import org.apache.hadoop.filecache.DistributedCache;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>8.</td>
+            <td>
+              <code>import org.apache.hadoop.conf.*;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>9.</td>
+            <td>
+              <code>import org.apache.hadoop.io.*;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>10.</td>
+            <td>
+              <code>import org.apache.hadoop.mapred.*;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>11.</td>
+            <td>
+              <code>import org.apache.hadoop.util.*;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>12.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>13.</td>
+            <td>
+              <code>public class WordCount extends Configured implements Tool {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>14.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>15.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>
+                public static class MapClass extends MapReduceBase 
+                implements Mapper&lt;LongWritable, Text, Text, IntWritable&gt; {
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>16.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>17.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                static enum Counters { INPUT_WORDS }
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>18.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>19.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                private final static IntWritable one = new IntWritable(1);
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>20.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>private Text word = new Text();</code>
+            </td>
+          </tr>
+          <tr>
+            <td>21.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>22.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>private boolean caseSensitive = true;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>23.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>private Set&lt;String&gt; patternsToSkip = new HashSet&lt;String&gt;();</code>
+            </td>
+          </tr>
+          <tr>
+            <td>24.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>25.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>private long numRecords = 0;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>26.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>private String inputFile;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>27.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>28.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>public void configure(JobConf job) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>29.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                caseSensitive = job.getBoolean("wordcount.case.sensitive", true);
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>30.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>inputFile = job.get("map.input.file");</code>
+            </td>
+          </tr>
+          <tr>
+            <td>31.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>32.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>Path[] patternsFiles = new Path[0];</code>
+            </td>
+          </tr>
+          <tr>
+            <td>33.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>try {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>34.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                patternsFiles = DistributedCache.getLocalCacheFiles(job);
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>35.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>} catch (IOException ioe) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>36.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                System.err.println("Caught exception while getting cached files: " 
+                + StringUtils.stringifyException(ioe));
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>37.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>38.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>for (Path patternsFile : patternsFiles) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>39.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>parseSkipFile(patternsFile);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>40.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>41.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>42.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>43.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>private void parseSkipFile(Path patternsFile) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>44.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>try {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>45.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                BufferedReader fis = 
+                  new BufferedReader(new FileReader(patternsFile.toString()));
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>46.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>String pattern = null;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>47.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>while ((pattern = fis.readLine()) != null) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>48.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>patternsToSkip.add(pattern);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>49.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>50.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>} catch (IOException ioe) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>51.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                System.err.println("Caught exception while parsing the cached file '" +
+                                   patternsFile + "' : " + 
+                                   StringUtils.stringifyException(ioe));
+                
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>52.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>53.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>54.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>55.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                public void map(LongWritable key, Text value, 
+                OutputCollector&lt;Text, IntWritable&gt; output, 
+                Reporter reporter) throws IOException {
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>56.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                String line = 
+                  (caseSensitive) ? value.toString() : 
+                                    value.toString().toLowerCase();
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>57.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>58.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>for (String pattern : patternsToSkip) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>59.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>line = line.replaceAll(pattern, "");</code>
+            </td>
+          </tr>
+          <tr>
+            <td>60.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>61.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>62.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>StringTokenizer tokenizer = new StringTokenizer(line);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>63.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>while (tokenizer.hasMoreTokens()) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>64.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>word.set(tokenizer.nextToken());</code>
+            </td>
+          </tr>
+          <tr>
+            <td>65.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>output.collect(word, one);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>66.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>reporter.incrCounter(Counters.INPUT_WORDS, 1);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>67.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>68.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>69.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>if ((++numRecords % 100) == 0) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>70.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                reporter.setStatus("Finished processing " + numRecords + 
+                                   " records " + "from the input file: " + 
+                                   inputFile);
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>71.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>72.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>73.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>74.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>75.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>
+                public static class Reduce extends MapReduceBase implements 
+                Reducer&lt;Text, IntWritable, Text, IntWritable&gt; {
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>76.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                public void reduce(Text key, Iterator&lt;IntWritable&gt; values,
+                OutputCollector&lt;Text, IntWritable&gt; output, 
+                Reporter reporter) throws IOException {
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>77.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>int sum = 0;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>78.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>while (values.hasNext()) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>79.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>sum += values.next().get();</code>
+            </td>
+          </tr>
+          <tr>
+            <td>80.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>81.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>output.collect(key, new IntWritable(sum));</code>
+            </td>
+          </tr>
+          <tr>
+            <td>82.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>83.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>84.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>85.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>public int run(String[] args) throws Exception {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>86.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                JobConf conf = new JobConf(getConf(), WordCount.class);
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>87.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setJobName("wordcount");</code>
+            </td>
+          </tr>
+          <tr>
+            <td>88.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>89.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setOutputKeyClass(Text.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>90.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setOutputValueClass(IntWritable.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>91.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>92.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setMapperClass(MapClass.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>93.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setCombinerClass(Reduce.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>94.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setReducerClass(Reduce.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>95.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>96.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setInputFormat(TextInputFormat.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>97.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setOutputFormat(TextOutputFormat.class);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>98.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>99.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                List&lt;String&gt; other_args = new ArrayList&lt;String&gt;();
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>100.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>for (int i=0; i &lt; args.length; ++i) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>101.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>if ("-skip".equals(args[i]) {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>102.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf);
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>103.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>} else {</code>
+            </td>
+          </tr>
+          <tr>
+            <td>104.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>other_args.add(args[i]);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>105.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>106.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>107.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>108.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setInputPath(new Path(other_args.get(0)));</code>
+            </td>
+          </tr>
+          <tr>
+            <td>109.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>conf.setOutputPath(new Path(other_args.get(1)));</code>
+            </td>
+          </tr>
+          <tr>
+            <td>110.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>111.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>JobClient.runJob(conf);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>112.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>return 0;</code>
+            </td>
+          </tr>
+          <tr>
+            <td>113.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>114.</td>
+            <td></td>
+          </tr>
+          <tr>
+            <td>115.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>
+                public static void main(String[] args) throws Exception {
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>116.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>
+                int res = ToolRunner.run(new Configuration(), new WordCount(), 
+                                         args);
+              </code>
+            </td>
+          </tr>
+          <tr>
+            <td>117.</td>
+            <td>
+              &nbsp;&nbsp;&nbsp;&nbsp;
+              <code>System.exit(res);</code>
+            </td>
+          </tr>
+          <tr>
+            <td>118.</td>
+            <td>
+              &nbsp;&nbsp;
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>119.</td>
+            <td>
+              <code>}</code>
+            </td>
+          </tr>
+          <tr>
+            <td>120.</td>
+            <td></td>
+          </tr>
+        </table>
+      </section>
+        
+      <section>
+        <title>Sample Runs</title>
+        
+        <p>Sample text-files as input:</p>
+        <p>
+          <code>$ bin/hadoop dfs -ls /usr/joe/wordcount/input/</code><br/>
+          <code>/usr/joe/wordcount/input/file01</code><br/>
+          <code>/usr/joe/wordcount/input/file02</code><br/>
+          <br/>
+          <code>$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01</code><br/>
+          <code>Hello World, Bye World!</code><br/>
+          <br/>
+          <code>$ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02</code><br/>
+          <code>Hello Hadoop, Goodbye the Hadoop.</code>
+        </p>
+        
+        <p>Run the application:</p>
+        <p>
+          <code>
+            $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount 
+              /usr/joe/wordcount/input /usr/joe/wordcount/output 
+          </code>
+        </p>
+
+        <p>Output:</p>
+        <p>
+          <code>
+            $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
+          </code>
+          <br/>
+          <code>Bye    1</code><br/>
+          <code>Goodbye    1</code><br/>
+          <code>Hadoop,    1</code><br/>
+          <code>Hadoop.    1</code><br/>
+          <code>Hello    2</code><br/>
+          <code>World!    1</code><br/>
+          <code>World,    1</code><br/>
+          <code>the    1</code><br/>
+        </p>
+        
+        <p>Notice that the inputs differ from the first version we looked at, 
+        and how they affect the outputs.</p>
+
+        <p>Now, let's plug in a pattern-file which lists the word-patterns to be 
+        ignored, via the <code>DistributedCache</code>.</p>
+        
+        <p>
+          <code>$ bin/hadoop dfs -cat /user/joe/wordcount/patterns.txt</code><br/>
+          <code>\.</code><br/>
+          <code>\,</code><br/>
+          <code>\!</code><br/>
+          <code>the</code><br/>
+        </p>
+        
+        <p>Run it again, this time with more options:</p>
+        <p>
+          <code>
+            $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount 
+              -Dwordcount.case.sensitive=true /usr/joe/wordcount/input 
+              /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt
+          </code>
+        </p>
+        
+        <p>As expected, the output:</p>
+        <p>
+          <code>
+            $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
+          </code>
+          <br/>
+          <code>Bye    1</code><br/>
+          <code>Goodbye    1</code><br/>
+          <code>Hadoop    2</code><br/>
+          <code>Hello    2</code><br/>
+          <code>World    2</code><br/>
+        </p>
+        
+        <p>Run it once more, this time switching off case-sensitivity:</p>
+        <p>
+          <code>
+            $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount 
+              -Dwordcount.case.sensitive=false /usr/joe/wordcount/input 
+              /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt
+          </code>
+        </p>
+        
+        <p>Sure enough, the output:</p>
+        <p>
+          <code>
+            $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000
+          </code>
+          <br/>
+          <code>bye    1</code><br/>
+          <code>goodbye    1</code><br/>
+          <code>hadoop    2</code><br/>
+          <code>hello    2</code><br/>
+          <code>world    2</code><br/>
+        </p>
+      </section>
+      
+      <section>
+        <title>Salient Points</title>
+        
+        <p>The second version of <code>WordCount</code> improves upon the 
+        previous one by using some features offered by the Map-Reduce framework:
+        </p>
+        <ul>
+          <li>
+            Demonstrates how applications can access configuration parameters
+            in the <code>configure</code> method of the <code>Mapper</code> (and
+            <code>Reducer</code>) implementations (lines 28-41).
+          </li>
+          <li>
+            Demonstrates how the <code>DistributedCache</code> can be used to 
+            distribute read-only data needed by the jobs. Here it allows the user 
+            to specify word-patterns to skip while counting (line 102).
+          </li>
+          <li>
+            Demonstrates the utility of the <code>Tool</code> interface and the
+            <code>GenericOptionsParser</code> to handle generic Hadoop 
+            command-line options (lines 85-86, 116).
+          </li>
+          <li>
+            Demonstrates how applications can use <code>Counters</code> (line 66)
+            and how they can set application-specific status information via 
+            the <code>Reporter</code> instance passed to the <code>map</code> (and
+            <code>reduce</code>) method (line 70).
+          </li>
+        </ul>
+        
+      </section>
+    </section>
+
+    <p>
+      <em>Java and JNI are trademarks or registered trademarks of 
+      Sun Microsystems, Inc. in the United States and other countries.</em>
+    </p>
+    
+  </body>
+  
+</document>

+ 255 - 0
src/docs/src/documentation/content/xdocs/quickstart.xml

@@ -0,0 +1,255 @@
+<?xml version="1.0"?>
+
+<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd">
+
+<document>
+  
+  <header>
+    <title>Hadoop Quickstart</title>
+  </header>
+  
+  <body>
+  
+    <section>
+      <title>Purpose</title>
+      
+      <p>The purpose of this document is to help users get a single-node Hadoop 
+      installation up and running very quickly so that they can get a flavour 
+      of the <a href="hdfs_design.html">Hadoop Distributed File System 
+      (<acronym title="Hadoop Distributed File System">HDFS</acronym>)</a> and 
+      the Map-Reduce framework, i.e. perform simple operations on HDFS, run 
+      simple example jobs, etc.</p>
+    </section>
+    
+    <section id="PreReqs">
+      <title>Pre-requisites</title>
+      
+      <section>
+        <title>Supported Platforms</title>
+        
+        <ul>
+          <li>
+            Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.
+          </li>
+          <li>
+            Win32 is supported as a <em>development platform</em>. Distributed 
+            operation has not been well tested on Win32, so this is not a 
+            <em>production platform</em>.
+          </li>
+        </ul>        
+      </section>
+      
+      <section>
+        <title>Required Software</title>
+        
+        <ol>
+          <li>
+            Java<sup>TM</sup> 1.5.x, preferably from Sun, must be installed. Set 
+            <code>JAVA_HOME</code> to the root of your Java installation.
+          </li>
+          <li>
+            <strong>ssh</strong> must be installed and <strong>sshd</strong> must 
+            be running to use the Hadoop scripts that manage remote Hadoop 
+            daemons.
+          </li>
+        </ol>
+        
+        <section>
+          <title>Additional requirements for Windows</title>
+          
+          <ol>
+            <li>
+              <a href="http://www.cygwin.com/">Cygwin</a> - Required for shell 
+              support in addition to the required software above. 
+            </li>
+          </ol>
+        </section>
+        
+      </section>
+
+      <section>
+        <title>Installing Software</title>
+          
+        <p>If your cluster doesn't have the requisite software you will need to
+        install it.</p>
+          
+        <p>For example on Ubuntu Linux:</p>
+        <p>
+          <code>$ sudo apt-get install ssh</code><br/>
+          <code>$ sudo apt-get install rsync</code>
+        </p>
+          
+        <p>On Windows, if you did not install the required software when you 
+        installed cygwin, start the cygwin installer and select the packages:</p>
+        <ul>
+          <li>openssh - the <em>Net</em> category</li>
+        </ul>
+      </section>
+      
+    </section>
+    
+    <section>
+      <title>Download</title>
+      
+      <p>
+        First, you need to get a Hadoop distribution: download a recent 
+        <a href="releases.html">stable release</a> and unpack it.
+      </p>
+
+      <p>
+        Once done, in the distribution edit the file 
+        <code>conf/hadoop-env.sh</code> to define at least <code>JAVA_HOME</code>.
+      </p>
+
+	  <p>
+	    Try the following command:<br/>
+        <code>$ bin/hadoop</code><br/>
+        This will display the usage documentation for the <strong>hadoop</strong> 
+        script.
+      </p>
+    </section>
+    
+    <section>
+      <title>Standalone Operation</title>
+      
+      <p>By default, Hadoop is configured to run things in a non-distributed 
+      mode, as a single Java process. This is useful for debugging.</p>
+      
+      <p>
+        The following example copies the unpacked <code>conf</code> directory to 
+        use as input and then finds and displays every match of the given regular 
+        expression. Output is written to the given <code>output</code> directory.
+        <br/>
+        <code>$ mkdir input</code><br/>
+        <code>$ cp conf/*.xml input</code><br/>
+        <code>
+          $ bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'
+        </code><br/>
+        <code>$ cat output/*</code>
+      </p>
+    </section>
+    
+    <section id="SingleNodeSetup">
+      <title>Pseudo-Distributed Operation</title>
+
+      <p>Hadoop can also be run on a single node in a pseudo-distributed mode 
+	  where each Hadoop daemon runs in a separate Java process.</p>
+	  
+      <section>
+        <title>Configuration</title>
+        <p>Use the following <code>conf/hadoop-site.xml</code>:</p>
+        <table>
+        <tr><td>&lt;configuration&gt;</td></tr>
+
+          <tr><td>&nbsp;&nbsp;&lt;property&gt;</td></tr>
+            <tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&lt;name&gt;fs.default.name&lt;/name&gt;</td></tr>
+            <tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&lt;value&gt;localhost:9000&lt;/value&gt;</td></tr>
+          <tr><td>&nbsp;&nbsp;&lt;/property&gt;</td></tr>
+
+          <tr><td>&nbsp;&nbsp;&lt;property&gt;</td></tr>
+            <tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&lt;name&gt;mapred.job.tracker&lt;/name&gt;</td></tr>
+            <tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&lt;value&gt;localhost:9001&lt;/value&gt;</td></tr>
+          <tr><td>&nbsp;&nbsp;&lt;/property&gt;</td></tr>
+
+          <tr><td>&nbsp;&nbsp;&lt;property&gt;</td></tr>
+            <tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&lt;name&gt;dfs.replication&lt;/name&gt;</td></tr>
+            <tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&lt;value&gt;1&lt;/value&gt;</td></tr>
+          <tr><td>&nbsp;&nbsp;&lt;/property&gt;</td></tr>
+
+        <tr><td>&lt;/configuration&gt;</td></tr>
+        </table>
+      </section>
+
+      <section>
+        <title>Setup passphraseless <em>ssh</em></title>
+        
+        <p>
+          Now check that you can ssh to the localhost without a passphrase:<br/>
+          <code>$ ssh localhost</code>
+        </p>
+        
+        <p>
+          If you cannot ssh to localhost without a passphrase, execute the 
+          following commands:<br/>
+   		  <code>$ ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa</code><br/>
+		  <code>$ cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys</code>
+		</p>
+      </section>
+    
+      <section>
+        <title>Execution</title>
+        
+        <p>
+          Format a new distributed-filesystem:<br/>
+          <code>$ bin/hadoop namenode -format</code>
+        </p>
+
+		<p>
+          Start the Hadoop daemons:<br/>
+          <code>$ bin/start-all.sh</code>
+        </p>
+
+        <p>The Hadoop daemon log output is written to the 
+        <code>${HADOOP_LOG_DIR}</code> directory (defaults to 
+        <code>${HADOOP_HOME}/logs</code>).</p>
+
+        <p>Browse the web-interface for the NameNode and the JobTracker; by
+        default they are available at:</p>
+        <ul>
+          <li>
+            <code>NameNode</code> - 
+            <a href="http://localhost:50070/">http://localhost:50070/</a>
+          </li>
+          <li>
+            <code>JobTracker</code> - 
+            <a href="http://localhost:50030/">http://localhost:50030/</a>
+          </li>
+        </ul>
+        
+        <p>
+          Copy the input files into the distributed filesystem:<br/>
+		  <code>$ bin/hadoop dfs -put conf input</code>
+		</p>
+		
+        <p>
+          Run some of the examples provided:<br/>
+          <code>
+            $ bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'
+          </code>
+        </p>
+        
+        <p>Examine the output files:</p>
+        <p>
+          Copy the output files from the distributed filesystem to the local 
+          filesystem and examine them:<br/>
+          <code>$ bin/hadoop dfs -get output output</code><br/>
+          <code>$ cat output/*</code>
+        </p>
+        <p> or </p>
+        <p>
+          View the output files on the distributed filesystem:<br/>
+          <code>$ bin/hadoop dfs -cat output/*</code>
+        </p>
+
+		<p>
+		  When you're done, stop the daemons with:<br/>
+		  <code>$ bin/stop-all.sh</code>
+		</p>
+      </section>
+    </section>
+    
+    <section>
+      <title>Fully-Distributed Operation</title>
+      
+	  <p>Information on setting up fully-distributed non-trivial clusters
+	  can be found <a href="cluster_setup.html">here</a>.</p>  
+    </section>
+    
+    <p>
+      <em>Java and JNI are trademarks or registered trademarks of 
+      Sun Microsystems, Inc. in the United States and other countries.</em>
+    </p>
+    
+  </body>
+  
+</document>

+ 115 - 5
src/docs/src/documentation/content/xdocs/site.xml

@@ -24,9 +24,12 @@ See http://forrest.apache.org/docs/linking.html for more info.
   </project>
 
   <docs label="Documentation"> 
-    <hdfs      label="Hadoop File System" href="hdfs_design.html" />
-    <install   label="Install and Configure" href="ext:overview" />
-    <api       label="API Docs"           href="ext:api" />
+    <overview  label="Overview"           href="documentation.html" />
+    <quickstart label="Quickstart"        href="quickstart.html" />
+    <setup     label="Cluster Setup"      href="cluster_setup.html" />
+    <hdfs      label="HDFS Architecture"  href="hdfs_design.html" />
+    <mapred    label="Map-Reduce Tutorial" href="mapred_tutorial.html" />
+    <api       label="API Docs"           href="ext:api/index" />
     <wiki      label="Wiki"               href="ext:wiki" />
     <faq       label="FAQ"                href="ext:faq" />
     <usermail  label="Mailing Lists"      href="mailing_lists.html#Users" />
@@ -46,10 +49,117 @@ See http://forrest.apache.org/docs/linking.html for more info.
     <nightly   href="http://lucene.zones.apache.org:8080/hudson/job/Hadoop-Nightly/" />
     <releases  href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/" />
     <store     href="http://www.cafepress.com/hadoop/" />
-    <api       href="api/index.html" />
-    <overview  href="api/overview-summary.html#overview_description" />
     <lucene    href="http://lucene.apache.org/" />
     <nutch     href="http://lucene.apache.org/nutch/" />
+    <hadoop-default href="http://lucene.apache.org/hadoop/hadoop-default.html" />
+    <api href="api/">
+      <index href="index.html" />
+      <org href="org/">
+        <apache href="apache/">
+          <hadoop href="hadoop/">
+            <conf href="conf/">
+              <configuration href="Configuration.html">
+                <final href="#FinalParams" />
+                <get href="#get(java.lang.String, java.lang.String)" />
+                <set href="#set(java.lang.String, java.lang.String)" />
+              </configuration>
+            </conf>
+            <filecache href="filecache/">
+              <distributedcache href="DistributedCache.html" />
+            </filecache>
+            <fs href="fs/">
+              <filesystem href="FileSystem.html" />
+            </fs>
+            <io href="io/">
+              <closeable href="Closeable.html">
+                <close href="#close()" />
+              </closeable>
+              <sequencefile href="SequenceFile.html" />
+              <writable href="Writable.html" />
+              <writablecomparable href="WritableComparable.html" />
+              <compress href="compress/">
+                <compressioncodec href="CompressionCodec.html" />
+              </compress>
+            </io>
+            <mapred href="mapred/">
+              <clusterstatus href="ClusterStatus.html" />
+              <counters href="Counters.html" />
+              <fileinputformat href="FileInputFormat.html" />
+              <filesplit href="FileSplit.html" />
+              <inputformat href="InputFormat.html" />
+              <inputsplit href="InputSplit.html" />
+              <isolationrunner href="IsolationRunner.html" />
+              <jobclient href="JobClient.html">
+                <runjob href="#runJob(org.apache.hadoop.mapred.JobConf)" />
+                <submitjob href="#submitJob(org.apache.hadoop.mapred.JobConf)" />
+              </jobclient>
+              <jobconf href="JobConf.html">
+                <setnummaptasks href="#setNumMapTasks(int)" />
+                <setnumreducetasks href="#setNumReduceTasks(int)" />
+                <setoutputkeycomparatorclass href="#setOutputKeyComparatorClass(java.lang.Class)" />
+                <setoutputvaluegroupingcomparator href="#setOutputValueGroupingComparator(java.lang.Class)" />
+                <setinputpath href="#setInputPath(org.apache.hadoop.fs.Path)" />
+                <addinputpath href="#addInputPath(org.apache.hadoop.fs.Path)" />
+                <getoutputpath href="#getOutputPath()" />
+                <setoutputpath href="#setOutputPath(org.apache.hadoop.fs.Path)" />
+                <setcombinerclass href="#setCombinerClass(java.lang.Class)" />
+                <setmapdebugscript href="#setMapDebugScript(java.lang.String)" />
+                <setreducedebugscript href="#setReduceDebugScript(java.lang.String)" />
+                <setspeculativeexecution href="#setSpeculativeExecution(boolean)" />
+                <setmaxmapattempts href="#setMaxMapAttempts(int)" />
+                <setmaxreduceattempts href="#setMaxReduceAttempts(int)" />
+                <setmaxmaptaskfailurespercent href="#setMaxMapTaskFailuresPercent(int)" />
+                <setmaxreducetaskfailurespercent href="#setMaxReduceTaskFailuresPercent(int)" />
+                <setjobendnotificationuri href="#setJobEndNotificationURI(java.lang.String)" />
+              </jobconf>
+              <jobconfigurable href="JobConfigurable.html">
+                <configure href="#configure(org.apache.hadoop.mapred.JobConf)" />
+              </jobconfigurable>
+              <jobcontrol href="jobcontrol/">
+                <package-summary href="package-summary.html" />
+              </jobcontrol>
+              <mapper href="Mapper.html">
+                <map href="#map(K1, V1, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)" />
+              </mapper>
+              <outputcollector href="OutputCollector.html">
+                <collect href="#collect(K, V)" />
+              </outputcollector>
+              <outputformat href="OutputFormat.html" />
+              <partitioner href="Partitioner.html" />
+              <recordreader href="RecordReader.html" />
+              <recordwriter href="RecordWriter.html" />
+              <reducer href="Reducer.html">
+                <reduce href="#reduce(K2, java.util.Iterator, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)" />
+              </reducer>
+              <reporter href="Reporter.html">
+                <incrcounter href="#incrCounter(java.lang.Enum, long)" />
+              </reporter>
+              <runningjob href="RunningJob.html" />
+              <textinputformat href="TextInputFormat.html" />
+              <textoutputformat href="TextOutputFormat.html" />
+              <lib href="lib/">
+                <package-summary href="package-summary.html" />
+                <hashpartitioner href="HashPartitioner.html" />
+              </lib>
+              <pipes href="pipes/">
+                <package-summary href="package-summary.html" />
+              </pipes>
+            </mapred>
+            <streaming href="streaming/">
+              <package-summary href="package-summary.html" />
+            </streaming>
+            <util href="util/">
+              <genericoptionsparser href="GenericOptionsParser.html" />
+              <progress href="Progress.html" />
+              <tool href="Tool.html" />
+              <toolrunner href="ToolRunner.html">
+                <run href="#run(org.apache.hadoop.util.Tool, java.lang.String[])" />
+              </toolrunner>
+            </util>
+          </hadoop>
+        </apache>
+      </org>
+    </api>
   </external-refs>
  
 </site>

Some files were not shown because too many files changed in this diff