
HADOOP-3317. Add default port for HDFS namenode.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@653951 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting
parent commit 230ac40185
46 changed files with 593 additions and 512 deletions
  1. CHANGES.txt (+4 -0)
  2. docs/changes.html (+42 -16)
  3. docs/cluster_setup.html (+13 -14)
  4. docs/cluster_setup.pdf (+3 -3)
  5. docs/hadoop-default.html (+25 -7)
  6. docs/hdfs_design.html (+2 -2)
  7. docs/hdfs_permissions_guide.html (+3 -3)
  8. docs/hdfs_shell.html (+124 -188)
  9. docs/hdfs_shell.pdf (+32 -32)
  10. docs/hdfs_user_guide.html (+4 -3)
  11. docs/hdfs_user_guide.pdf (+1 -1)
  12. docs/hod.html (+2 -2)
  13. docs/hod_admin_guide.html (+2 -2)
  14. docs/hod_config_guide.html (+2 -2)
  15. docs/hod_user_guide.html (+2 -2)
  16. docs/index.html (+2 -2)
  17. docs/linkmap.html (+4 -4)
  18. docs/linkmap.pdf (+12 -12)
  19. docs/mapred_tutorial.html (+2 -2)
  20. docs/native_libraries.html (+2 -2)
  21. docs/quickstart.html (+2 -2)
  22. docs/skin/images/rc-b-l-15-1body-2menu-3menu.png (BIN)
  23. docs/skin/images/rc-b-r-15-1body-2menu-3menu.png (BIN)
  24. docs/skin/images/rc-b-r-5-1header-2tab-selected-3tab-selected.png (BIN)
  25. docs/skin/images/rc-t-l-5-1header-2searchbox-3searchbox.png (BIN)
  26. docs/skin/images/rc-t-l-5-1header-2tab-selected-3tab-selected.png (BIN)
  27. docs/skin/images/rc-t-l-5-1header-2tab-unselected-3tab-unselected.png (BIN)
  28. docs/skin/images/rc-t-r-15-1body-2menu-3menu.png (BIN)
  29. docs/skin/images/rc-t-r-5-1header-2searchbox-3searchbox.png (BIN)
  30. docs/skin/images/rc-t-r-5-1header-2tab-selected-3tab-selected.png (BIN)
  31. docs/skin/images/rc-t-r-5-1header-2tab-unselected-3tab-unselected.png (BIN)
  32. docs/streaming.html (+3 -3)
  33. docs/streaming.pdf (+89 -89)
  34. src/docs/src/documentation/content/xdocs/cluster_setup.xml (+3 -3)
  35. src/docs/src/documentation/content/xdocs/hdfs_shell.xml (+66 -54)
  36. src/docs/src/documentation/content/xdocs/site.xml (+1 -1)
  37. src/docs/src/documentation/content/xdocs/streaming.xml (+1 -1)
  38. src/java/org/apache/hadoop/dfs/DFSClient.java (+2 -6)
  39. src/java/org/apache/hadoop/dfs/DataNode.java (+1 -2)
  40. src/java/org/apache/hadoop/dfs/DistributedFileSystem.java (+8 -10)
  41. src/java/org/apache/hadoop/dfs/NameNode.java (+18 -3)
  42. src/java/org/apache/hadoop/dfs/SecondaryNameNode.java (+2 -2)
  43. src/java/org/apache/hadoop/net/NetUtils.java (+24 -5)
  44. src/java/overview.html (+30 -30)
  45. src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java (+1 -2)
  46. src/test/org/apache/hadoop/dfs/TestDefaultNameNodePort.java (+59 -0)

+ 4 - 0
CHANGES.txt

@@ -55,6 +55,10 @@ Trunk (unreleased changes)
     HADOOP-2857. Allow libhdfs to set jvm options. (Craig Macdonald
     via omalley)
 
+    HADOOP-3317. Add default port for HDFS namenode.  The port in
+    "hdfs:" URIs now defaults to 8020, so that one may simply use URIs
+    of the form "hdfs://example.com/dir/file".
+
   IMPROVEMENTS
 
     HADOOP-2928. Remove deprecated FileSystem.getContentLength().
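
With the default port in place, client code can open HDFS paths through a portless "hdfs:" URI. A minimal sketch of what this looks like from Java (the host example.com and the path are placeholders taken from the changelog entry, not code from the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DefaultPortExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // No port in the URI: with this change the client falls back to
        // the default namenode port, 8020.
        Path p = new Path("hdfs://example.com/dir/file");
        FileSystem fs = p.getFileSystem(conf);
        System.out.println(fs.exists(p));
      }
    }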

+ 42 - 16
docs/changes.html

@@ -56,7 +56,7 @@
 </a></h2>
 <ul id="trunk_(unreleased_changes)_">
   <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._incompatible_changes_')">  INCOMPATIBLE CHANGES
-</a>&nbsp;&nbsp;&nbsp;(5)
+</a>&nbsp;&nbsp;&nbsp;(7)
     <ol id="trunk_(unreleased_changes)_._incompatible_changes_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2703">HADOOP-2703</a>.  The default options to fsck skips checking files
 that are being written to. The output of fsck is incompatible
@@ -80,6 +80,14 @@ TaskCompletionEvent that use string arguments are deprecated in favor
 of the corresponding ones that use ID objects. Applications can use
 xxxID.toString() and xxxID.forName() methods to convert/restore objects
 to/from strings.<br />(Enis Soztutar via ddas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2188">HADOOP-2188</a>. RPC client sends a ping rather than throw timeouts.
+RPC server does not throw away old RPCs. If clients and the server are on
+different versions, they are not able to function well. In addition,
+The property ipc.client.timeout is removed from the default hadoop
+configuration. It also removes metrics RpcOpsDiscardedOPsNum.<br />(hairong)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2181">HADOOP-2181</a>. This issue adds logging for input splits in Jobtracker log
+and jobHistory log. Also adds web UI for viewing input splits in job UI
+and history UI.<br />(Amareshwari Sriramadasu via ddas)</li>
     </ol>
   </li>
   <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._new_features_')">  NEW FEATURES
@@ -96,7 +104,7 @@ via omalley)</li>
     </ol>
   </li>
   <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(7)
+</a>&nbsp;&nbsp;&nbsp;(10)
     <ol id="trunk_(unreleased_changes)_._improvements_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2928">HADOOP-2928</a>. Remove deprecated FileSystem.getContentLength().<br />(Lohit Vjayarenu via rangadi)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3130">HADOOP-3130</a>. Make the connect timeout smaller for getFile.<br />(Amar Ramesh Kamat via ddas)</li>
@@ -113,10 +121,18 @@ partition.<br />(cdouglas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2461">HADOOP-2461</a>. Trim property names in configuration.
 (Tsz Wo (Nicholas), SZE via shv)
 </li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2799">HADOOP-2799</a>. Deprecate o.a.h.io.Closable in favor of java.io.Closable.
+(Tsz Wo (Nicholas), SZE via cdouglas)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3345">HADOOP-3345</a>. Enhance the hudson-test-patch target to cleanup messages,
+fix minor defects, and add eclipse plugin and python unit tests.<br />(nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3144">HADOOP-3144</a>. Improve robustness of LineRecordReader by defining a maximum
+line length (mapred.linerecordreader.maxlength), thereby avoiding reading
+too far into the following split.<br />(Zheng Shao via cdouglas)</li>
     </ol>
   </li>
   <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._optimizations_')">  OPTIMIZATIONS
-</a>&nbsp;&nbsp;&nbsp;(4)
+</a>&nbsp;&nbsp;&nbsp;(5)
     <ol id="trunk_(unreleased_changes)_._optimizations_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3274">HADOOP-3274</a>. The default constructor of BytesWritable creates empty
 byte array. (Tsz Wo (Nicholas), SZE via shv)
@@ -127,10 +143,11 @@ Vijaya Renu via omalley)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3164">HADOOP-3164</a>. Reduce DataNode CPU usage by using FileChannel.tranferTo().
 On Linux DataNode takes 5 times less CPU while serving data. Results may
 vary on other platforms.<br />(rangadi)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3248">HADOOP-3248</a>. Optimization of saveFSImage.<br />(Dhruba via shv)</li>
     </ol>
   </li>
   <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(14)
+</a>&nbsp;&nbsp;&nbsp;(23)
     <ol id="trunk_(unreleased_changes)_._bug_fixes_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2905">HADOOP-2905</a>. 'fsck -move' triggers NPE in NameNode.<br />(Lohit Vjayarenu via rangadi)</li>
       <li>Increment ClientProtocol.versionID missed by <a href="http://issues.apache.org/jira/browse/HADOOP-2585">HADOOP-2585</a>.<br />(shv)</li>
@@ -159,6 +176,25 @@ in RPC::Invoker.<br />(cdouglas)</li>
 support Soylatte.<br />(Sam Pullara via omalley)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3301">HADOOP-3301</a>. Fix misleading error message when S3 URI hostname
 contains an underscore.<br />(tomwhite via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3338">HADOOP-3338</a>. Fix Eclipse plugin to compile after <a href="http://issues.apache.org/jira/browse/HADOOP-544">HADOOP-544</a> was
+committed. Updated all references to use the new JobID representation.<br />(taton via nigel)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3337">HADOOP-3337</a>. Loading FSEditLog was broken by <a href="http://issues.apache.org/jira/browse/HADOOP-3283">HADOOP-3283</a> since it
+changed Writable serialization of DatanodeInfo. This patch handles it.
+(Tsz Wo (Nicholas), SZE via rangadi)
+</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3101">HADOOP-3101</a>. Prevent JobClient from throwing an exception when printing
+usage.<br />(Edward J. Yoon via cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3031">HADOOP-3031</a>. Fix javac warnings in test classes.<br />(cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3119">HADOOP-3119</a>. Update javadoc for Text::getBytes to better describe its
+behavior.<br />(Tim Nelson via cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2294">HADOOP-2294</a>. Fix documentation in libhdfs to refer to the correct free
+function.<br />(Craig Macdonald via cdouglas)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3335">HADOOP-3335</a>. Prevent the libhdfs build from deleting the wrong
+files on make clean.<br />(cutting via omalley)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2930">HADOOP-2930</a>. Make {start,stop}-balancer.sh work even if hadoop-daemon.sh
+is not in the PATH.<br />(Spiros Papadimitriou via hairong)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3085">HADOOP-3085</a>. Catch Exception in metrics util classes to ensure that
+misconfigured metrics don't prevent others from updating.<br />(cdouglas)</li>
     </ol>
   </li>
 </ul>
@@ -342,7 +378,7 @@ records/log).<br />(Zheng Shao via omalley)</li>
     </ol>
   </li>
   <li><a href="javascript:toggleList('release_0.17.0_-_unreleased_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(101)
+</a>&nbsp;&nbsp;&nbsp;(98)
     <ol id="release_0.17.0_-_unreleased_._bug_fixes_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2195">HADOOP-2195</a>. '-mkdir' behaviour is now closer to Linux shell in case of
 errors.<br />(Mahadev Konar via rangadi)</li>
@@ -468,8 +504,6 @@ first key arrives.<br />(Rick Cox via tomwhite)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3146">HADOOP-3146</a>. A DFSOutputStream.flush method is renamed as
 DFSOutputStream.fsync.<br />(dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3165">HADOOP-3165</a>. -put/-copyFromLocal did not treat input file "-" as stdin.<br />(Lohit Vijayarenu via rangadi)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3138">HADOOP-3138</a>. DFS mkdirs() should not throw an exception if the directory
-already exists.<br />(rangadi)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3041">HADOOP-3041</a>. Deprecate JobConf.setOutputPath and JobConf.getOutputPath.
 Deprecate OutputFormatBase. Add FileOutputFormat. Existing output formats
 extending OutputFormatBase, now extend FileOutputFormat. Add the following
@@ -520,16 +554,10 @@ use FileSystem.getUri instead of FileSystem.getName.<br />(Arun Murthy via ddas)
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3237">HADOOP-3237</a>. Fixes TestDFSShell.testErrOutPut on Windows platform.<br />(Mahadev Konar via ddas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3279">HADOOP-3279</a>. TaskTracker checks for SUCCEEDED task status in addition to
 COMMIT_PENDING status when it fails maps due to lost map.<br />(Devaraj Das)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3186">HADOOP-3186</a>. Fix incorrect permission checkding for mv and renameTo
-in HDFS. (Tsz Wo (Nicholas), SZE via rangadi)
-</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3286">HADOOP-3286</a>. Prevent collisions in gridmix output dirs by increasing the
 granularity of the timestamp.<br />(Runping Qi via cdouglas)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3285">HADOOP-3285</a>. Fix input split locality when the splits align to
 fs blocks.<br />(omalley)</li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3294">HADOOP-3294</a>. Fix distcp to check the destination length and retry the copy
-if it doesn't match the src length. (Tsz Wo (Nicholas), SZE via cdouglas)
-</li>
     </ol>
   </li>
 </ul>
@@ -539,15 +567,13 @@ if it doesn't match the src length. (Tsz Wo (Nicholas), SZE via cdouglas)
 </a></h3>
 <ul id="release_0.16.4_-_2008-05-05_">
   <li><a href="javascript:toggleList('release_0.16.4_-_2008-05-05_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(4)
+</a>&nbsp;&nbsp;&nbsp;(3)
    <ol id="release_0.16.4_-_2008-05-05_._bug_fixes_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3138">HADOOP-3138</a>. DFS mkdirs() should not throw an exception if the directory
 already exists.<br />(rangadi via mukund)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3294">HADOOP-3294</a>. Fix distcp to check the destination length and retry the copy
 if it doesn't match the src length. (Tsz Wo (Nicholas), SZE via mukund)
 </li>
-      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3304">HADOOP-3304</a>. [HOD] Fixes the way the logcondense.py utility searches
-for log files that need to be deleted.<br />(yhemanth via mukund)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-3186">HADOOP-3186</a>. Fix incorrect permission checkding for mv and renameTo
 in HDFS. (Tsz Wo (Nicholas), SZE via mukund)
 </li>

+ 13 - 14
docs/cluster_setup.html

@@ -120,10 +120,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>
@@ -347,15 +347,15 @@ document.write("Last Published: " + document.lastModified);
 <tr>
 		      
 <td colspan="1" rowspan="1">fs.default.name</td>
-  		      <td colspan="1" rowspan="1">Hostname or IP address of <span class="codefrag">NameNode</span>.</td>
-		      <td colspan="1" rowspan="1"><em>host:port</em> pair.</td>
+  		      <td colspan="1" rowspan="1">URI of <span class="codefrag">NameNode</span>.</td>
+		      <td colspan="1" rowspan="1"><em>hdfs://hostname/</em></td>
 		    
 </tr>
 		    
 <tr>
 		      
 <td colspan="1" rowspan="1">mapred.job.tracker</td>
-		      <td colspan="1" rowspan="1">Hostname or IP address of <span class="codefrag">JobTracker</span>.</td>
+		      <td colspan="1" rowspan="1">Host or IP and port of <span class="codefrag">JobTracker</span>.</td>
 		      <td colspan="1" rowspan="1"><em>host:port</em> pair.</td>
 		    
 </tr>
@@ -454,7 +454,7 @@ document.write("Last Published: " + document.lastModified);
           <a href="api/org/apache/hadoop/conf/Configuration.html#FinalParams">
           final</a> to ensure that they cannot be overriden by user-applications.
           </p>
-<a name="N101BD"></a><a name="Real-World+Cluster+Configurations"></a>
+<a name="N101BC"></a><a name="Real-World+Cluster+Configurations"></a>
 <h5>Real-World Cluster Configurations</h5>
 <p>This section lists some non-default configuration parameters which 
             have been used to run the <em>sort</em> benchmark on very large 
@@ -511,8 +511,7 @@ document.write("Last Published: " + document.lastModified);
 <td colspan="1" rowspan="1">mapred.child.java.opts</td>
                     <td colspan="1" rowspan="1">-Xmx512M</td>
                     <td colspan="1" rowspan="1">
-                      Larger heap-size for child jvms of maps/reduces. Also controls the amount 
-                      of virtual memory that a streaming/pipes task gets.
+                      Larger heap-size for child jvms of maps/reduces. 
                     </td>
                   
 </tr>
@@ -616,7 +615,7 @@ document.write("Last Published: " + document.lastModified);
 </li>
            
 </ul>
-<a name="N102DA"></a><a name="Slaves"></a>
+<a name="N102D9"></a><a name="Slaves"></a>
 <h4>Slaves</h4>
 <p>Typically you choose one machine in the cluster to act as the 
           <span class="codefrag">NameNode</span> and one machine as to act as the 
@@ -625,14 +624,14 @@ document.write("Last Published: " + document.lastModified);
           referred to as <em>slaves</em>.</p>
 <p>List all slave hostnames or IP addresses in your 
           <span class="codefrag">conf/slaves</span> file, one per line.</p>
-<a name="N102F9"></a><a name="Logging"></a>
+<a name="N102F8"></a><a name="Logging"></a>
 <h4>Logging</h4>
 <p>Hadoop uses the <a href="http://logging.apache.org/log4j/">Apache 
           log4j</a> via the <a href="http://commons.apache.org/logging/">Apache 
           Commons Logging</a> framework for logging. Edit the 
          <span class="codefrag">conf/log4j.properties</span> file to customize the Hadoop 
           daemons' logging configuration (log-formats and so on).</p>
-<a name="N1030D"></a><a name="History+Logging"></a>
+<a name="N1030C"></a><a name="History+Logging"></a>
 <h5>History Logging</h5>
 <p> The job history files are stored in central location 
             <span class="codefrag"> hadoop.job.history.location </span> which can be on DFS also,
@@ -666,7 +665,7 @@ document.write("Last Published: " + document.lastModified);
 </div>
     
     
-<a name="N10345"></a><a name="Hadoop+Rack+Awareness"></a>
+<a name="N10344"></a><a name="Hadoop+Rack+Awareness"></a>
 <h2 class="h3">Hadoop Rack Awareness</h2>
 <div class="section">
 <p>The HDFS and the Map-Reduce components are rack-aware.</p>
@@ -689,7 +688,7 @@ document.write("Last Published: " + document.lastModified);
 </div>
     
     
-<a name="N1036B"></a><a name="Hadoop+Startup"></a>
+<a name="N1036A"></a><a name="Hadoop+Startup"></a>
 <h2 class="h3">Hadoop Startup</h2>
 <div class="section">
 <p>To start a Hadoop cluster you will need to start both the HDFS and 
@@ -724,7 +723,7 @@ document.write("Last Published: " + document.lastModified);
 </div>
     
     
-<a name="N103B1"></a><a name="Hadoop+Shutdown"></a>
+<a name="N103B0"></a><a name="Hadoop+Shutdown"></a>
 <h2 class="h3">Hadoop Shutdown</h2>
 <div class="section">
 <p>

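Because fs.default.name is now documented as a URI rather than a bare host:port pair, a client-side configuration can omit the port entirely. A hedged sketch of what that might look like (namenodehost and the path are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DefaultFsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // A URI, not a host:port pair; the port may be omitted now that
        // 8020 is the default for hdfs: URIs.
        conf.set("fs.default.name", "hdfs://namenodehost/");
        FileSystem fs = FileSystem.get(conf);
        // Paths such as /parent/child now resolve against namenodehost.
        System.out.println(fs.exists(new Path("/parent/child")));
      }
    }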
+ 3 - 3
docs/cluster_setup.pdf
(File diff suppressed because it is too large)


+ 25 - 7
docs/hadoop-default.html

@@ -154,6 +154,15 @@ creations/deletions), or "all".</td>
   </td>
 </tr>
 <tr>
+<td><a name="dfs.datanode.ipc.address">dfs.datanode.ipc.address</a></td><td>0.0.0.0:50020</td><td>
+    The datanode ipc server address and port.
+    If the port is 0 then the server will start on a free port.
+  </td>
+</tr>
+<tr>
+<td><a name="dfs.datanode.handler.count">dfs.datanode.handler.count</a></td><td>3</td><td>The number of server threads for the datanode.</td>
+</tr>
+<tr>
 <td><a name="dfs.http.address">dfs.http.address</a></td><td>0.0.0.0:50070</td><td>
     The address and the base port where the dfs namenode web ui will listen on.
     If the port is 0 then the server will start on a free port.
@@ -455,8 +464,20 @@ creations/deletions), or "all".</td>
   For example, to enable verbose gc logging to a file named for the taskid in
   /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
         -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-  The value of -Xmx will also directly influence the amount of virtual memory
-  that a streaming/pipes task gets during execution.
+  
+  The configuration variable mapred.child.ulimit can be used to control the
+  maximum virtual memory of the child processes. 
+  </td>
+</tr>
+<tr>
+<td><a name="mapred.child.ulimit">mapred.child.ulimit</a></td><td></td><td>The maximum virtual memory, in KB, of a process launched by the 
+  Map-Reduce framework. This can be used to control both the Mapper/Reducer 
+  tasks and applications using Hadoop Pipes, Hadoop Streaming etc. 
+  By default it is left unspecified to let cluster admins control it via 
+  limits.conf and other such relevant mechanisms.
+  
+  Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to
+  JavaVM, else the VM might not start. 
   </td>
 </tr>
 <tr>
@@ -635,15 +656,12 @@ creations/deletions), or "all".</td>
     </td>
 </tr>
 <tr>
-<td><a name="ipc.client.timeout">ipc.client.timeout</a></td><td>60000</td><td>Defines the timeout for IPC calls in milliseconds.</td>
-</tr>
-<tr>
 <td><a name="ipc.client.idlethreshold">ipc.client.idlethreshold</a></td><td>4000</td><td>Defines the threshold number of connections after which
                connections will be inspected for idleness.
   </td>
 </tr>
 <tr>
-<td><a name="ipc.client.maxidletime">ipc.client.maxidletime</a></td><td>120000</td><td>Defines the maximum idle time for a connected client after 
+<td><a name="ipc.client.maxidletime">ipc.client.maxidletime</a></td><td>120000</td><td>Defines the maximum idle time in msec for a connected client after 
                which it may be disconnected.
   </td>
 </tr>
@@ -652,7 +670,7 @@ creations/deletions), or "all".</td>
   </td>
 </tr>
 <tr>
-<td><a name="ipc.client.connection.maxidletime">ipc.client.connection.maxidletime</a></td><td>1000</td><td>The maximum time after which a client will bring down the
+<td><a name="ipc.client.connection.maxidletime">ipc.client.connection.maxidletime</a></td><td>10000</td><td>The maximum time in msec after which a client will bring down the
               connection to the server.
   </td>
 </tr>

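The interaction between mapred.child.java.opts and the new mapred.child.ulimit is easy to get wrong: the ulimit is given in KB and must cover the child heap. A hypothetical job-setup fragment (the values are illustrative, taken from the documentation above rather than the patch):

    import org.apache.hadoop.mapred.JobConf;

    public class ChildLimitsExample {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        // Heap for child JVMs of maps/reduces; no longer doubles as the
        // virtual-memory limit for streaming/pipes tasks.
        job.set("mapred.child.java.opts", "-Xmx512M");
        // Virtual-memory cap in KB; must be >= the -Xmx above or the
        // child VM might not start. 1048576 KB = 1 GB covers -Xmx512M.
        job.set("mapred.child.ulimit", "1048576");
      }
    }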
+ 2 - 2
docs/hdfs_design.html

@@ -122,10 +122,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>

+ 3 - 3
docs/hdfs_permissions_guide.html

@@ -121,13 +121,13 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
-<div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
-</div>
 <div class="menupage">
 <div class="menupagetitle">HDFS Permissions Guide</div>
 </div>
 <div class="menuitem">
+<a href="hdfs_shell.html">FS Shell Guide</a>
+</div>
+<div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>
 </div>
 <div class="menuitem">

+ 124 - 188
docs/hdfs_shell.html

@@ -119,12 +119,12 @@ document.write("Last Published: " + document.lastModified);
 <div class="menuitem">
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
-<div class="menupage">
-<div class="menupagetitle">HDFS Shell Guide</div>
-</div>
 <div class="menuitem">
 <a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
+<div class="menupage">
+<div class="menupagetitle">FS Shell Guide</div>
+</div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>
 </div>
@@ -176,8 +176,8 @@ document.write("Last Published: " + document.lastModified);
 <div id="minitoc-area">
 <ul class="minitoc">
 <li>
-<a href="#DFShell"> DFShell </a>
-</li>
+<a href="#FS+Shell"> FS Shell </a>
+<ul class="minitoc">
 <li>
 <a href="#cat"> cat </a>
 </li>
@@ -257,24 +257,35 @@ document.write("Last Published: " + document.lastModified);
 <a href="#touchz"> touchz </a>
 </li>
 </ul>
+</li>
+</ul>
 </div>
 		
-<a name="N1000D"></a><a name="DFShell"></a>
-<h2 class="h3"> DFShell </h2>
+<a name="N1000D"></a><a name="FS+Shell"></a>
+<h2 class="h3"> FS Shell </h2>
 <div class="section">
 <p>
-      The HDFS shell is invoked by 
-      <span class="codefrag">bin/hadoop dfs &lt;args&gt;</span>.
-      All the HDFS shell commands take path URIs as arguments. The URI format is <em>scheme://autority/path</em>. For HDFS the scheme is <em>hdfs</em>, and for the local filesystem the scheme is <em>file</em>. The scheme and authority are optional. If not specified, the default scheme specified in the configuration is used. An HDFS file or directory such as <em>/parent/child</em> can be specified as <em>hdfs://namenode:namenodeport/parent/child</em> or simply as <em>/parent/child</em> (given that your configuration is set to point to <em>namenode:namenodeport</em>). Most of the commands in HDFS shell behave like corresponding Unix commands. Differences are described with each of the commands. Error information is sent to <em>stderr</em> and the output is sent to <em>stdout</em>. 
+      The FileSystem (FS) shell is invoked by 
+      <span class="codefrag">bin/hadoop fs &lt;args&gt;</span>.
+      All the FS shell commands take path URIs as arguments. The URI
+      format is <em>scheme://autority/path</em>. For HDFS the scheme
+      is <em>hdfs</em>, and for the local filesystem the scheme
+      is <em>file</em>. The scheme and authority are optional. If not
+      specified, the default scheme specified in the configuration is
+      used. An HDFS file or directory such as <em>/parent/child</em>
+      can be specified as <em>hdfs://namenodehost/parent/child</em> or
+      simply as <em>/parent/child</em> (given that your configuration
+      is set to point to <em>hdfs://namenodehost</em>). Most of the
+      commands in FS shell behave like corresponding Unix
+      commands. Differences are described with each of the
+      commands. Error information is sent to <em>stderr</em> and the
+      output is sent to <em>stdout</em>.
   </p>
-</div>
-		
-<a name="N10035"></a><a name="cat"></a>
-<h2 class="h3"> cat </h2>
-<div class="section">
+<a name="N10034"></a><a name="cat"></a>
+<h3 class="h4"> cat </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -cat URI [URI &hellip;]</span>
+<span class="codefrag">Usage: hadoop fs -cat URI [URI &hellip;]</span>
 			
 </p>
 <p>
@@ -285,14 +296,14 @@ document.write("Last Published: " + document.lastModified);
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -cat hdfs://host1:port1/file1 hdfs://host2:port2/file2 
+<span class="codefrag"> hadoop fs -cat hdfs://nn1.example.com/file1 hdfs://nn2.example.com/file2 
 		   </span>
 				
 </li>
 				
 <li>
 					
-<span class="codefrag">hadoop dfs -cat file:///file3 /user/hadoop/file4 </span>
+<span class="codefrag">hadoop fs -cat file:///file3 /user/hadoop/file4 </span>
 				
 </li>
 			
@@ -301,75 +312,57 @@ document.write("Last Published: " + document.lastModified);
 		   
 <span class="codefrag"> Returns 0 on success and -1 on error. </span>
 </p>
-</div>
-		
-<a name="N10061"></a><a name="chgrp"></a>
-<h2 class="h3"> chgrp </h2>
-<div class="section">
+<a name="N10060"></a><a name="chgrp"></a>
+<h3 class="h4"> chgrp </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -chgrp [-R] GROUP URI [URI &hellip;]</span>
+<span class="codefrag">Usage: hadoop fs -chgrp [-R] GROUP URI [URI &hellip;]</span>
 			
 </p>
 <p>
 	    Change group association of files. With <span class="codefrag">-R</span>, make the change recursively through the directory structure. The user must be the owner of files, or else a super-user. Additional information is in the <a href="hdfs_permissions_guide.html">Permissions User Guide</a>.
 	    </p>
-</div>
-		
-<a name="N10078"></a><a name="chmod"></a>
-<h2 class="h3"> chmod </h2>
-<div class="section">
+<a name="N10077"></a><a name="chmod"></a>
+<h3 class="h4"> chmod </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -chmod [-R] &lt;MODE[,MODE]... | OCTALMODE&gt; URI [URI &hellip;]</span>
+<span class="codefrag">Usage: hadoop fs -chmod [-R] &lt;MODE[,MODE]... | OCTALMODE&gt; URI [URI &hellip;]</span>
 			
 </p>
 <p>
 	    Change the permissions of files. With <span class="codefrag">-R</span>, make the change recursively through the directory structure. The user must be the owner of the file, or else a super-user. Additional information is in the <a href="hdfs_permissions_guide.html">Permissions User Guide</a>.
 	    </p>
-</div>
-		
-<a name="N1008F"></a><a name="chown"></a>
-<h2 class="h3"> chown </h2>
-<div class="section">
+<a name="N1008E"></a><a name="chown"></a>
+<h3 class="h4"> chown </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -chown [-R] [OWNER][:[GROUP]] URI [URI ]</span>
+<span class="codefrag">Usage: hadoop fs -chown [-R] [OWNER][:[GROUP]] URI [URI ]</span>
 			
 </p>
 <p>
 	    Change the owner of files. With <span class="codefrag">-R</span>, make the change recursively through the directory structure. The user must be a super-user. Additional information is in the <a href="hdfs_permissions_guide.html">Permissions User Guide</a>.
 	    </p>
-</div>
-		
-<a name="N100A6"></a><a name="copyFromLocal"></a>
-<h2 class="h3">copyFromLocal</h2>
-<div class="section">
+<a name="N100A5"></a><a name="copyFromLocal"></a>
+<h3 class="h4">copyFromLocal</h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -copyFromLocal &lt;localsrc&gt; URI</span>
+<span class="codefrag">Usage: hadoop fs -copyFromLocal &lt;localsrc&gt; URI</span>
 			
 </p>
 <p>Similar to <a href="#putlink"><strong>put</strong></a> command, except that the source is restricted to a local file reference. </p>
-</div>
-		
-<a name="N100BB"></a><a name="copyToLocal"></a>
-<h2 class="h3"> copyToLocal</h2>
-<div class="section">
+<a name="N100BA"></a><a name="copyToLocal"></a>
+<h3 class="h4"> copyToLocal</h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -copyToLocal [-ignorecrc] [-crc] URI &lt;localdst&gt;</span>
+<span class="codefrag">Usage: hadoop fs -copyToLocal [-ignorecrc] [-crc] URI &lt;localdst&gt;</span>
 			
 </p>
 <p> Similar to <a href="#getlink"><strong>get</strong></a> command, except that the destination is restricted to a local file reference.</p>
-</div>
-		
-<a name="N100D0"></a><a name="cp"></a>
-<h2 class="h3"> cp </h2>
-<div class="section">
+<a name="N100CF"></a><a name="cp"></a>
+<h3 class="h4"> cp </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -cp URI [URI &hellip;] &lt;dest&gt;</span>
+<span class="codefrag">Usage: hadoop fs -cp URI [URI &hellip;] &lt;dest&gt;</span>
 			
 </p>
 <p>
@@ -380,13 +373,13 @@ document.write("Last Published: " + document.lastModified);
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -cp /user/hadoop/file1 /user/hadoop/file2</span>
+<span class="codefrag"> hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2</span>
 				
 </li>
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -cp /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir </span>
+<span class="codefrag"> hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir </span>
 				
 </li>
 			
@@ -397,58 +390,46 @@ document.write("Last Published: " + document.lastModified);
 <span class="codefrag"> Returns 0 on success and -1 on error.</span>
 			
 </p>
-</div>
-		
-<a name="N100FA"></a><a name="du"></a>
-<h2 class="h3">du</h2>
-<div class="section">
+<a name="N100F9"></a><a name="du"></a>
+<h3 class="h4">du</h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -du URI [URI &hellip;]</span>
+<span class="codefrag">Usage: hadoop fs -du URI [URI &hellip;]</span>
 			
 </p>
 <p>
 	     Displays aggregate length of  files contained in the directory or the length of a file in case its just a file.<br>
 	     Example:<br>
-<span class="codefrag">hadoop dfs -du /user/hadoop/dir1 /user/hadoop/file1 hdfs://host:port/user/hadoop/dir1</span>
+<span class="codefrag">hadoop fs -du /user/hadoop/dir1 /user/hadoop/file1 hdfs://nn.example.com/user/hadoop/dir1</span>
 <br>
 	     Exit Code:<br>
 <span class="codefrag"> Returns 0 on success and -1 on error. </span>
 <br>
 </p>
-</div>
-		
-<a name="N10115"></a><a name="dus"></a>
-<h2 class="h3"> dus </h2>
-<div class="section">
+<a name="N10114"></a><a name="dus"></a>
+<h3 class="h4"> dus </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -dus &lt;args&gt;</span>
+<span class="codefrag">Usage: hadoop fs -dus &lt;args&gt;</span>
 			
 </p>
 <p>
 	    Displays a summary of file lengths.
 	   </p>
-</div>
-		
-<a name="N10125"></a><a name="expunge"></a>
-<h2 class="h3"> expunge </h2>
-<div class="section">
+<a name="N10124"></a><a name="expunge"></a>
+<h3 class="h4"> expunge </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -expunge</span>
+<span class="codefrag">Usage: hadoop fs -expunge</span>
 			
 </p>
 <p>Empty the Trash. Refer to <a href="hdfs_design.html">HDFS Design</a> for more information on Trash feature.
 	   </p>
-</div>
-		
-<a name="N10139"></a><a name="get"></a>
-<h2 class="h3"> get </h2>
-<div class="section">
+<a name="N10138"></a><a name="get"></a>
+<h3 class="h4"> get </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -get [-ignorecrc] [-crc] &lt;src&gt; &lt;localdst&gt;</span>
+<span class="codefrag">Usage: hadoop fs -get [-ignorecrc] [-crc] &lt;src&gt; &lt;localdst&gt;</span>
 				
 <br>
 			
@@ -463,13 +444,13 @@ document.write("Last Published: " + document.lastModified);
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -get /user/hadoop/file localfile </span>
+<span class="codefrag"> hadoop fs -get /user/hadoop/file localfile </span>
 				
 </li>
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -get hdfs://host:port/user/hadoop/file localfile</span>
+<span class="codefrag"> hadoop fs -get hdfs://nn.example.com/user/hadoop/file localfile</span>
 				
 </li>
 			
@@ -480,27 +461,21 @@ document.write("Last Published: " + document.lastModified);
 <span class="codefrag"> Returns 0 on success and -1 on error. </span>
 			
 </p>
-</div>
-		
-<a name="N1016D"></a><a name="getmerge"></a>
-<h2 class="h3"> getmerge </h2>
-<div class="section">
+<a name="N1016C"></a><a name="getmerge"></a>
+<h3 class="h4"> getmerge </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -getmerge &lt;src&gt; &lt;localdst&gt; [addnl]</span>
+<span class="codefrag">Usage: hadoop fs -getmerge &lt;src&gt; &lt;localdst&gt; [addnl]</span>
 			
 </p>
 <p>
 	  Takes a source directory and a destination file as input and concatenates files in src into the destination local file. Optionally <span class="codefrag">addnl</span> can be set to enable adding a newline character at the end of each file.  
 	  </p>
-</div>
-		
-<a name="N10180"></a><a name="ls"></a>
-<h2 class="h3"> ls </h2>
-<div class="section">
+<a name="N1017F"></a><a name="ls"></a>
+<h3 class="h4"> ls </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -ls &lt;args&gt;</span>
+<span class="codefrag">Usage: hadoop fs -ls &lt;args&gt;</span>
 			
 </p>
 <p>
@@ -512,30 +487,24 @@ document.write("Last Published: " + document.lastModified);
 <span class="codefrag">dirname &lt;dir&gt; modification_time modification_time permissions userid groupid</span>
 <br>
 	         Example:<br>
-<span class="codefrag">hadoop dfs -ls /user/hadoop/file1 /user/hadoop/file2 hdfs://host:port/user/hadoop/dir1 /nonexistentfile</span>
+<span class="codefrag">hadoop fs -ls /user/hadoop/file1 /user/hadoop/file2 hdfs://nn.example.com/user/hadoop/dir1 /nonexistentfile</span>
 <br>
 	         Exit Code:<br>
 <span class="codefrag"> Returns 0 on success and -1 on error. </span>
 <br>
 </p>
-</div>
-		
-<a name="N101A3"></a><a name="lsr"></a>
-<h2 class="h3">lsr</h2>
-<div class="section">
+<a name="N101A2"></a><a name="lsr"></a>
+<h3 class="h4">lsr</h3>
 <p>
-<span class="codefrag">Usage: hadoop dfs -lsr &lt;args&gt;</span>
+<span class="codefrag">Usage: hadoop fs -lsr &lt;args&gt;</span>
 <br>
 	      Recursive version of <span class="codefrag">ls</span>. Similar to Unix <span class="codefrag">ls -R</span>.
 	      </p>
-</div>
-		
-<a name="N101B6"></a><a name="mkdir"></a>
-<h2 class="h3"> mkdir </h2>
-<div class="section">
+<a name="N101B5"></a><a name="mkdir"></a>
+<h3 class="h4"> mkdir </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -mkdir &lt;paths&gt;</span>
+<span class="codefrag">Usage: hadoop fs -mkdir &lt;paths&gt;</span>
 				
 <br>
 			
@@ -548,13 +517,13 @@ document.write("Last Published: " + document.lastModified);
 				
 <li>
 					
-<span class="codefrag">hadoop dfs -mkdir /user/hadoop/dir1 /user/hadoop/dir2 </span>
+<span class="codefrag">hadoop fs -mkdir /user/hadoop/dir1 /user/hadoop/dir2 </span>
 				
 </li>
 				
 <li>
 					
-<span class="codefrag">hadoop dfs -mkdir hdfs://host1:port1/user/hadoop/dir hdfs://host2:port2/user/hadoop/dir
+<span class="codefrag">hadoop fs -mkdir hdfs://nn1.example.com/user/hadoop/dir hdfs://nn2.example.com/user/hadoop/dir
 	  </span>
 				
 </li>
@@ -566,11 +535,8 @@ document.write("Last Published: " + document.lastModified);
 <span class="codefrag">Returns 0 on success and -1 on error.</span>
 			
 </p>
-</div>
-		
-<a name="N101E3"></a><a name="movefromLocal"></a>
-<h2 class="h3"> movefromLocal </h2>
-<div class="section">
+<a name="N101E2"></a><a name="movefromLocal"></a>
+<h3 class="h4"> movefromLocal </h3>
 <p>
 				
 <span class="codefrag">Usage: dfs -moveFromLocal &lt;src&gt; &lt;dst&gt;</span>
@@ -578,14 +544,11 @@ document.write("Last Published: " + document.lastModified);
 </p>
 <p>Displays a "not implemented" message.
 	   </p>
-</div>
-		
-<a name="N101F3"></a><a name="mv"></a>
-<h2 class="h3"> mv </h2>
-<div class="section">
+<a name="N101F2"></a><a name="mv"></a>
+<h3 class="h4"> mv </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -mv URI [URI &hellip;] &lt;dest&gt;</span>
+<span class="codefrag">Usage: hadoop fs -mv URI [URI &hellip;] &lt;dest&gt;</span>
 			
 </p>
 <p>
@@ -597,13 +560,13 @@ document.write("Last Published: " + document.lastModified);
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -mv /user/hadoop/file1 /user/hadoop/file2</span>
+<span class="codefrag"> hadoop fs -mv /user/hadoop/file1 /user/hadoop/file2</span>
 				
 </li>
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -mv hdfs://host:port/file1 hdfs://host:port/file2 hdfs://host:port/file3 hdfs://host:port/dir1</span>
+<span class="codefrag"> hadoop fs -mv hdfs://nn.example.com/file1 hdfs://nn.example.com/file2 hdfs://nn.example.com/file3 hdfs://nn.example.com/dir1</span>
 				
 </li>
 			
@@ -614,14 +577,11 @@ document.write("Last Published: " + document.lastModified);
 <span class="codefrag"> Returns 0 on success and -1 on error.</span>
 			
 </p>
-</div>
-		
-<a name="N1021D"></a><a name="put"></a>
-<h2 class="h3"> put </h2>
-<div class="section">
+<a name="N1021C"></a><a name="put"></a>
+<h3 class="h4"> put </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -put &lt;localsrc&gt; ... &lt;dst&gt;</span>
+<span class="codefrag">Usage: hadoop fs -put &lt;localsrc&gt; ... &lt;dst&gt;</span>
 			
 </p>
 <p>Copy single src, or multiple srcs from local file system to the destination filesystem. Also reads input from stdin and writes to destination filesystem.<br>
@@ -631,24 +591,24 @@ document.write("Last Published: " + document.lastModified);
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -put localfile /user/hadoop/hadoopfile</span>
+<span class="codefrag"> hadoop fs -put localfile /user/hadoop/hadoopfile</span>
 				
 </li>
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -put localfile1 localfile2 /user/hadoop/hadoopdir</span>
+<span class="codefrag"> hadoop fs -put localfile1 localfile2 /user/hadoop/hadoopdir</span>
 				
 </li>
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -put localfile hdfs://host:port/hadoop/hadoopfile</span>
+<span class="codefrag"> hadoop fs -put localfile hdfs://nn.example.com/hadoop/hadoopfile</span>
 				
 </li>
 				
 <li>
-<span class="codefrag">hadoop dfs -put - hdfs://host:port/hadoop/hadoopfile</span>
+<span class="codefrag">hadoop fs -put - hdfs://nn.example.com/hadoop/hadoopfile</span>
 <br>Reads the input from stdin.</li>
 			
 </ul>
@@ -658,14 +618,11 @@ document.write("Last Published: " + document.lastModified);
 <span class="codefrag"> Returns 0 on success and -1 on error. </span>
 			
 </p>
-</div>
-		
-<a name="N10254"></a><a name="rm"></a>
-<h2 class="h3"> rm </h2>
-<div class="section">
+<a name="N10253"></a><a name="rm"></a>
+<h3 class="h4"> rm </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -rm URI [URI &hellip;] </span>
+<span class="codefrag">Usage: hadoop fs -rm URI [URI &hellip;] </span>
 			
 </p>
 <p>
@@ -676,7 +633,7 @@ document.write("Last Published: " + document.lastModified);
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -rm hdfs://host:port/file /user/hadoop/emptydir </span>
+<span class="codefrag"> hadoop fs -rm hdfs://nn.example.com/file /user/hadoop/emptydir </span>
 				
 </li>
 			
@@ -687,14 +644,11 @@ document.write("Last Published: " + document.lastModified);
 <span class="codefrag"> Returns 0 on success and -1 on error.</span>
 			
 </p>
-</div>
-		
-<a name="N10278"></a><a name="rmr"></a>
-<h2 class="h3"> rmr </h2>
-<div class="section">
+<a name="N10277"></a><a name="rmr"></a>
+<h3 class="h4"> rmr </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -rmr URI [URI &hellip;]</span>
+<span class="codefrag">Usage: hadoop fs -rmr URI [URI &hellip;]</span>
 			
 </p>
 <p>Recursive version of delete.<br>
@@ -704,13 +658,13 @@ document.write("Last Published: " + document.lastModified);
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -rmr /user/hadoop/dir </span>
+<span class="codefrag"> hadoop fs -rmr /user/hadoop/dir </span>
 				
 </li>
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -rmr hdfs://host:port/user/hadoop/dir </span>
+<span class="codefrag"> hadoop fs -rmr hdfs://nn.example.com/user/hadoop/dir </span>
 				
 </li>
 			
@@ -721,14 +675,11 @@ document.write("Last Published: " + document.lastModified);
 <span class="codefrag"> Returns 0 on success and -1 on error. </span>
 			
 </p>
-</div>
-		
-<a name="N102A2"></a><a name="setrep"></a>
-<h2 class="h3"> setrep </h2>
-<div class="section">
+<a name="N102A1"></a><a name="setrep"></a>
+<h3 class="h4"> setrep </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -setrep [-R] &lt;path&gt;</span>
+<span class="codefrag">Usage: hadoop fs -setrep [-R] &lt;path&gt;</span>
 			
 </p>
 <p>
@@ -739,7 +690,7 @@ document.write("Last Published: " + document.lastModified);
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -setrep -w 3 -R /user/hadoop/dir1 </span>
+<span class="codefrag"> hadoop fs -setrep -w 3 -R /user/hadoop/dir1 </span>
 				
 </li>
 			
@@ -750,14 +701,11 @@ document.write("Last Published: " + document.lastModified);
 <span class="codefrag">Returns 0 on success and -1 on error. </span>
 			
 </p>
-</div>
-		
-<a name="N102C7"></a><a name="stat"></a>
-<h2 class="h3"> stat </h2>
-<div class="section">
+<a name="N102C6"></a><a name="stat"></a>
+<h3 class="h4"> stat </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -stat URI [URI &hellip;]</span>
+<span class="codefrag">Usage: hadoop fs -stat URI [URI &hellip;]</span>
 			
 </p>
 <p>
@@ -768,7 +716,7 @@ document.write("Last Published: " + document.lastModified);
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -stat path </span>
+<span class="codefrag"> hadoop fs -stat path </span>
 				
 </li>
 			
@@ -777,14 +725,11 @@ document.write("Last Published: " + document.lastModified);
 	   
 <span class="codefrag"> Returns 0 on success and -1 on error.</span>
 </p>
-</div>
-		
-<a name="N102EA"></a><a name="tail"></a>
-<h2 class="h3"> tail </h2>
-<div class="section">
+<a name="N102E9"></a><a name="tail"></a>
+<h3 class="h4"> tail </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -tail [-f] URI </span>
+<span class="codefrag">Usage: hadoop fs -tail [-f] URI </span>
 			
 </p>
 <p>
@@ -795,7 +740,7 @@ document.write("Last Published: " + document.lastModified);
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -tail pathname </span>
+<span class="codefrag"> hadoop fs -tail pathname </span>
 				
 </li>
 			
@@ -804,14 +749,11 @@ document.write("Last Published: " + document.lastModified);
 	   
 <span class="codefrag"> Returns 0 on success and -1 on error.</span>
 </p>
-</div>
-		
-<a name="N1030D"></a><a name="test"></a>
-<h2 class="h3"> test </h2>
-<div class="section">
+<a name="N1030C"></a><a name="test"></a>
+<h3 class="h4"> test </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -test -[ezd] URI</span>
+<span class="codefrag">Usage: hadoop fs -test -[ezd] URI</span>
 			
 </p>
 <p>
@@ -825,19 +767,16 @@ document.write("Last Published: " + document.lastModified);
 				
 <li>
 					
-<span class="codefrag"> hadoop dfs -test -e filename </span>
+<span class="codefrag"> hadoop fs -test -e filename </span>
 				
 </li>
 			
 </ul>
-</div>
-		
-<a name="N10330"></a><a name="text"></a>
-<h2 class="h3"> text </h2>
-<div class="section">
+<a name="N1032F"></a><a name="text"></a>
+<h3 class="h4"> text </h3>
 <p>
 				
-<span class="codefrag">Usage: hadoop dfs -text &lt;src&gt;</span>
+<span class="codefrag">Usage: hadoop fs -text &lt;src&gt;</span>
 				
 <br>
 			
 			
@@ -845,14 +784,11 @@ document.write("Last Published: " + document.lastModified);
 <p>
 <p>
 	   Takes a source file and outputs the file in text format. The allowed formats are zip and TextRecordInputStream.
 	   Takes a source file and outputs the file in text format. The allowed formats are zip and TextRecordInputStream.
 	  </p>
 	  </p>
-</div>
-		
-<a name="N10342"></a><a name="touchz"></a>
-<h2 class="h3"> touchz </h2>
-<div class="section">
+<a name="N10341"></a><a name="touchz"></a>
+<h3 class="h4"> touchz </h3>
 <p>
 <p>
 				
 				
-<span class="codefrag">Usage: hadoop dfs -touchz URI [URI &hellip;]</span>
+<span class="codefrag">Usage: hadoop fs -touchz URI [URI &hellip;]</span>
 				
 				
 <br>
 <br>
 			
 			

File diff suppressed because it is too large
+ 32 - 32
docs/hdfs_shell.pdf


+ 4 - 3
docs/hdfs_user_guide.html

@@ -122,10 +122,10 @@ document.write("Last Published: " + document.lastModified);
 <div class="menupagetitle">HDFS User Guide</div>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>
@@ -561,7 +561,8 @@ document.write("Last Published: " + document.lastModified);
       files, for e.g. missing blocks for a file or under replicated
       blocks. Unlike a traditional fsck utility for native filesystems,
       this command does not correct the errors it detects. Normally Namenode
-      automatically corrects most of the recoverable failures.
+      automatically corrects most of the recoverable failures. By default
+      fsck ignores open files but provides an option to select all files during reporting.
       HDFS' fsck is not a
       Hadoop shell command. It can be run as '<span class="codefrag">bin/hadoop fsck</span>'.
       Fsck can be run on the whole filesystem or on a subset of files.

File diff suppressed because it is too large
+ 1 - 1
docs/hdfs_user_guide.pdf


+ 2 - 2
docs/hod.html

@@ -122,10 +122,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>

+ 2 - 2
docs/hod_admin_guide.html

@@ -122,10 +122,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>

+ 2 - 2
docs/hod_config_guide.html

@@ -122,10 +122,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>

+ 2 - 2
docs/hod_user_guide.html

@@ -122,10 +122,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>

+ 2 - 2
docs/index.html

@@ -120,10 +120,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>

+ 4 - 4
docs/linkmap.html

@@ -120,10 +120,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>
@@ -221,13 +221,13 @@ document.write("Last Published: " + document.lastModified);
     
 <ul>
 <li>
-<a href="hdfs_shell.html">HDFS Shell Guide</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>hdfs</em>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>hdfs</em>
 </li>
 </ul>
     
 <ul>
 <li>
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>hdfs</em>
+<a href="hdfs_shell.html">FS Shell Guide</a>&nbsp;&nbsp;___________________&nbsp;&nbsp;<em>fs</em>
 </li>
 </ul>
     

+ 12 - 12
docs/linkmap.pdf

(regenerated PDF: only compressed streams and byte offsets changed; binary diff omitted)

+ 2 - 2
docs/mapred_tutorial.html

@@ -120,10 +120,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menupage">
 <div class="menupagetitle">Map-Reduce Tutorial</div>

+ 2 - 2
docs/native_libraries.html

@@ -120,10 +120,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>

+ 2 - 2
docs/quickstart.html

@@ -120,10 +120,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>

BIN
docs/skin/images/rc-b-l-15-1body-2menu-3menu.png


BIN
docs/skin/images/rc-b-r-15-1body-2menu-3menu.png


BIN
docs/skin/images/rc-b-r-5-1header-2tab-selected-3tab-selected.png


BIN
docs/skin/images/rc-t-l-5-1header-2searchbox-3searchbox.png


BIN
docs/skin/images/rc-t-l-5-1header-2tab-selected-3tab-selected.png


BIN
docs/skin/images/rc-t-l-5-1header-2tab-unselected-3tab-unselected.png


BIN
docs/skin/images/rc-t-r-15-1body-2menu-3menu.png


BIN
docs/skin/images/rc-t-r-5-1header-2searchbox-3searchbox.png


BIN
docs/skin/images/rc-t-r-5-1header-2tab-selected-3tab-selected.png


BIN
docs/skin/images/rc-t-r-5-1header-2tab-unselected-3tab-unselected.png


+ 3 - 3
docs/streaming.html

@@ -123,10 +123,10 @@ document.write("Last Published: " + document.lastModified);
 <a href="hdfs_user_guide.html">HDFS User Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_shell.html">HDFS Shell Guide</a>
+<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
 </div>
 <div class="menuitem">
-<a href="hdfs_permissions_guide.html">HDFS Permissions Guide</a>
+<a href="hdfs_shell.html">FS Shell Guide</a>
 </div>
 <div class="menuitem">
 <a href="mapred_tutorial.html">Map-Reduce Tutorial</a>
@@ -403,7 +403,7 @@ $HADOOP_HOME/bin/hadoop  jar $HADOOP_HOME/hadoop-streaming.jar \
                   -mapper "xargs cat"  \
                   -reducer "cat"  \
                   -output "/user/me/samples/cachefile/out" \  
-                  -cacheArchive 'hdfs://hadoop-nn1.example.com:8020/user/me/samples/cachefile/cachedir.jar#testlink' \  
+                  -cacheArchive 'hdfs://hadoop-nn1.example.com/user/me/samples/cachefile/cachedir.jar#testlink' \  
                   -jobconf mapred.map.tasks=1 \
                   -jobconf mapred.reduce.tasks=1 \ 
                   -jobconf mapred.job.name="Experiment"

+ 89 - 89
docs/streaming.pdf

(regenerated PDF: only compressed streams and byte offsets changed; binary diff omitted)

+ 3 - 3
src/docs/src/documentation/content/xdocs/cluster_setup.xml

@@ -147,12 +147,12 @@
 		    </tr>
   		    <tr>
 		      <td>fs.default.name</td>
-  		      <td>Hostname or IP address of <code>NameNode</code>.</td>
-		      <td><em>host:port</em> pair.</td>
+  		      <td>URI of <code>NameNode</code>.</td>
+		      <td><em>hdfs://hostname/</em></td>
 		    </tr>
 		    <tr>
 		      <td>mapred.job.tracker</td>
-		      <td>Hostname or IP address of <code>JobTracker</code>.</td>
+		      <td>Host or IP and port of <code>JobTracker</code>.</td>
 		      <td><em>host:port</em> pair.</td>
 		    </tr>
 		    <tr>
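
With fs.default.name now holding a URI rather than a bare host:port pair, client code reads it back through the FileSystem API. A minimal sketch of the new convention (the hostname is a placeholder; only behavior shown in this commit is assumed):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class DefaultFsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // The port may now be omitted; "hdfs:" URIs default to 8020.
        conf.set("fs.default.name", "hdfs://nn.example.com/");
        URI u = FileSystem.getDefaultUri(conf);
        // getPort() is -1 here: the 8020 default is applied later, by the DFS client.
        System.out.println(u.getHost() + " port=" + u.getPort());
      }
    }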

+ 66 - 54
src/docs/src/documentation/content/xdocs/hdfs_shell.xml

@@ -21,17 +21,28 @@
 	</header>
 	<body>
 		<section>
-			<title> DFShell </title>
-			<p>
-      The HDFS shell is invoked by 
-      <code>bin/hadoop dfs &lt;args&gt;</code>.
-      All the HDFS shell commands take path URIs as arguments. The URI format is <em>scheme://autority/path</em>. For HDFS the scheme is <em>hdfs</em>, and for the local filesystem the scheme is <em>file</em>. The scheme and authority are optional. If not specified, the default scheme specified in the configuration is used. An HDFS file or directory such as <em>/parent/child</em> can be specified as <em>hdfs://namenode:namenodeport/parent/child</em> or simply as <em>/parent/child</em> (given that your configuration is set to point to <em>namenode:namenodeport</em>). Most of the commands in HDFS shell behave like corresponding Unix commands. Differences are described with each of the commands. Error information is sent to <em>stderr</em> and the output is sent to <em>stdout</em>. 
+			<title> FS Shell </title>
+			<p>
+      The FileSystem (FS) shell is invoked by 
+      <code>bin/hadoop fs &lt;args&gt;</code>.
+      All the FS shell commands take path URIs as arguments. The URI
+      format is <em>scheme://authority/path</em>. For HDFS the scheme
+      is <em>hdfs</em>, and for the local filesystem the scheme
+      is <em>file</em>. The scheme and authority are optional. If not
+      specified, the default scheme specified in the configuration is
+      used. An HDFS file or directory such as <em>/parent/child</em>
+      can be specified as <em>hdfs://namenodehost/parent/child</em> or
+      simply as <em>/parent/child</em> (given that your configuration
+      is set to point to <em>hdfs://namenodehost</em>). Most of the
+      commands in FS shell behave like corresponding Unix
+      commands. Differences are described with each of the
+      commands. Error information is sent to <em>stderr</em> and the
+      output is sent to <em>stdout</em>.
   </p>
-		</section>
 		<section>
 			<title> cat </title>
 			<p>
-				<code>Usage: hadoop dfs -cat URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -cat URI [URI &#x2026;]</code>
 			</p>
 			<p>
 		   Copies source paths to <em>stdout</em>. 
@@ -39,11 +50,11 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -cat hdfs://host1:port1/file1 hdfs://host2:port2/file2 
+					<code> hadoop fs -cat hdfs://nn1.example.com/file1 hdfs://nn2.example.com/file2 
 		   </code>
 				</li>
 				<li>
-					<code>hadoop dfs -cat file:///file3 /user/hadoop/file4 </code>
+					<code>hadoop fs -cat file:///file3 /user/hadoop/file4 </code>
 				</li>
 			</ul>
 			<p>Exit Code:<br/>
@@ -52,7 +63,7 @@
 		<section>
 			<title> chgrp </title>
 			<p>
-				<code>Usage: hadoop dfs -chgrp [-R] GROUP URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -chgrp [-R] GROUP URI [URI &#x2026;]</code>
 			</p>
 			<p>
 	    Change group association of files. With <code>-R</code>, make the change recursively through the directory structure. The user must be the owner of files, or else a super-user. Additional information is in the <a href="hdfs_permissions_guide.html">Permissions User Guide</a>.
@@ -61,7 +72,7 @@
 		<section>
 			<title> chmod </title>
 			<p>
-				<code>Usage: hadoop dfs -chmod [-R] &lt;MODE[,MODE]... | OCTALMODE&gt; URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -chmod [-R] &lt;MODE[,MODE]... | OCTALMODE&gt; URI [URI &#x2026;]</code>
 			</p>
 			<p>
 	    Change the permissions of files. With <code>-R</code>, make the change recursively through the directory structure. The user must be the owner of the file, or else a super-user. Additional information is in the <a href="hdfs_permissions_guide.html">Permissions User Guide</a>.
@@ -70,7 +81,7 @@
 		<section>
 			<title> chown </title>
 			<p>
-				<code>Usage: hadoop dfs -chown [-R] [OWNER][:[GROUP]] URI [URI ]</code>
+				<code>Usage: hadoop fs -chown [-R] [OWNER][:[GROUP]] URI [URI ]</code>
 			</p>
 			<p>
 	    Change the owner of files. With <code>-R</code>, make the change recursively through the directory structure. The user must be a super-user. Additional information is in the <a href="hdfs_permissions_guide.html">Permissions User Guide</a>.
@@ -79,21 +90,21 @@
 		<section>
 			<title>copyFromLocal</title>
 			<p>
-				<code>Usage: hadoop dfs -copyFromLocal &lt;localsrc&gt; URI</code>
+				<code>Usage: hadoop fs -copyFromLocal &lt;localsrc&gt; URI</code>
 			</p>
 			<p>Similar to <a href="#putlink"><strong>put</strong></a> command, except that the source is restricted to a local file reference. </p>
 		</section>
 		<section>
 			<title> copyToLocal</title>
 			<p>
-				<code>Usage: hadoop dfs -copyToLocal [-ignorecrc] [-crc] URI &lt;localdst&gt;</code>
+				<code>Usage: hadoop fs -copyToLocal [-ignorecrc] [-crc] URI &lt;localdst&gt;</code>
 			</p>
 			<p> Similar to <a href="#getlink"><strong>get</strong></a> command, except that the destination is restricted to a local file reference.</p>
 		</section>
 		<section>
 			<title> cp </title>
 			<p>
-				<code>Usage: hadoop dfs -cp URI [URI &#x2026;] &lt;dest&gt;</code>
+				<code>Usage: hadoop fs -cp URI [URI &#x2026;] &lt;dest&gt;</code>
 			</p>
 			<p>
 	    Copy files from source to destination. This command allows multiple sources as well in which case the destination must be a directory.
@@ -101,10 +112,10 @@
 	    Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -cp /user/hadoop/file1 /user/hadoop/file2</code>
+					<code> hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2</code>
 				</li>
 				<li>
-					<code> hadoop dfs -cp /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir </code>
+					<code> hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir </code>
 				</li>
 			</ul>
 			<p>Exit Code:</p>
@@ -115,17 +126,17 @@
 		<section>
 			<title>du</title>
 			<p>
-				<code>Usage: hadoop dfs -du URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -du URI [URI &#x2026;]</code>
 			</p>
 			<p>
 	     Displays aggregate length of  files contained in the directory or the length of a file in case its just a file.<br/>
-	     Example:<br/><code>hadoop dfs -du /user/hadoop/dir1 /user/hadoop/file1 hdfs://host:port/user/hadoop/dir1</code><br/>
+	     Example:<br/><code>hadoop fs -du /user/hadoop/dir1 /user/hadoop/file1 hdfs://nn.example.com/user/hadoop/dir1</code><br/>
 	     Exit Code:<br/><code> Returns 0 on success and -1 on error. </code><br/></p>
 		</section>
 		<section>
 			<title> dus </title>
 			<p>
-				<code>Usage: hadoop dfs -dus &lt;args&gt;</code>
+				<code>Usage: hadoop fs -dus &lt;args&gt;</code>
 			</p>
 			<p>
 	    Displays a summary of file lengths.
@@ -134,7 +145,7 @@
 		<section>
 			<title> expunge </title>
 			<p>
-				<code>Usage: hadoop dfs -expunge</code>
+				<code>Usage: hadoop fs -expunge</code>
 			</p>
 			<p>Empty the Trash. Refer to <a href="hdfs_design.html">HDFS Design</a> for more information on Trash feature.
 	   </p>
@@ -142,7 +153,7 @@
 		<section>
 			<title id="getlink"> get </title>
 			<p>
-				<code>Usage: hadoop dfs -get [-ignorecrc] [-crc] &lt;src&gt; &lt;localdst&gt;</code>
+				<code>Usage: hadoop fs -get [-ignorecrc] [-crc] &lt;src&gt; &lt;localdst&gt;</code>
 				<br/>
 			</p>
 			<p>
@@ -153,10 +164,10 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -get /user/hadoop/file localfile </code>
+					<code> hadoop fs -get /user/hadoop/file localfile </code>
 				</li>
 				<li>
-					<code> hadoop dfs -get hdfs://host:port/user/hadoop/file localfile</code>
+					<code> hadoop fs -get hdfs://nn.example.com/user/hadoop/file localfile</code>
 				</li>
 			</ul>
 			<p>Exit Code:</p>
@@ -167,7 +178,7 @@
 		<section>
 			<title> getmerge </title>
 			<p>
-				<code>Usage: hadoop dfs -getmerge &lt;src&gt; &lt;localdst&gt; [addnl]</code>
+				<code>Usage: hadoop fs -getmerge &lt;src&gt; &lt;localdst&gt; [addnl]</code>
 			</p>
 			<p>
 	  Takes a source directory and a destination file as input and concatenates files in src into the destination local file. Optionally <code>addnl</code> can be set to enable adding a newline character at the end of each file.  
@@ -176,25 +187,25 @@
 		<section>
 			<title> ls </title>
 			<p>
-				<code>Usage: hadoop dfs -ls &lt;args&gt;</code>
+				<code>Usage: hadoop fs -ls &lt;args&gt;</code>
 			</p>
 			<p>
 		 For a file returns stat on the file with the following format:<br/><code>filename &lt;number of replicas&gt; filesize modification_date modification_time permissions userid groupid</code><br/>
 	         For a directory it returns list of its direct children as in unix.
 	         A directory is listed as: <br/><code>dirname &lt;dir&gt; modification_time modification_time permissions userid groupid</code><br/>
-	         Example:<br/><code>hadoop dfs -ls /user/hadoop/file1 /user/hadoop/file2 hdfs://host:port/user/hadoop/dir1 /nonexistentfile</code><br/>
+	         Example:<br/><code>hadoop fs -ls /user/hadoop/file1 /user/hadoop/file2 hdfs://nn.example.com/user/hadoop/dir1 /nonexistentfile</code><br/>
 	         Exit Code:<br/><code> Returns 0 on success and -1 on error. </code><br/></p>
 		</section>
 		<section>
 			<title>lsr</title>
-			<p><code>Usage: hadoop dfs -lsr &lt;args&gt;</code><br/>
+			<p><code>Usage: hadoop fs -lsr &lt;args&gt;</code><br/>
 	      Recursive version of <code>ls</code>. Similar to Unix <code>ls -R</code>.
 	      </p>
 		</section>
 		<section>
 			<title> mkdir </title>
 			<p>
-				<code>Usage: hadoop dfs -mkdir &lt;paths&gt;</code>
+				<code>Usage: hadoop fs -mkdir &lt;paths&gt;</code>
 				<br/>
 			</p>
 			<p>
@@ -203,10 +214,10 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code>hadoop dfs -mkdir /user/hadoop/dir1 /user/hadoop/dir2 </code>
+					<code>hadoop fs -mkdir /user/hadoop/dir1 /user/hadoop/dir2 </code>
 				</li>
 				<li>
-					<code>hadoop dfs -mkdir hdfs://host1:port1/user/hadoop/dir hdfs://host2:port2/user/hadoop/dir
+					<code>hadoop fs -mkdir hdfs://nn1.example.com/user/hadoop/dir hdfs://nn2.example.com/user/hadoop/dir
 	  </code>
 				</li>
 			</ul>
@@ -226,7 +237,7 @@
 		<section>
 			<title> mv </title>
 			<p>
-				<code>Usage: hadoop dfs -mv URI [URI &#x2026;] &lt;dest&gt;</code>
+				<code>Usage: hadoop fs -mv URI [URI &#x2026;] &lt;dest&gt;</code>
 			</p>
 			<p>
 	    Moves files from source to destination. This command allows multiple sources as well in which case the destination needs to be a directory. Moving files across filesystems is not permitted.
@@ -235,10 +246,10 @@
 	    </p>
 			<ul>
 				<li>
-					<code> hadoop dfs -mv /user/hadoop/file1 /user/hadoop/file2</code>
+					<code> hadoop fs -mv /user/hadoop/file1 /user/hadoop/file2</code>
 				</li>
 				<li>
-					<code> hadoop dfs -mv hdfs://host:port/file1 hdfs://host:port/file2 hdfs://host:port/file3 hdfs://host:port/dir1</code>
+					<code> hadoop fs -mv hdfs://nn.example.com/file1 hdfs://nn.example.com/file2 hdfs://nn.example.com/file3 hdfs://nn.example.com/dir1</code>
 				</li>
 			</ul>
 			<p>Exit Code:</p>
@@ -249,21 +260,21 @@
 		<section>
 			<title id="putlink"> put </title>
 			<p>
-				<code>Usage: hadoop dfs -put &lt;localsrc&gt; ... &lt;dst&gt;</code>
+				<code>Usage: hadoop fs -put &lt;localsrc&gt; ... &lt;dst&gt;</code>
 			</p>
 			<p>Copy single src, or multiple srcs from local file system to the destination filesystem. Also reads input from stdin and writes to destination filesystem.<br/>
 	   </p>
 			<ul>
 				<li>
-					<code> hadoop dfs -put localfile /user/hadoop/hadoopfile</code>
+					<code> hadoop fs -put localfile /user/hadoop/hadoopfile</code>
 				</li>
 				<li>
-					<code> hadoop dfs -put localfile1 localfile2 /user/hadoop/hadoopdir</code>
+					<code> hadoop fs -put localfile1 localfile2 /user/hadoop/hadoopdir</code>
 				</li>
 				<li>
-					<code> hadoop dfs -put localfile hdfs://host:port/hadoop/hadoopfile</code>
+					<code> hadoop fs -put localfile hdfs://nn.example.com/hadoop/hadoopfile</code>
 				</li>
-				<li><code>hadoop dfs -put - hdfs://host:port/hadoop/hadoopfile</code><br/>Reads the input from stdin.</li>
+				<li><code>hadoop fs -put - hdfs://nn.example.com/hadoop/hadoopfile</code><br/>Reads the input from stdin.</li>
 			</ul>
 			<p>Exit Code:</p>
 			<p>
@@ -273,7 +284,7 @@
 		<section>
 			<title> rm </title>
 			<p>
-				<code>Usage: hadoop dfs -rm URI [URI &#x2026;] </code>
+				<code>Usage: hadoop fs -rm URI [URI &#x2026;] </code>
 			</p>
 			<p>
 	   Delete files specified as args. Only deletes non empty directory and files. Refer to rmr for recursive deletes.<br/>
@@ -281,7 +292,7 @@
 	   </p>
 			<ul>
 				<li>
-					<code> hadoop dfs -rm hdfs://host:port/file /user/hadoop/emptydir </code>
+					<code> hadoop fs -rm hdfs://nn.example.com/file /user/hadoop/emptydir </code>
 				</li>
 			</ul>
 			<p>Exit Code:</p>
@@ -292,17 +303,17 @@
 		<section>
 			<title> rmr </title>
 			<p>
-				<code>Usage: hadoop dfs -rmr URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -rmr URI [URI &#x2026;]</code>
 			</p>
 			<p>Recursive version of delete.<br/>
 	   Example:
 	   </p>
 			<ul>
 				<li>
-					<code> hadoop dfs -rmr /user/hadoop/dir </code>
+					<code> hadoop fs -rmr /user/hadoop/dir </code>
 				</li>
 				<li>
-					<code> hadoop dfs -rmr hdfs://host:port/user/hadoop/dir </code>
+					<code> hadoop fs -rmr hdfs://nn.example.com/user/hadoop/dir </code>
 				</li>
 			</ul>
 			<p>Exit Code:</p>
@@ -313,7 +324,7 @@
 		<section>
 			<title> setrep </title>
 			<p>
-				<code>Usage: hadoop dfs -setrep [-R] &lt;path&gt;</code>
+				<code>Usage: hadoop fs -setrep [-R] &lt;path&gt;</code>
 			</p>
 			<p>
 	   Changes the replication factor of a file. -R option is for recursively increasing the replication factor of files within a directory.
@@ -321,7 +332,7 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -setrep -w 3 -R /user/hadoop/dir1 </code>
+					<code> hadoop fs -setrep -w 3 -R /user/hadoop/dir1 </code>
 				</li>
 			</ul>
 			<p>Exit Code:</p>
@@ -332,7 +343,7 @@
 		<section>
 			<title> stat </title>
 			<p>
-				<code>Usage: hadoop dfs -stat URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -stat URI [URI &#x2026;]</code>
 			</p>
 			<p>
 	   Returns the stat information on the path.
@@ -340,7 +351,7 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -stat path </code>
+					<code> hadoop fs -stat path </code>
 				</li>
 			</ul>
 			<p>Exit Code:<br/>
@@ -349,7 +360,7 @@
 		<section>
 			<title> tail </title>
 			<p>
-				<code>Usage: hadoop dfs -tail [-f] URI </code>
+				<code>Usage: hadoop fs -tail [-f] URI </code>
 			</p>
 			<p>
 	   Displays last kilobyte of the file to stdout. -f option can be used as in Unix.
@@ -357,7 +368,7 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -tail pathname </code>
+					<code> hadoop fs -tail pathname </code>
 				</li>
 			</ul>
 			<p>Exit Code: <br/>
@@ -366,7 +377,7 @@
 		<section>
 			<title> test </title>
 			<p>
-				<code>Usage: hadoop dfs -test -[ezd] URI</code>
+				<code>Usage: hadoop fs -test -[ezd] URI</code>
 			</p>
 			<p>
 	   Options: <br/>
@@ -376,14 +387,14 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -test -e filename </code>
+					<code> hadoop fs -test -e filename </code>
 				</li>
 			</ul>
 		</section>
 		<section>
 			<title> text </title>
 			<p>
-				<code>Usage: hadoop dfs -text &lt;src&gt;</code>
+				<code>Usage: hadoop fs -text &lt;src&gt;</code>
 				<br/>
 			</p>
 			<p>
@@ -393,7 +404,7 @@
 		<section>
 			<title> touchz </title>
 			<p>
-				<code>Usage: hadoop dfs -touchz URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -touchz URI [URI &#x2026;]</code>
 				<br/>
 			</p>
 			<p>
@@ -408,5 +419,6 @@
 			<p>Exit Code:<br/>
 	   <code> Returns 0 on success and -1 on error.</code></p>
 		</section>
+        </section>
 	</body>
 </document>
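
The rewritten FS Shell introduction above says a path such as /parent/child and hdfs://namenodehost/parent/child are interchangeable once the configuration points at hdfs://namenodehost. A hedged Java sketch of that equivalence (hostname hypothetical; assumes a namenode reachable on the default port):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FsUriResolution {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.default.name", "hdfs://namenodehost/");
        // Fully qualified URI, port left to the 8020 default...
        FileSystem explicit = FileSystem.get(URI.create("hdfs://namenodehost/parent/child"), conf);
        // ...and the bare-path form, resolved against the configured default.
        FileSystem implicit = FileSystem.get(conf);
        System.out.println(explicit.getUri().equals(implicit.getUri())); // expected: true
        System.out.println(implicit.exists(new Path("/parent/child")));
      }
    }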

+ 1 - 1
src/docs/src/documentation/content/xdocs/site.xml

@@ -37,8 +37,8 @@ See http://forrest.apache.org/docs/linking.html for more info.
     <setup     label="Cluster Setup"      href="cluster_setup.html" />
     <hdfs      label="HDFS Architecture"  href="hdfs_design.html" />
     <hdfs      label="HDFS User Guide"    href="hdfs_user_guide.html" />
-    <hdfs      label="HDFS Shell Guide"   href="hdfs_shell.html" />
     <hdfs      label="HDFS Permissions Guide"    href="hdfs_permissions_guide.html" />
+    <fs        label="FS Shell Guide"     href="hdfs_shell.html" />
     <mapred    label="Map-Reduce Tutorial" href="mapred_tutorial.html" />
     <mapred    label="Native Hadoop Libraries" href="native_libraries.html" />
     <streaming label="Streaming"          href="streaming.html" />

+ 1 - 1
src/docs/src/documentation/content/xdocs/streaming.xml

@@ -163,7 +163,7 @@ $HADOOP_HOME/bin/hadoop  jar $HADOOP_HOME/hadoop-streaming.jar \
                   -mapper "xargs cat"  \
                   -reducer "cat"  \
                   -output "/user/me/samples/cachefile/out" \  
-                  -cacheArchive 'hdfs://hadoop-nn1.example.com:8020/user/me/samples/cachefile/cachedir.jar#testlink' \  
+                  -cacheArchive 'hdfs://hadoop-nn1.example.com/user/me/samples/cachefile/cachedir.jar#testlink' \  
                   -jobconf mapred.map.tasks=1 \
                   -jobconf mapred.reduce.tasks=1 \ 
                   -jobconf mapred.job.name="Experiment"

+ 2 - 6
src/java/org/apache/hadoop/dfs/DFSClient.java

@@ -81,9 +81,7 @@ class DFSClient implements FSConstants {
     new TreeMap<String, OutputStream>();
  
   static ClientProtocol createNamenode(Configuration conf) throws IOException {
-    return createNamenode(NetUtils.createSocketAddr
-                          (FileSystem.getDefaultUri(conf).getAuthority()),
-                          conf);
+    return createNamenode(NameNode.getAddress(conf), conf);
   }
 
   static ClientProtocol createNamenode( InetSocketAddress nameNodeAddr,
@@ -132,9 +130,7 @@ class DFSClient implements FSConstants {
    * Create a new DFSClient connected to the default namenode.
    */
   public DFSClient(Configuration conf) throws IOException {
-    this(NetUtils.createSocketAddr(FileSystem.getDefaultUri(conf)
-                                   .getAuthority()),
-         conf);
+    this(NameNode.getAddress(conf), conf);
   }
 
   /** 
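
Both call sites above now funnel through NameNode.getAddress(conf), so the port default lives in one place instead of being re-derived from the URI authority at each caller. The resolution rests on a NetUtils.createSocketAddr overload that takes a default port (it is what NameNode.getAddress uses in the hunk further below); roughly, as a sketch with illustrative authorities:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.net.NetUtils;

    public class AddressDefaulting {
      public static void main(String[] args) {
        // No port in the authority: the supplied default (8020) is used.
        InetSocketAddress a = NetUtils.createSocketAddr("nn.example.com", 8020);
        // Explicit port: it wins over the default.
        InetSocketAddress b = NetUtils.createSocketAddr("nn.example.com:9000", 8020);
        System.out.println(a.getPort() + " " + b.getPort()); // 8020 9000
      }
    }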

+ 1 - 2
src/java/org/apache/hadoop/dfs/DataNode.java

@@ -210,8 +210,7 @@ public class DataNode implements InterDatanodeProtocol, FSConstants, Runnable {
                                      conf.get("dfs.datanode.dns.interface","default"),
                                      conf.get("dfs.datanode.dns.nameserver","default"));
     }
-    InetSocketAddress nameNodeAddr =
-      NetUtils.createSocketAddr(FileSystem.getDefaultUri(conf).getAuthority());
+    InetSocketAddress nameNodeAddr = NameNode.getAddress(conf);
     
     this.estimateBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     this.socketTimeout =  conf.getInt("dfs.socket.timeout",

+ 8 - 10
src/java/org/apache/hadoop/dfs/DistributedFileSystem.java

@@ -47,10 +47,7 @@ public class DistributedFileSystem extends FileSystem {
   /** @deprecated */
   public DistributedFileSystem(InetSocketAddress namenode,
     Configuration conf) throws IOException {
-    initialize(URI.create("hdfs://"+
-                          namenode.getHostName()+":"+
-                          namenode.getPort()),
-                          conf);
+    initialize(NameNode.getUri(namenode), conf);
   }
 
   /** @deprecated */
@@ -60,14 +57,15 @@ public class DistributedFileSystem extends FileSystem {
 
   public void initialize(URI uri, Configuration conf) throws IOException {
     setConf(conf);
+
     String host = uri.getHost();
-    int port = uri.getPort();
-    if (host == null || port == -1) {
-      throw new IOException("Incomplete HDFS URI, no host/port: "+ uri);
+    if (host == null) {
+      throw new IOException("Incomplete HDFS URI, no host: "+ uri);
     }
-    this.dfs = new DFSClient(new InetSocketAddress(host, port), conf,
-                             statistics);
-    this.uri = URI.create("hdfs://"+host+":"+port);
+
+    InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
+    this.dfs = new DFSClient(namenode, conf, statistics);
+    this.uri = NameNode.getUri(namenode);
     this.workingDir = getHomeDirectory();
   }
 
 

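With initialize() delegating to NameNode.getAddress() and NameNode.getUri(), a port-less HDFS URI is now accepted and canonicalized; only a missing host remains an error. A hedged sketch of the user-visible behavior; the sketch class and hostname are illustrative, not from this commit, and FileSystem.get needs a reachable namenode:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

/** Hypothetical sketch, not part of this commit. */
public class PortlessHdfsUriSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The missing port defaults to 8020; the filesystem's URI is then
    // canonicalized back to the port-less form by NameNode.getUri().
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode.example.com/"), conf);
    System.out.println(fs.getUri());  // hdfs://namenode.example.com
  }
}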
+ 18 - 3
src/java/org/apache/hadoop/dfs/NameNode.java

@@ -85,6 +85,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
     }
   }
     
+  public static final int DEFAULT_PORT = 8020;
+
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.NameNode");
   public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.dfs.StateChange");
 
@@ -110,7 +112,20 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
     return myMetrics;
   }
   
-    
+  static InetSocketAddress getAddress(String address) {
+    return NetUtils.createSocketAddr(address, DEFAULT_PORT);
+  }
+
+  static InetSocketAddress getAddress(Configuration conf) {
+    return getAddress(FileSystem.getDefaultUri(conf).getAuthority());
+  }
+
+  static URI getUri(InetSocketAddress namenode) {
+    int port = namenode.getPort();
+    String portString = port == DEFAULT_PORT ? "" : (":"+port);
+    return URI.create("hdfs://"+ namenode.getHostName()+portString);
+  }
+
   /**
    * Initialize the server
    * 
@@ -118,14 +133,14 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
    * @param conf the configuration
    */
   private void initialize(String address, Configuration conf) throws IOException {
-    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
+    InetSocketAddress socAddr = NameNode.getAddress(address);
     this.handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
     this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                                 handlerCount, false, conf);
 
     // The rpc-server port can be ephemeral... ensure we have the correct info
     this.nameNodeAddress = this.server.getListenerAddress(); 
-    FileSystem.setDefaultUri(conf, "hdfs://"+nameNodeAddress.getHostName() + ":" + nameNodeAddress.getPort());
+    FileSystem.setDefaultUri(conf, getUri(nameNodeAddress));
     LOG.info("Namenode up at: " + this.nameNodeAddress);
 
     myMetrics = new NameNodeMetrics(conf, this);

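These three helpers are the heart of the change: getAddress() applies DEFAULT_PORT when the authority names none, and getUri() omits the port again when it equals the default, keeping URIs canonical. A sketch of the round-trip, mirroring the assertions in TestDefaultNameNodePort at the end of this diff; the sketch class and hostnames are illustrative, and since the methods are package-private the sketch lives in org.apache.hadoop.dfs:

package org.apache.hadoop.dfs;

import java.net.InetSocketAddress;

/** Hypothetical sketch, not part of this commit. */
public class NameNodeAddressSketch {
  public static void main(String[] args) {
    // A bare hostname picks up the default port...
    InetSocketAddress a = NameNode.getAddress("foo.example.com");
    System.out.println(a.getPort());          // 8020

    // ...and getUri() drops the default port from the printed form,
    System.out.println(NameNode.getUri(a));   // hdfs://foo.example.com

    // while a non-default port survives the round-trip.
    InetSocketAddress b = NameNode.getAddress("foo.example.com:555");
    System.out.println(NameNode.getUri(b));   // hdfs://foo.example.com:555
  }
}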
+ 2 - 2
src/java/org/apache/hadoop/dfs/SecondaryNameNode.java

@@ -121,8 +121,8 @@ public class SecondaryNameNode implements FSConstants, Runnable {
     
     // Create connection to the namenode.
     shouldRun = true;
-    nameNodeAddr =
-      NetUtils.createSocketAddr(FileSystem.getDefaultUri(conf).getAuthority());
+    nameNodeAddr = NameNode.getAddress(conf);
+
     this.conf = conf;
     this.namenode =
         (ClientProtocol) RPC.waitForProxy(ClientProtocol.class,

+ 24 - 5
src/java/org/apache/hadoop/net/NetUtils.java

@@ -116,22 +116,41 @@ public class NetUtils {
    *   <fs>://<host>:<port>/<path>
    */
   public static InetSocketAddress createSocketAddr(String target) {
+    return createSocketAddr(target, -1);
+  }
+
+  /**
+   * Util method to build socket addr from either:
+   *   <host>
+   *   <host>:<port>
+   *   <fs>://<host>:<port>/<path>
+   */
+  public static InetSocketAddress createSocketAddr(String target,
+                                                   int defaultPort) {
     int colonIndex = target.indexOf(':');
-    if (colonIndex < 0) {
+    if (colonIndex < 0 && defaultPort == -1) {
       throw new RuntimeException("Not a host:port pair: " + target);
     }
     String hostname;
-    int port;
+    int port = -1;
     if (!target.contains("/")) {
-      // must be the old style <host>:<port>
-      hostname = target.substring(0, colonIndex);
-      port = Integer.parseInt(target.substring(colonIndex + 1));
+      if (colonIndex == -1) {
+        hostname = target;
+      } else {
+        // must be the old style <host>:<port>
+        hostname = target.substring(0, colonIndex);
+        port = Integer.parseInt(target.substring(colonIndex + 1));
+      }
     } else {
       // a new uri
       URI addr = new Path(target).toUri();
       hostname = addr.getHost();
       port = addr.getPort();
     }
+
+    if (port == -1) {
+      port = defaultPort;
+    }
   
     if (getStaticResolution(hostname) != null) {
       hostname = getStaticResolution(hostname);

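createSocketAddr() keeps its one-argument contract and gains a defaultPort overload; -1 means "no default", which preserves the old failure mode for bare hostnames. A sketch of the three cases (the sketch class and hostname are illustrative, not from this commit):

import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;

/** Hypothetical sketch, not part of this commit. */
public class SocketAddrSketch {
  public static void main(String[] args) {
    // Bare host: the caller-supplied default fills in the port.
    InetSocketAddress a = NetUtils.createSocketAddr("host.example.com", 8020);
    System.out.println(a.getPort());  // 8020

    // An explicit host:port always wins over the default.
    InetSocketAddress b = NetUtils.createSocketAddr("host.example.com:555", 8020);
    System.out.println(b.getPort());  // 555

    // The one-argument form still rejects a bare host, as before.
    try {
      NetUtils.createSocketAddr("host.example.com");
    } catch (RuntimeException expected) {
      System.out.println(expected.getMessage());  // Not a host:port pair: ...
    }
  }
}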
+ 30 - 30
src/java/overview.html

@@ -133,7 +133,7 @@ following:
 <ol>
 
 <li>The {@link org.apache.hadoop.dfs.NameNode} (Distributed Filesystem
-master) host and port.  This is specified with the configuration
+master) host.  This is specified with the configuration
 property <tt><a
 href="../hadoop-default.html#fs.default.name">fs.default.name</a></tt>.
 </li>
@@ -158,7 +158,7 @@ way, put the following in conf/hadoop-site.xml:
 
   <property>
     <name>fs.default.name</name>
-    <value>localhost:9000</value>
+    <value>hdfs://localhost/</value>
   </property>
 
   <property>
@@ -173,7 +173,7 @@ way, put the following in conf/hadoop-site.xml:
 
 </configuration></xmp>
 
-<p>(We also set the DFS replication level to 1 in order to
+<p>(We also set the HDFS replication level to 1 in order to
 reduce warnings when running on a single node.)</p>
 
 <p>Now check that the command <br><tt>ssh localhost</tt><br> does not
@@ -198,7 +198,7 @@ command, run on the master node:</p>
 
 <p>Input files are copied into the distributed filesystem as follows:</p>
 
-<p><tt>bin/hadoop dfs -put input input</tt></p>
+<p><tt>bin/hadoop fs -put input input</tt></p>
 
 <h3>Distributed execution</h3>
 
@@ -207,7 +207,7 @@ examine it:</p>
 
 <tt>
 bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'<br>
-bin/hadoop dfs -get output output
+bin/hadoop fs -get output output
 cat output/*
 </tt>
 
@@ -215,49 +215,49 @@ cat output/*
 
 <p><tt>bin/stop-all.sh</tt></p>
 
-<h2>Fully-distributed operation</h2>
+<h3>Fully-distributed operation</h3>
 
-<p>Distributed operation is just like the pseudo-distributed operation
-described above, except:</p>
+<p>Fully distributed operation is just like the pseudo-distributed operation
+described above, except, in <tt>conf/hadoop-site.xml</tt>, specify:</p>
 
 <ol>
 
-<li>Specify hostname or IP address of the master server in the values
+<li>The hostname or IP address of your master server in the value
 for <tt><a
-href="../hadoop-default.html#fs.default.name">fs.default.name</a></tt>
-and <tt><a
-href="../hadoop-default.html#mapred.job.tracker">mapred.job.tracker</a></tt>
-in <tt>conf/hadoop-site.xml</tt>.  These are specified as
-<tt><em>host</em>:<em>port</em></tt> pairs.</li>
+href="../hadoop-default.html#fs.default.name">fs.default.name</a></tt>,
+  as <tt><em>hdfs://master.example.com/</em></tt>.</li>
+
+<li>The host and port of your master server in the value
+of <tt><a href="../hadoop-default.html#mapred.job.tracker">mapred.job.tracker</a></tt>
+as <tt><em>master.example.com</em>:<em>port</em></tt>.</li>
 
-<li>Specify directories for <tt><a
+<li>Directories for <tt><a
 href="../hadoop-default.html#dfs.name.dir">dfs.name.dir</a></tt> and
-<tt><a
-href="../hadoop-default.html#dfs.data.dir">dfs.data.dir</a></tt> in
-<tt>conf/hadoop-site.xml</tt>.  These are used to hold distributed
-filesystem data on the master node and slave nodes respectively.  Note
+<tt><a href="../hadoop-default.html#dfs.data.dir">dfs.data.dir</a>.
+</tt>These are local directories used to hold distributed filesystem
+data on the master node and slave nodes respectively.  Note
 that <tt>dfs.data.dir</tt> may contain a space- or comma-separated
-list of directory names, so that data may be stored on multiple
+list of directory names, so that data may be stored on multiple local
 devices.</li>
 
-<li>Specify <tt><a
-href="../hadoop-default.html#mapred.local.dir">mapred.local.dir</a></tt>
-in <tt>conf/hadoop-site.xml</tt>.  This determines where temporary
-MapReduce data is written.  It also may be a list of directories.</li>
+<li><tt><a href="../hadoop-default.html#mapred.local.dir">mapred.local.dir</a></tt>,
+  the local directory where temporary MapReduce data is stored.  It
+  also may be a list of directories.</li>
 
-<li>Specify <tt><a
+<li><tt><a
 href="../hadoop-default.html#mapred.map.tasks">mapred.map.tasks</a></tt>
 and <tt><a
-href="../hadoop-default.html#mapred.reduce.tasks">mapred.reduce.tasks</a></tt>
-in <tt>conf/hadoop-site.xml</tt>.  As a rule of thumb, use 10x the
+href="../hadoop-default.html#mapred.reduce.tasks">mapred.reduce.tasks</a></tt>.
+As a rule of thumb, use 10x the
 number of slave processors for <tt>mapred.map.tasks</tt>, and 2x the
 number of slave processors for <tt>mapred.reduce.tasks</tt>.</li>
 
-<li>List all slave hostnames or IP addresses in your
-<tt>conf/slaves</tt> file, one per line.</li>
-
 </ol>
 
+<p>Finally, list all slave hostnames or IP addresses in your
+<tt>conf/slaves</tt> file, one per line.  Then format your filesystem
+and start your cluster on your master node, as above.
+
 </body>
 </html>
 

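The overview's revised advice reduces to: name the default filesystem with a port-less hdfs: URI and let the namenode's default port apply. A sketch of how client code sees such a configuration; the sketch class is hypothetical, and it assumes the conf/hadoop-site.xml shown above is on the classpath and a namenode is running:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

/** Hypothetical sketch, not part of this commit. */
public class DefaultFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();  // reads conf/hadoop-site.xml
    // fs.default.name = hdfs://localhost/ resolves to localhost:8020.
    System.out.println(FileSystem.getDefaultUri(conf));
    FileSystem fs = FileSystem.get(conf);
    System.out.println(fs.getUri());
  }
}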
+ 1 - 2
src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java

@@ -100,8 +100,7 @@ public class TestDFSShellGenericOptions extends TestCase {
     FileSystem fs=null;
     try {
       ToolRunner.run(shell, args);
-      fs = new DistributedFileSystem(
-                                     NetUtils.createSocketAddr(namenode), 
+      fs = new DistributedFileSystem(NameNode.getAddress(namenode), 
                                      shell.getConf());
       assertTrue("Directory does not get created", 
                  fs.isDirectory(new Path("/data")));

+ 59 - 0
src/test/org/apache/hadoop/dfs/TestDefaultNameNodePort.java

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.dfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.net.*;
+import java.util.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+
+/** Test NameNode port defaulting code. */
+public class TestDefaultNameNodePort extends TestCase {
+
+  public void testGetAddressFromString() throws Exception {
+    assertEquals(NameNode.getAddress("foo").getPort(),
+                 NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
+                 NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
+                 555);
+    assertEquals(NameNode.getAddress("foo:555").getPort(),
+                 555);
+  }
+
+  public void testGetAddressFromConf() throws Exception {
+    Configuration conf = new Configuration();
+    FileSystem.setDefaultUri(conf, "hdfs://foo/");
+    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+    FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
+    assertEquals(NameNode.getAddress(conf).getPort(), 555);
+    FileSystem.setDefaultUri(conf, "foo");
+    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+  }
+
+  public void testGetUri() {
+    assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
+                 URI.create("hdfs://foo:555"));
+    assertEquals(NameNode.getUri(new InetSocketAddress("foo",
+                                                       NameNode.DEFAULT_PORT)),
+                 URI.create("hdfs://foo"));
+  }
+}

Some files were not shown because too many files changed in this diff