
Base security branch on hadoop-0.20.1

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-security@1062069 13f79535-47bb-0310-9956-ffa450edef68
Arun Murthy 14 years ago
parent
commit
a594c0ab91
100 changed files with 1243 additions and 2630 deletions
  1. 0 1
      .eclipse.templates/.classpath
  2. 0 219
      CHANGES.txt
  3. 3 3
      bin/hadoop
  4. 1 1
      bin/rcc
  5. 98 286
      build.xml
  6. 1 5
      ivy.xml
  7. 0 122
      ivy/hadoop-core-pom-template.xml
  8. 257 0
      ivy/hadoop-core.pom
  9. 0 34
      ivy/hadoop-examples-pom-template.xml
  10. 0 34
      ivy/hadoop-streaming-pom-template.xml
  11. 0 53
      ivy/hadoop-test-pom-template.xml
  12. 0 34
      ivy/hadoop-tools-pom-template.xml
  13. 0 4
      ivy/libraries.properties
  14. 0 11
      lib/jdiff/hadoop_0.20.1.xml
  15. 0 11
      lib/jdiff/hadoop_0.20.2.xml
  16. 1 1
      src/ant/org/apache/hadoop/ant/condition/DfsBaseConditional.java
  17. 2 8
      src/c++/libhdfs/hdfs.c
  18. 1 1
      src/c++/libhdfs/hdfsJniHelper.c
  19. 2 0
      src/c++/libhdfs/hdfsJniHelper.h
  20. 0 2
      src/c++/pipes/api/hadoop/Pipes.hh
  21. 0 1
      src/c++/pipes/impl/HadoopPipes.cc
  22. 0 1
      src/c++/utils/api/hadoop/SerialUtils.hh
  23. 0 1
      src/c++/utils/impl/SerialUtils.cc
  24. 0 2
      src/c++/utils/impl/StringUtils.cc
  25. 3 4
      src/contrib/build-contrib.xml
  26. 3 2
      src/contrib/data_join/build.xml
  27. 2 2
      src/contrib/eclipse-plugin/build.xml
  28. 2 2
      src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java
  29. 2 2
      src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java
  30. 2 2
      src/contrib/failmon/build.xml
  31. 45 59
      src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerServlet.java
  32. 0 4
      src/contrib/hdfsproxy/ivy.xml
  33. 1 1
      src/contrib/index/build.xml
  34. 4 2
      src/contrib/streaming/build.xml
  35. 1 7
      src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
  36. 3 5
      src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
  37. 2 11
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java
  38. 1 2
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java
  39. 51 33
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java
  40. 68 64
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java
  41. 10 1
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamAggregate.java
  42. 10 1
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamDataProtocol.java
  43. 10 1
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamReduceNone.java
  44. 3 1
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java
  45. 21 11
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java
  46. 27 24
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java
  47. 5 3
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCounters.java
  48. 28 38
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingExitStatus.java
  49. 10 1
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingKeyValue.java
  50. 10 1
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingSeparator.java
  51. 0 101
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java
  52. 20 12
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStderr.java
  53. 66 54
      src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java
  54. 1 1
      src/contrib/thriftfs/build.xml
  55. 1 1
      src/contrib/vaidya/build.xml
  56. 0 6
      src/core/core-default.xml
  57. 3 4
      src/core/org/apache/hadoop/conf/Configuration.java
  58. 0 5
      src/core/org/apache/hadoop/fs/FileSystem.java
  59. 3 3
      src/core/org/apache/hadoop/fs/FsShellPermissions.java
  60. 14 4
      src/core/org/apache/hadoop/fs/HarFileSystem.java
  61. 0 1
      src/core/org/apache/hadoop/http/HttpServer.java
  62. 3 1
      src/core/org/apache/hadoop/io/BooleanWritable.java
  63. 3 28
      src/core/org/apache/hadoop/io/WritableComparator.java
  64. 2 2
      src/core/org/apache/hadoop/io/compress/GzipCodec.java
  65. 6 130
      src/core/org/apache/hadoop/io/file/tfile/TFile.java
  66. 5 9
      src/core/org/apache/hadoop/ipc/Client.java
  67. 2 6
      src/core/org/apache/hadoop/ipc/Server.java
  68. 3 199
      src/docs/releasenotes.html
  69. 2 4
      src/examples/org/apache/hadoop/examples/PiEstimator.java
  70. 30 71
      src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
  71. 0 92
      src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
  72. 4 93
      src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java
  73. 47 60
      src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
  74. 140 196
      src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  75. 26 68
      src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  76. 33 157
      src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
  77. 5 12
      src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  78. 0 4
      src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
  79. 0 9
      src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
  80. 19 13
      src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
  81. 20 3
      src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  82. 0 28
      src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
  83. 6 10
      src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
  84. 3 0
      src/mapred/org/apache/hadoop/mapred/Counters.java
  85. 0 2
      src/mapred/org/apache/hadoop/mapred/DisallowedTaskTrackerException.java
  86. 0 2
      src/mapred/org/apache/hadoop/mapred/FileAlreadyExistsException.java
  87. 3 0
      src/mapred/org/apache/hadoop/mapred/FileInputFormat.java
  88. 3 0
      src/mapred/org/apache/hadoop/mapred/FileSplit.java
  89. 1 0
      src/mapred/org/apache/hadoop/mapred/ID.java
  90. 2 0
      src/mapred/org/apache/hadoop/mapred/InputFormat.java
  91. 2 0
      src/mapred/org/apache/hadoop/mapred/InputSplit.java
  92. 0 2
      src/mapred/org/apache/hadoop/mapred/InvalidFileTypeException.java
  93. 0 2
      src/mapred/org/apache/hadoop/mapred/InvalidInputException.java
  94. 0 2
      src/mapred/org/apache/hadoop/mapred/InvalidJobConfException.java
  95. 45 75
      src/mapred/org/apache/hadoop/mapred/JobConf.java
  96. 1 0
      src/mapred/org/apache/hadoop/mapred/JobConfigurable.java
  97. 4 0
      src/mapred/org/apache/hadoop/mapred/JobContext.java
  98. 1 3
      src/mapred/org/apache/hadoop/mapred/JobHistory.java
  99. 1 0
      src/mapred/org/apache/hadoop/mapred/JobID.java
  100. 23 36
      src/mapred/org/apache/hadoop/mapred/JobTracker.java

+ 0 - 1
.eclipse.templates/.classpath

@@ -29,7 +29,6 @@
 	<classpathentry kind="lib" path="build/ivy/lib/Hadoop/common/jets3t-0.6.1.jar"/>
 	<classpathentry kind="lib" path="build/ivy/lib/Hadoop/common/junit-3.8.1.jar"/>
 	<classpathentry kind="lib" path="build/ivy/lib/Hadoop/common/log4j-1.2.15.jar"/>
-	<classpathentry kind="lib" path="build/ivy/lib/Hadoop/common/mockito-all-1.8.0.jar"/>
 	<classpathentry kind="lib" path="build/ivy/lib/Hadoop/common/oro-2.0.8.jar"/>
   	<classpathentry kind="lib" path="build/ivy/lib/Hadoop/common/jetty-6.1.14.jar"/>
   	<classpathentry kind="lib" path="build/ivy/lib/Hadoop/common/jetty-util-6.1.14.jar"/>

+ 0 - 219
CHANGES.txt

@@ -1,224 +1,5 @@
 Hadoop Change Log
 
-Release 0.20.4 - Unreleased
-
-  NEW FEATURES
-
-  BUG FIXES
-
-  IMPROVEMENTS
-
-    MAPREDUCE-1734. Un-deprecate the old MapReduce API in the 0.20 branch.
-    (todd)
-
-Release 0.20.3 - 2011-1-5
-
-  NEW FEATURES
-
-    HADOOP-6637. Benchmark for establishing RPC session. (shv)
-
-    HADOOP-6382. Add support for publishing Hadoop jars to Apache Maven
-    repository. (Giridharan Kesavan via cdouglas)
-
-  BUG FIXES
-
-    MAPREDUCE-1280. Update Eclipse plugin to the new eclipse.jdt API.
-    (Alex Kozlov via szetszwo)
-
-    HADOOP-6760. WebServer shouldn't increase port number in case of negative
-    port setting caused by Jetty's race (cos)
-
-    MAPREDUCE-1522. FileInputFormat may use the default FileSystem for the
-    input path. (Tsz Wo (Nicholas), SZE via cdouglas)
-
-    HDFS-1024 SecondaryNamenode fails to checkpoint because namenode fails
-    with CancelledKeyException (Dmytro Molkov via Stack)
-
-    HDFS-955. New implementation of saveNamespace() to avoid loss of edits 
-    when name-node fails during saving. (shv)
-
-    HDFS-1041. DFSClient.getFileChecksum(..) should retry if connection to
-    the first datanode fails.  (szetszwo)
-
-    HDFS-909. Wait until edits syncing is finishes before purging edits.
-    (Todd Lipcon via shv)
-
-    MAPREDUCE-1372. ConcurrentModificationException in JobInProgress.
-    (Dick King and Amareshwari Sriramadasu via tomwhite)
-
-    MAPREDUCE-118. Fix Job.getJobID(). (Amareshwari Sriramadasu via sharad)
-
-    MAPREDUCE-1880. Fix BigDecimal.divide(..) in the pi example.  (szetszwo)
-
-    HDFS-1258. Clearing namespace quota on "/" corrupts fs image.  
-    (Aaron T. Myers via szetszwo)
-
-    HDFS-132. Fix namenode to not report files deleted metrics for deletions
-    done while replaying edits during startup. (suresh & shv)
-
-    HADOOP-6881. Make WritableComparator intialize classes when
-    looking for their raw comparator, as classes often register raw
-    comparators in initializers, which are no longer automatically run
-    in Java 6 when a class is referenced. (cutting via omalley)
-
-    HADOOP-6833. IPC leaks call parameters when exceptions thrown.
-    (Todd Lipcon via Eli Collins)
-
-    HADOOP-6928. Fix BooleanWritable comparator in 0.20.
-    (Owen O'Malley and Johannes Zillmann via Eli Collins)
-
-    HDFS-1404. TestNodeCount logic incorrect in branch-0.20.
-    (Todd Lipcon via Eli Collins)
-
-    HADOOP-6724. IPC doesn't properly handle IOEs thrown by socket factory.
-    (Todd Lipcon via Eli Collins)
-
-    HDFS-1240. TestDFSShell failing in branch-20.
-    (Todd Lipcon via Eli Collins)
-
-    HDFS-727. bug setting block size hdfsOpenFile (Eli Collins via cos)
-
-    HDFS-908. TestDistributedFileSystem fails with Wrong FS on weird hosts. 
-    (Todd Lipcon via eli)
-
-    HDFS-1377. Quota bug for partial blocks allows quotas to be violated. (eli)
-
-    HDFS-1406. TestCLI fails on Ubuntu with default /etc/hosts. (cos)
-
-  IMPROVEMENTS
-
-    MAPREDUCE-1407. Update javadoc in mapreduce.{Mapper,Reducer} to match
-    actual usage. (Benoit Sigoure via cdouglas)
-
-    MAPREDUCE-1832. Allow file sizes less than 1MB in DFSIO benchmark. (shv)
-
-Release 0.20.2 - 2010-2-19
-
-  NEW FEATURES
-
-    HADOOP-6218. Adds a feature where TFile can be split by Record
-    Sequence number. (Hong Tang and Raghu Angadi via ddas)
-
-  BUG FIXES
-
-    MAPREDUCE-112. Add counters for reduce input, output records to the new API.
-    (Jothi Padmanabhan via cdouglas)
-
-    HADOOP-6231.  Allow caching of filesystem instances to be disabled on a
-    per-instance basis (Tom White and Ben Slusky via mahadev)
-
-    MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even
-    if the job fails with exception (koji via mahadev)
-
-    MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return
-    values of new configuration variables when deprecated variables are
-    disabled. (Sreekanth Ramakrishnan via yhemanth)
-
-    HDFS-686. NullPointerException is thrown while merging edit log and image.
-    (hairong)
-
-    HDFS-677. Rename failure when both source and destination quota exceeds
-    results in deletion of source. (suresh)
-
-    HDFS-709. Fix TestDFSShell failure due to rename bug introduced by 
-    HDFS-677. (suresh)
-
-    HDFS-579. Fix DfsTask to follow the semantics of 0.19, regarding non-zero
-    return values as failures. (Christian Kunz via cdouglas)
-
-    MAPREDUCE-1070. Prevent a deadlock in the fair scheduler servlet.
-    (Todd Lipcon via cdouglas)
-
-    HADOOP-5759. Fix for  IllegalArgumentException when CombineFileInputFormat
-    is used as job InputFormat. (Amareshwari Sriramadasu via zshao)
-
-    HADOOP-6097. Fix Path conversion in makeQualified and reset LineReader byte
-    count at the start of each block in Hadoop archives. (Ben Slusky, Tom
-    White, and Mahadev Konar via cdouglas)
-
-    HDFS-723. Fix deadlock in DFSClient#DFSOutputStream. (hairong)
-
-    HDFS-732. DFSClient.DFSOutputStream.close() should throw an exception if
-    the stream cannot be closed successfully.  (szetszwo)
-
-    MAPREDUCE-1163. Remove unused, hard-coded paths from libhdfs. (Allen
-    Wittenauer via cdouglas)
-
-    HDFS-761. Fix failure to process rename operation from edits log due to 
-    quota verification. (suresh)
-
-    MAPREDUCE-623. Resolve javac warnings in mapreduce. (Jothi Padmanabhan
-    via sharad)
-
-    HADOOP-6575. Remove call to fault injection tests not present in 0.20.
-    (cdouglas)
-
-    HADOOP-6576. Fix streaming test failures on 0.20. (Todd Lipcon via cdouglas)
-
-  IMPROVEMENTS
-
-    HADOOP-5611. Fix C++ libraries to build on Debian Lenny. (Todd Lipcon
-    via tomwhite)
-
-    MAPREDUCE-1068. Fix streaming job to show proper message if file is 
-    is not present. (Amareshwari Sriramadasu via sharad)
-
-    HDFS-596. Fix memory leak in hdfsFreeFileInfo() for libhdfs.
-    (Zhang Bingjun via dhruba)
-
-    MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via
-    cdouglas)
-
-    HADOOP-6269. Fix threading issue with defaultResource in Configuration.
-    (Sreekanth Ramakrishnan via cdouglas)
-
-    MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the
-    configured threshold. (cdouglas)
-
-    HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress:
-    IllegalArgumentException is thrown. (cos)
-
-    HDFS-185. Disallow chown, chgrp, chmod, setQuota, and setSpaceQuota when
-    name-node is in safemode. (Ravi Phulari via shv)
-
-    HADOOP-6428. HttpServer sleeps with negative values (cos)
-
-    HADOOP-5623. Fixes a problem to do with status messages getting overwritten
-    in streaming jobs. (Rick Cox and Jothi Padmanabhan via tomwhite)
-
-    HADOOP-6315. Avoid incorrect use of BuiltInflater/BuiltInDeflater in
-    GzipCodec. (Aaron Kimball via cdouglas)
-
-    HDFS-187. Initialize secondary namenode http address in TestStartup.
-    (Todd Lipcon via szetszwo)
-
-    MAPREDUCE-433. Use more reliable counters in TestReduceFetch. (cdouglas)
-
-    HDFS-792. DFSClient 0.20.1 is incompatible with HDFS 0.20.2.
-    (Tod Lipcon via hairong)
-
-    HADOOP-6498. IPC client bug may cause rpc call hang. (Ruyue Ma and
-    hairong via hairong)
-
-    HADOOP-6596. Failing tests prevent the rest of test targets from
-    execution. (cos)
-
-    HADOOP-6524. Contrib tests are failing Clover'ed build. (cos)
-
-    HDFS-919. Create test to validate the BlocksVerified metric (Gary Murry
-    via cos)
-
-    HDFS-907. Add tests for getBlockLocations and totalLoad metrics.
-    (Ravi Phulari via cos)
-
-    MAPREDUCE-1251. c++ utils doesn't compile. (Eli Collins via tomwhite)
-
-    HADOOP-5612. Some c++ scripts are not chmodded before ant execution.
-    (Todd Lipcon via tomwhite)
-
-    HADOOP-1849. Add undocumented configuration parameter for per handler 
-    call queue size in IPC Server. (shv)
-
 Release 0.20.1 - 2009-09-01
 
   INCOMPATIBLE CHANGES

+ 3 - 3
bin/hadoop

@@ -134,7 +134,7 @@ IFS=
 if [ -d "$HADOOP_HOME/webapps" ]; then
   CLASSPATH=${CLASSPATH}:$HADOOP_HOME
 fi
-for f in $HADOOP_HOME/hadoop-core-*.jar; do
+for f in $HADOOP_HOME/hadoop-*-core.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 
@@ -153,10 +153,10 @@ for f in $HADOOP_HOME/lib/jsp-2.1/*.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 
-for f in $HADOOP_HOME/hadoop-tools-*.jar; do
+for f in $HADOOP_HOME/hadoop-*-tools.jar; do
   TOOL_PATH=${TOOL_PATH}:$f;
 done
-for f in $HADOOP_HOME/build/hadoop-tools-*.jar; do
+for f in $HADOOP_HOME/build/hadoop-*-tools.jar; do
   TOOL_PATH=${TOOL_PATH}:$f;
 done
 

+ 1 - 1
bin/rcc

@@ -72,7 +72,7 @@ IFS=
 if [ -d "$HADOOP_HOME/webapps" ]; then
   CLASSPATH=${CLASSPATH}:$HADOOP_HOME
 fi
-for f in $HADOOP_HOME/hadoop-core-*.jar; do
+for f in $HADOOP_HOME/hadoop-*-core.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 

+ 98 - 286
build.xml

@@ -18,7 +18,6 @@
 -->
 
 <project name="Hadoop" default="compile" 
-   xmlns:artifact="urn:maven-artifact-ant"
    xmlns:ivy="antlib:org.apache.ivy.ant"> 
 
   <!-- Load all the default properties, and any the user wants    -->
@@ -28,17 +27,9 @@
  
   <property name="Name" value="Hadoop"/>
   <property name="name" value="hadoop"/>
-  <property name="version" value="0.20.4-SNAPSHOT"/>
+  <property name="version" value="0.20.2-dev"/>
   <property name="final.name" value="${name}-${version}"/>
   <property name="year" value="2009"/>
-  
-
-  <property name="core.final.name" value="${name}-core-${version}"/>
-  <property name="test.final.name" value="${name}-test-${version}"/>
-  <property name="examples.final.name" value="${name}-examples-${version}"/>
-  <property name="tools.final.name" value="${name}-tools-${version}"/>
-  <property name="ant.final.name" value="${name}-ant-${version}"/>
-  <property name="streaming.final.name" value="${name}-streaming-${version}"/>
 
   <property name="src.dir" value="${basedir}/src"/>  	
   <property name="core.src.dir" value="${src.dir}/core"/>
@@ -159,65 +150,29 @@
   <!-- IVY properteis set here -->
   <property name="ivy.dir" location="ivy" />
   <loadproperties srcfile="${ivy.dir}/libraries.properties"/>
-  <property name="mvnrepo" value="http://repo2.maven.org/maven2"/>
-  <property name="asfrepo" value="https://repository.apache.org"/> 
   <property name="ivy.jar" location="${ivy.dir}/ivy-${ivy.version}.jar"/>
-  <property name="ivy_repo_url" 
-    value="${mvnrepo}/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar"/>
-  <property name="ant_task.jar" 
-    location="${ivy.dir}/maven-ant-tasks-${ant-task.version}.jar"/>
-  <property name="tsk.org" value="/org/apache/maven/maven-ant-tasks/"/>
-  <property name="ant_task_repo_url"
-    value="${mvnrepo}${tsk.org}${ant-task.version}/maven-ant-tasks-${ant-task.version}.jar"/>
-  <property name="repo" value="snapshots"/>
-  <property name="asfsnapshotrepo" 
-    value="${asfrepo}/content/repositories/snapshots"/> 
-  <property name="asfstagingrepo"
-    value="${asfrepo}/service/local/staging/deploy/maven2"/> 
-  <property name="ivysettings.xml" location="${ivy.dir}/ivysettings.xml"/>
+  <property name="ivy_repo_url" value="http://repo2.maven.org/maven2/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar"/>
+  <property name="ivysettings.xml" location="${ivy.dir}/ivysettings.xml" />
   <property name="ivy.org" value="org.apache.hadoop"/>
   <property name="build.dir" location="build" />
   <property name="dist.dir" value="${build.dir}/${final.name}"/>
   <property name="build.ivy.dir" location="${build.dir}/ivy" />
-  <property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib"/>
-  <property name="common.ivy.lib.dir" 
-    location="${build.ivy.lib.dir}/${ant.project.name}/common"/>
-  <property name="build.ivy.report.dir" location="${build.ivy.dir}/report"/>
-
-  <property name="hadoop-core.pom" location="${ivy.dir}/hadoop-core-pom.xml"/>
-  <property name="hadoop-core-pom-template.xml" 
-    location="${ivy.dir}/hadoop-core-pom-template.xml"/>
-  <property name="hadoop-core.jar" location="${build.dir}/${core.final.name}.jar"/>
-  <property name="hadoop-test.pom" location="${ivy.dir}/hadoop-test-pom.xml"/>
-  <property name="hadoop-test-pom-template.xml" 
-    location="${ivy.dir}/hadoop-test-pom-template.xml" />
-  <property name="hadoop-test.jar" location="${build.dir}/${test.final.name}.jar"/>
-  <property name="hadoop-tools.pom" location="${ivy.dir}/hadoop-tools-pom.xml"/>
-  <property name="hadoop-tools-pom-template.xml" 
-    location="${ivy.dir}/hadoop-tools-pom-template.xml" />
-  <property name="hadoop-tools.jar" location="${build.dir}/${tools.final.name}.jar"/>
-  <property name="hadoop-examples.pom" location="${ivy.dir}/hadoop-examples-pom.xml"/>
-  <property name="hadoop-examples-pom-template.xml" 
-    location="${ivy.dir}/hadoop-examples-pom-template.xml"/>
-  <property name="hadoop-examples.jar" 
-    location="${build.dir}/${examples.final.name}.jar"/>
-  <property name="hadoop-streaming.pom" 
-    location="${ivy.dir}/hadoop-streaming-pom.xml"/>
-  <property name="hadoop-streaming-pom-template.xml" 
-    location="${ivy.dir}/hadoop-streaming-pom-template.xml"/>
-  <property name="hadoop-streaming.jar" 
-    location="${build.dir}/contrib/streaming/${streaming.final.name}.jar"/>
-   
+  <property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib" />
+  <property name="common.ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}/common"/>
+  <property name="build.ivy.report.dir" location="${build.ivy.dir}/report" />
+  <property name="build.ivy.maven.dir" location="${build.ivy.dir}/maven" />
+  <property name="build.ivy.maven.pom" location="${build.ivy.maven.dir}/hadoop-core-${hadoop.version}.pom" />
+  <property name="build.ivy.maven.jar" location="${build.ivy.maven.dir}/hadoop-core-${hadoop.version}.jar" />
+
   <!--this is the naming policy for artifacts we want pulled down-->
-  <property name="ivy.artifact.retrieve.pattern" 
-    value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/>
+  <property name="ivy.artifact.retrieve.pattern" value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/>
 
   <!--this is how artifacts that get built are named-->
   <property name="ivy.publish.pattern" value="hadoop-[revision]-core.[ext]"/>
+  <property name="hadoop.jar" location="${build.dir}/hadoop-${hadoop.version}-core.jar" />
 
   <!-- jdiff.home property set -->
-  <property name="jdiff.home" 
-    value="${build.ivy.lib.dir}/${ant.project.name}/jdiff"/>
+  <property name="jdiff.home" value="${build.ivy.lib.dir}/${ant.project.name}/jdiff"/>
   <property name="jdiff.jar" value="${jdiff.home}/jdiff-${jdiff.version}.jar"/>
   <property name="xerces.jar" value="${jdiff.home}/xerces-${xerces.version}.jar"/>
 
@@ -232,10 +187,6 @@
     </and>
   </condition>
 
-  <condition property="staging">
-     <equals arg1="${repo}" arg2="staging"/>
-  </condition>
-
   <!-- the normal classpath -->
   <path id="classpath">
     <pathelement location="${build.classes}"/>
@@ -257,8 +208,8 @@
     <pathelement location="${build.tools}"/>
     <pathelement path="${clover.jar}"/>
     <fileset dir="${test.lib.dir}">
-      <include name="**/*.jar"/>
-      <exclude name="**/excluded/"/>
+      <include name="**/*.jar" />
+      <exclude name="**/excluded/" />
     </fileset>
     <path refid="classpath"/>
   </path>
@@ -271,6 +222,9 @@
     <pathelement location="${build.dir}"/>
   </path>
 
+  <!-- properties dependent on the items defined above. -->
+  <!--<available classname="${rat.reporting.classname}" classpathref="classpath" property="rat.present" value="true"/> -->
+
   <!-- ====================================================== -->
   <!-- Macro definitions                                      -->
   <!-- ====================================================== -->
@@ -578,7 +532,7 @@
     <tar compression="gzip" destfile="${build.classes}/bin.tgz">
       <tarfileset dir="bin" mode="755"/>
     </tar>
-    <jar jarfile="${build.dir}/${core.final.name}.jar"
+    <jar jarfile="${build.dir}/${final.name}-core.jar"
          basedir="${build.classes}">
       <manifest>
         <section name="org/apache/hadoop">
@@ -600,7 +554,7 @@
   <!--                                                                    -->
   <!-- ================================================================== -->
   <target name="examples" depends="jar, compile-examples" description="Make the Hadoop examples jar.">
-    <jar jarfile="${build.dir}/${examples.final.name}.jar"
+    <jar jarfile="${build.dir}/${final.name}-examples.jar"
          basedir="${build.examples}">
       <manifest>
         <attribute name="Main-Class" 
@@ -611,7 +565,7 @@
 
   <target name="tools-jar" depends="jar, compile-tools" 
           description="Make the Hadoop tools jar.">
-    <jar jarfile="${build.dir}/${tools.final.name}.jar"
+    <jar jarfile="${build.dir}/${final.name}-tools.jar"
          basedir="${build.tools}">
       <manifest>
         <attribute name="Main-Class" 
@@ -731,7 +685,7 @@
   <!--                                                                    -->
   <!-- ================================================================== -->
   <target name="jar-test" depends="compile-core-test" description="Make hadoop-test.jar">
-    <jar jarfile="${build.dir}/${test.final.name}.jar"
+    <jar jarfile="${build.dir}/${final.name}-test.jar"
          basedir="${test.build.classes}">
          <manifest>
            <attribute name="Main-Class"
@@ -750,7 +704,6 @@
   <!-- ================================================================== -->
   <target name="test-core" depends="jar-test" description="Run core unit tests">
 
-    <delete file="${test.build.dir}/testsfailed"/>
     <delete dir="${test.build.data}"/>
     <mkdir dir="${test.build.data}"/>
     <delete dir="${test.log.dir}"/>
@@ -794,30 +747,18 @@
         <fileset dir="${test.src.dir}" includes="**/${testcase}.java"/>
       </batchtest>
     </junit>
-    <antcall target="checkfailure"/>
+    <fail if="tests.failed">Tests failed!</fail>
   </target>   
 
-  <target name="checkfailure" if="tests.failed">
-    <touch file="${test.build.dir}/testsfailed"/>
-    <fail unless="continueOnFailure">Tests failed!</fail>
-  </target>
-
   <target name="test-contrib" depends="compile, compile-core-test" description="Run contrib unit tests">
     <subant target="test">
        <property name="version" value="${version}"/>
-       <property name="clover.jar" value="${clover.jar}"/>
        <fileset file="${contrib.dir}/build.xml"/>
     </subant> 
   </target>
 	  
-  <target name="test" description="Run core, contrib tests">
-    <delete file="${test.build.dir}/testsfailed"/>
-    <property name="continueOnFailure" value="true"/>
-    <antcall target="test-core"/>
-    <antcall target="test-contrib"/>
-    <available file="${test.build.dir}/testsfailed" property="testsfailed"/>
-    <fail if="testsfailed">Tests failed!</fail>
-  </target>  
+  <target name="test" depends="test-core, test-contrib" description="Run core, contrib unit tests">
+  </target>
 
   <!-- Run all unit tests, not just Test*, and use non-test configuration. -->
   <target name="test-cluster" description="Run all unit tests, not just Test*, and use non-test configuration.">
@@ -887,10 +828,10 @@
       <sourcePath path="${examples.dir}" />
       <sourcePath path="${tools.src}" />
       <sourcePath path="${basedir}/src/contrib/streaming/src/java" />
-      <class location="${build.dir}/${core.final.name}.jar" />
-      <class location="${build.dir}/${examples.final.name}.jar" />
-      <class location="${build.dir}/${tools.final.name}.jar" />
-      <class location="${build.dir}/contrib/streaming/${streaming.final.name}.jar" />
+      <class location="${basedir}/build/${final.name}-core.jar" />
+      <class location="${basedir}/build/${final.name}-examples.jar" />
+      <class location="${basedir}/build/${final.name}-tools.jar" />
+      <class location="${basedir}/build/contrib/streaming/${final.name}-streaming.jar" />
     </findbugs>
 
         <xslt style="${findbugs.home}/src/xsl/default.xsl"
@@ -1142,7 +1083,7 @@
     </copy>
 
     <copy todir="${dist.dir}"> 
-      <fileset file="${build.dir}/${name}-*-${version}.jar"/>
+      <fileset file="${build.dir}/${final.name}-*.jar"/>
     </copy>
     
     <copy todir="${dist.dir}/bin">
@@ -1252,7 +1193,7 @@
     </copy>
 
     <copy todir="${dist.dir}"> 
-      <fileset file="${build.dir}/${name}-*-${version}.jar"/>
+      <fileset file="${build.dir}/${final.name}-*.jar"/>
     </copy>
     
     <copy todir="${dist.dir}/bin">
@@ -1316,29 +1257,12 @@
   <!-- ================================================================== -->
   <!-- Clean.  Delete the build files, and their directories              -->
   <!-- ================================================================== -->
-  <target name="clean" depends="clean-contrib, clean-sign" description="Clean.  Delete the build files, and their directories">
+  <target name="clean" depends="clean-contrib" description="Clean.  Delete the build files, and their directories">
     <delete dir="${build.dir}"/>
     <delete dir="${docs.src}/build"/>
     <delete dir="${src.docs.cn}/build"/>
-    <delete file="${basedir}/ivy/hadoop-core-pom.xml"/>
-    <delete file="${basedir}/ivy/hadoop-test-pom.xml"/>
-    <delete file="${basedir}/ivy/hadoop-examples-pom.xml"/>
-    <delete file="${basedir}/ivy/hadoop-tools-pom.xml"/>
-    <delete file="${basedir}/ivy/hadoop-streaming-pom.xml"/>
-  </target>
-
-  <target name="clean-sign" description="Clean.  Delete .asc files">
-    <delete>
-      <fileset dir="." includes="**/**/*.asc"/>
-    </delete>
-  </target>  
- 
-  <target name="veryclean" depends="clean" description="Delete mvn ant task jar and ivy ant taks jar">
-    <delete file="${ant_task.jar}"/>
-    <delete file="${ivy.jar}"/>
   </target>
 
-
   <!-- ================================================================== -->
   <!-- Clean contrib target. For now, must be called explicitly           -->
   <!-- Using subant instead of ant as a workaround for 30569              -->
@@ -1473,7 +1397,6 @@
   <target name="create-c++-utils-makefile" depends="check-c++-makefiles" 
                                            if="need.c++.utils.makefile">
     <mkdir dir="${build.c++.utils}"/>
-    <chmod file="${c++.utils.src}/configure" perm="ugo+x"/>
     <exec executable="${c++.utils.src}/configure" dir="${build.c++.utils}"
           failonerror="yes">
       <arg value="--prefix=${install.c++}"/>
@@ -1491,7 +1414,6 @@
   <target name="create-c++-pipes-makefile" depends="check-c++-makefiles" 
                                            if="need.c++.pipes.makefile">
     <mkdir dir="${build.c++.pipes}"/>
-    <chmod file="${c++.pipes.src}/configure" perm="ugo+x"/>
     <exec executable="${c++.pipes.src}/configure" dir="${build.c++.pipes}"
           failonerror="yes">
       <arg value="--prefix=${install.c++}"/>
@@ -1514,7 +1436,6 @@
           depends="check-c++-makefiles" 
           if="need.c++.examples.pipes.makefile">
     <mkdir dir="${build.c++.examples.pipes}"/>
-    <chmod file="${c++.examples.pipes.src}/configure" perm="ugo+x"/>
     <exec executable="${c++.examples.pipes.src}/configure" 
           dir="${build.c++.examples.pipes}"
           failonerror="yes">
@@ -1566,7 +1487,7 @@
   <target name="ant-tasks" depends="jar, compile-ant-tasks">
     <copy file="${anttasks.dir}/org/apache/hadoop/ant/antlib.xml"
           todir="${build.anttasks}/org/apache/hadoop/ant"/>
-    <jar destfile="${build.dir}/${ant.final.name}.jar">
+    <jar destfile="${build.dir}/${final.name}-ant.jar">
       <fileset dir="${build.anttasks}"/>
     </jar>
   </target>
@@ -1680,6 +1601,7 @@
     <mkdir dir="${build.ivy.dir}" />
     <mkdir dir="${build.ivy.lib.dir}" />
     <mkdir dir="${build.ivy.report.dir}" />
+    <mkdir dir="${build.ivy.maven.dir}" />
   </target>
 
   <target name="ivy-probe-antlib" >
@@ -1809,180 +1731,70 @@
     </echo>
   </target>
 
-  <target name="ant-task-download" description="To download mvn-ant-task">
-    <get src="${ant_task_repo_url}" dest="${ant_task.jar}" usetimestamp="true"/>
-  </target>
-
-  <target name="mvn-taskdef" depends="ant-task-download">
-     <path id="mvn-ant-task.classpath" path="${ant_task.jar}"/>
-     <typedef resource="org/apache/maven/artifact/ant/antlib.xml"
-         uri="urn:maven-artifact-ant"
-         classpathref="mvn-ant-task.classpath"/>
-  </target>  
-
-  <target name="mvn-install" depends="mvn-taskdef,bin-package,set-version"
-     description="To install hadoop core and test jars to local filesystem's m2 cache">
-     <artifact:pom file="${hadoop-core.pom}" id="hadoop.core"/>
-     <artifact:pom file="${hadoop-test.pom}" id="hadoop.test"/>
-     <artifact:pom file="${hadoop-examples.pom}" id="hadoop.examples"/>
-     <artifact:pom file="${hadoop-tools.pom}" id="hadoop.tools"/>
-     <artifact:pom file="${hadoop-streaming.pom}" id="hadoop.streaming"/>
-
-     <artifact:install file="${hadoop-core.jar}">
-        <pom refid="hadoop.core"/>
-     </artifact:install>
-     <artifact:install file="${hadoop-test.jar}">
-        <pom refid="hadoop.test"/>
-     </artifact:install>
-     <artifact:install file="${hadoop-tools.jar}">
-        <pom refid="hadoop.tools"/>
-     </artifact:install>
-     <artifact:install file="${hadoop-examples.jar}">
-        <pom refid="hadoop.examples"/>
-     </artifact:install>
-     <artifact:install file="${hadoop-streaming.jar}">
-        <pom refid="hadoop.streaming"/>
-     </artifact:install>
-  </target>
-
-  <target name="mvn-deploy" depends="mvn-taskdef, bin-package, set-version, signanddeploy, simpledeploy"
-     description="To deploy hadoop core and test jar's to apache maven repository"/>
-
-  <target name="signanddeploy" if="staging" depends="sign">
-     <artifact:pom file="${hadoop-core.pom}" id="hadoop.core"/>
-     <artifact:pom file="${hadoop-test.pom}" id="hadoop.core.test"/>
-     <artifact:pom file="${hadoop-examples.pom}" id="hadoop.examples"/>
-     <artifact:pom file="${hadoop-tools.pom}" id="hadoop.tools"/>
-     <artifact:pom file="${hadoop-streaming.pom}" id="hadoop.streaming"/>
-     <artifact:install-provider artifactId="wagon-http"
-       version="${wagon-http.version}"/>
-     <artifact:deploy file="${hadoop-core.jar}">
-         <remoteRepository id="apache.staging.https" url="${asfstagingrepo}"/>
-         <pom refid="hadoop.core"/>
-         <attach file="${hadoop-core.jar}.asc" type="jar.asc"/>
-         <attach file="${hadoop-core.pom}.asc" type="pom.asc"/>
-     </artifact:deploy>
-     <artifact:deploy file="${hadoop-test.jar}">
-         <remoteRepository id="apache.staging.https" url="${asfstagingrepo}"/> 
-         <pom refid="hadoop.core.test"/>
-         <attach file="${hadoop-test.jar}.asc" type="jar.asc"/>
-         <attach file="${hadoop-test.pom}.asc" type="pom.asc"/>
-     </artifact:deploy>
-     <artifact:deploy file="${hadoop-tools.jar}">
-         <remoteRepository id="apache.staging.https" url="${asfstagingrepo}"/> 
-         <pom refid="hadoop.tools"/>
-         <attach file="${hadoop-tools.jar}.asc" type="jar.asc"/>
-         <attach file="${hadoop-tools.pom}.asc" type="pom.asc"/>
-     </artifact:deploy>
-     <artifact:deploy file="${hadoop-examples.jar}">
-         <remoteRepository id="apache.staging.https" url="${asfstagingrepo}"/> 
-         <pom refid="hadoop.examples"/>
-         <attach file="${hadoop-examples.jar}.asc" type="jar.asc"/>
-         <attach file="${hadoop-examples.pom}.asc" type="pom.asc"/>
-     </artifact:deploy>
-     <artifact:deploy file="${hadoop-streaming.jar}">
-         <remoteRepository id="apache.staging.https" url="${asfstagingrepo}"/> 
-         <pom refid="hadoop.streaming"/>
-         <attach file="${hadoop-streaming.jar}.asc" type="jar.asc"/>
-         <attach file="${hadoop-streaming.pom}.asc" type="pom.asc"/>
-     </artifact:deploy>
-  </target>
-
-  <target name="sign" depends="clean-sign" if="staging">
-    <input message="password:>" addproperty="gpg.passphrase">
-     <handler classname="org.apache.tools.ant.input.SecureInputHandler" />
-    </input>
-    <macrodef name="sign-artifact" description="Signs the artifact">
-      <attribute name="input.file"/>
-      <attribute name="output.file" default="@{input.file}.asc"/>
-      <attribute name="gpg.passphrase"/>
-      <sequential>
-        <echo>Signing @{input.file} Sig File: @{output.file}</echo>
-        <exec executable="gpg" >
-          <arg value="--armor"/>
-          <arg value="--output"/>
-          <arg value="@{output.file}"/>
-          <arg value="--passphrase"/>
-          <arg value="@{gpg.passphrase}"/>
-          <arg value="--detach-sig"/>
-          <arg value="@{input.file}"/>
-        </exec>
-      </sequential>
-    </macrodef>
-    <sign-artifact input.file="${hadoop-core.jar}" 
-     output.file="${hadoop-core.jar}.asc" gpg.passphrase="${gpg.passphrase}"/>
-    <sign-artifact input.file="${hadoop-test.jar}" 
-     output.file="${hadoop-test.jar}.asc" gpg.passphrase="${gpg.passphrase}"/>
-    <sign-artifact input.file="${hadoop-tools.jar}" 
-     output.file="${hadoop-tools.jar}.asc" gpg.passphrase="${gpg.passphrase}"/>
-    <sign-artifact input.file="${hadoop-examples.jar}" 
-     output.file="${hadoop-examples.jar}.asc" gpg.passphrase="${gpg.passphrase}"/>
-    <sign-artifact input.file="${hadoop-streaming.jar}" 
-     output.file="${hadoop-streaming.jar}.asc" gpg.passphrase="${gpg.passphrase}"/>
-    <sign-artifact input.file="${hadoop-core.pom}" 
-     output.file="${hadoop-core.pom}.asc" gpg.passphrase="${gpg.passphrase}"/>
-    <sign-artifact input.file="${hadoop-test.pom}" 
-     output.file="${hadoop-test.pom}.asc" gpg.passphrase="${gpg.passphrase}"/>
-    <sign-artifact input.file="${hadoop-tools.pom}" 
-     output.file="${hadoop-tools.pom}.asc" gpg.passphrase="${gpg.passphrase}"/>
-    <sign-artifact input.file="${hadoop-examples.pom}" 
-     output.file="${hadoop-examples.pom}.asc" gpg.passphrase="${gpg.passphrase}"/>
-    <sign-artifact input.file="${hadoop-streaming.pom}" 
-     output.file="${hadoop-streaming.pom}.asc" gpg.passphrase="${gpg.passphrase}"/>
-  </target>
-
-  <target name="simpledeploy" unless="staging">
-     <artifact:pom file="${hadoop-core.pom}" id="hadoop.core"/>
-     <artifact:pom file="${hadoop-test.pom}" id="hadoop.test"/>
-     <artifact:pom file="${hadoop-examples.pom}" id="hadoop.examples"/>
-     <artifact:pom file="${hadoop-tools.pom}" id="hadoop.tools"/>
-     <artifact:pom file="${hadoop-streaming.pom}" id="hadoop.streaming"/>
-
-     <artifact:install-provider artifactId="wagon-http" version="${wagon-http.version}"/>
-     <artifact:deploy file="${hadoop-core.jar}">
-         <remoteRepository id="apache.snapshots.https" url="${asfsnapshotrepo}"/>
-         <pom refid="hadoop.core"/>
-     </artifact:deploy>
-     <artifact:deploy file="${hadoop-test.jar}">
-         <remoteRepository id="apache.snapshots.https" url="${asfsnapshotrepo}"/>
-         <pom refid="hadoop.test"/>
-     </artifact:deploy> 
-     <artifact:deploy file="${hadoop-examples.jar}">
-         <remoteRepository id="apache.snapshots.https" url="${asfsnapshotrepo}"/>
-         <pom refid="hadoop.examples"/>
-     </artifact:deploy>
-     <artifact:deploy file="${hadoop-tools.jar}">
-         <remoteRepository id="apache.snapshots.https" url="${asfsnapshotrepo}"/>
-         <pom refid="hadoop.tools"/>
-     </artifact:deploy>
-     <artifact:deploy file="${hadoop-streaming.jar}">
-         <remoteRepository id="apache.snapshots.https" url="${asfsnapshotrepo}"/>
-         <pom refid="hadoop.streaming"/>
-     </artifact:deploy>
-  </target>
-
-  <target name="set-version">
-    <delete file="${hadoop-core.pom}"/>
-    <delete file="${hadoop-test.pom}"/>
-    <delete file="${hadoop-examples.pom}"/>
-    <delete file="${hadoop-tools.pom}"/>
-    <delete file="${hadoop-streaming.pom}"/>
-    <copy file="${hadoop-core-pom-template.xml}" tofile="${hadoop-core.pom}"/>
-    <copy file="${hadoop-test-pom-template.xml}" tofile="${hadoop-test.pom}"/>
-    <copy file="${hadoop-examples-pom-template.xml}" tofile="${hadoop-examples.pom}"/>
-    <copy file="${hadoop-tools-pom-template.xml}" tofile="${hadoop-tools.pom}"/>
-    <copy file="${hadoop-streaming-pom-template.xml}" tofile="${hadoop-streaming.pom}"/>
-    <replaceregexp byline="true">
-      <regexp pattern="@version"/>
-      <substitution expression="${version}"/>
-      <fileset dir="${basedir}/ivy">
-        <include name="hadoop-core-pom.xml"/>
-        <include name="hadoop-test-pom.xml"/>
-        <include name="hadoop-tools-pom.xml"/>
-        <include name="hadoop-examples-pom.xml"/>
-        <include name="hadoop-streaming-pom.xml"/>
-      </fileset>
-    </replaceregexp>
+  <target name="assert-hadoop-jar-exists" depends="ivy-init">
+    <fail>
+      <condition >
+        <not>
+          <available file="${hadoop.jar}" />
+        </not>
+      </condition>
+      Not found: ${hadoop.jar}
+      Please run the target "jar" in the main build file
+    </fail>
+
+  </target>
+
+  <target name="ready-to-publish" depends="jar,assert-hadoop-jar-exists,ivy-resolve"/>
+
+  <target name="ivy-publish-local" depends="ready-to-publish,ivy-resolve">
+    <ivy:publish
+      settingsRef="${ant.project.name}.ivy.settings"
+      resolver="local"
+      pubrevision="${hadoop.version}"
+      overwrite="true"
+      artifactspattern="${build.dir}/${ivy.publish.pattern}" />
+  </target>
+
+
+  <!-- this is here for curiosity, to see how well the makepom task works
+  Answer: it depends whether you want transitive dependencies excluded or not
+  -->
+  <target name="makepom" depends="ivy-resolve">
+    <ivy:makepom settingsRef="${ant.project.name}.ivy.settings"
+      ivyfile="ivy.xml"
+      pomfile="${build.ivy.maven.dir}/generated.pom">
+      <ivy:mapping conf="default" scope="default"/>
+      <ivy:mapping conf="master" scope="master"/>
+      <ivy:mapping conf="runtime" scope="runtime"/>
+    </ivy:makepom>
+  </target>
+
+
+  <target name="copy-jar-to-maven" depends="ready-to-publish">
+    <copy file="${hadoop.jar}"
+      tofile="${build.ivy.maven.jar}"/>
+    <checksum file="${build.ivy.maven.jar}" algorithm="md5"/>
+  </target>
+
+  <target name="copypom" depends="ivy-init-dirs">
+
+   <presetdef name="expandingcopy" >
+    <copy overwrite="true">
+      <filterchain>
+        <expandproperties/>
+      </filterchain>
+    </copy>
+   </presetdef>
+
+   <expandingcopy file="ivy/hadoop-core.pom"
+      tofile="${build.ivy.maven.pom}"/>
+   <checksum file="${build.ivy.maven.pom}" algorithm="md5"/>
+  </target>
+
+  <target name="maven-artifacts" depends="copy-jar-to-maven,copypom" />
+
+  <target name="published" depends="ivy-publish-local,maven-artifacts">
+
   </target>
 
 </project>

+ 1 - 5
ivy.xml

@@ -256,10 +256,6 @@
       rev="${slf4j-log4j12.version}"
       conf="common->master">
     </dependency>
-    <dependency org="org.mockito"
-      name="mockito-all"
-      rev="${mockito-all.version}"
-      conf="common->master"/>
-</dependencies>
+    </dependencies>
   
 </ivy-module>

+ 0 - 122
ivy/hadoop-core-pom-template.xml

@@ -1,122 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-core</artifactId>
-  <packaging>jar</packaging>
-  <version>@version</version>
-  <dependencies>
-   <dependency>
-      <groupId>commons-cli</groupId>
-      <artifactId>commons-cli</artifactId>
-      <version>1.2</version>
-    </dependency>
-   <dependency>
-      <groupId>xmlenc</groupId>
-      <artifactId>xmlenc</artifactId>
-      <version>0.52</version>
-    </dependency>
-    <dependency>
-      <groupId>commons-httpclient</groupId>
-      <artifactId>commons-httpclient</artifactId>
-      <version>3.0.1</version>
-    </dependency>
-    <dependency>
-      <groupId>commons-codec</groupId>
-      <artifactId>commons-codec</artifactId>
-      <version>1.3</version>
-    </dependency>
-    <dependency>
-      <groupId>commons-net</groupId>
-      <artifactId>commons-net</artifactId>
-      <version>1.4.1</version>
-    </dependency>
-    <dependency>
-      <groupId>org.mortbay.jetty</groupId>
-      <artifactId>jetty</artifactId>
-      <version>6.1.14</version>
-    </dependency>
-    <dependency>
-      <groupId>org.mortbay.jetty</groupId>
-      <artifactId>jetty-util</artifactId>
-      <version>6.1.14</version>
-    </dependency>
-    <dependency>
-      <groupId>tomcat</groupId>
-      <artifactId>jasper-runtime</artifactId>
-      <version>5.5.12</version>
-    </dependency>
-    <dependency>
-      <groupId>tomcat</groupId>
-      <artifactId>jasper-compiler</artifactId>
-      <version>5.5.12</version>
-    </dependency>
-    <dependency>
-      <groupId>org.mortbay.jetty</groupId>
-      <artifactId>jsp-api-2.1</artifactId>
-      <version>6.1.14</version>
-    </dependency>
-    <dependency>
-      <groupId>org.mortbay.jetty</groupId>
-      <artifactId>jsp-2.1</artifactId>
-      <version>6.1.14</version>
-    </dependency>
-    <dependency>
-      <groupId>commons-el</groupId>
-      <artifactId>commons-el</artifactId>
-      <version>1.0</version>
-    </dependency>
-    <dependency>
-      <groupId>net.java.dev.jets3t</groupId>
-      <artifactId>jets3t</artifactId>
-      <version>0.7.1</version>
-    </dependency>
-    <dependency>
-      <groupId>commons-net</groupId>
-      <artifactId>commons-net</artifactId>
-      <version>1.4.1</version>
-    </dependency>
-    <dependency>
-      <groupId>org.mortbay.jetty</groupId>
-      <artifactId>servlet-api-2.5</artifactId>
-      <version>6.1.14</version>
-    </dependency>
-    <dependency>
-      <groupId>net.sf.kosmosfs</groupId>
-      <artifactId>kfs</artifactId>
-      <version>0.3</version>
-    </dependency>
-    <dependency>
-      <groupId>hsqldb</groupId>
-      <artifactId>hsqldb</artifactId>
-      <version>1.8.0.10</version>
-    </dependency>
-    <dependency>
-      <groupId>oro</groupId>
-      <artifactId>oro</artifactId>
-      <version>2.0.8</version>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jdt</groupId>
-      <artifactId>core</artifactId>
-      <version>3.1.1</version>
-    </dependency>
-  </dependencies>
-</project>

+ 257 - 0
ivy/hadoop-core.pom

@@ -0,0 +1,257 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+  <!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+  -->
+
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-core</artifactId>
+  <packaging>jar</packaging>
+  <version>${hadoop.version}</version>
+  <description>
+    Hadoop is the distributed computing framework of Apache; hadoop-core contains
+    the filesystem, job tracker and map/reduce modules
+  </description>
+  <licenses>
+    <license>
+      <name>Apache License, Version 2.0</name>
+      <url>http://apache.org/licenses/LICENSE-2.0</url>
+    </license>
+  </licenses>
+  <dependencies>
+
+
+    <!-- always include commons-logging and log4J -->
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+      <version>${commons-logging.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>avalon-framework</groupId>
+          <artifactId>avalon-framework</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>logkit</groupId>
+          <artifactId>logkit</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <version>${log4j.version}</version>
+      <scope>optional</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.mail</groupId>
+          <artifactId>mail</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.jms</groupId>
+          <artifactId>jms</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jdmk</groupId>
+          <artifactId>jmxtools</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jmx</groupId>
+          <artifactId>jmxri</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <!--SLF4J is a JAR-based dependency; this POM binds it to log4J-->
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>${slf4j-api.version}</version>
+      <scope>optional</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <version>${slf4j-log4j12.version}</version>
+      <scope>optional</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <!--Httpclient and its components are optional-->
+
+    <dependency>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
+      <version>3.1</version>
+      <scope>optional</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+      <version>1.3</version>
+      <scope>optional</scope>
+    </dependency>
+
+    <!--CLI is needed to scan the command line, but only the 1.0 branch is released -->
+    <dependency>
+      <groupId>commons-cli</groupId>
+      <artifactId>commons-cli</artifactId>
+      <version>2.0-20070823</version>
+      <scope>optional</scope>
+    </dependency>
+
+
+   <!-- this is used for the ftp:// filesystem-->
+    <dependency>
+      <groupId>commons-net</groupId>
+      <artifactId>commons-net</artifactId>
+      <version>1.4.1</version>
+      <scope>optional</scope>
+    </dependency>
+
+    <!-- Jetty is used to serve up the application. It is marked as optional because
+    clients do not need it. All server-side deployments will need
+     all of these files.-->
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>servlet-api</artifactId>
+      <version>${servlet-api.version}</version>
+      <scope>optional</scope>
+    </dependency>
+    <dependency>
+      <groupId>jetty</groupId>
+      <artifactId>org.mortbay.jetty</artifactId>
+      <version>${jetty.version}</version>
+      <scope>optional</scope>
+    </dependency>
+
+
+    <!--JSP support -->
+
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jsp-2.1</artifactId>
+      <version>${jetty.version}</version>
+      <scope>optional</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jsp-api-2.1</artifactId>
+      <version>${jetty.version}</version>
+      <scope>optional</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-el</groupId>
+      <artifactId>commons-el</artifactId>
+      <version>${commons-el.version}</version>
+      <scope>optional</scope>
+    </dependency>
+
+
+    <!--JSPC assistance-->
+
+    <dependency>
+      <groupId>org.eclipse.jdt</groupId>
+      <artifactId>core</artifactId>
+      <version>${core.version}</version>
+      <scope>optional</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.ant</groupId>
+      <artifactId>ant</artifactId>
+      <version>${apacheant.version}</version>
+      <scope>optional</scope>
+    </dependency>
+
+    <!-- JetS3t is a client library for S3.
+    -It is only needed if you want to work with S3 filesystems
+    -It pulls in commons-logging 1.1.1 and does not exclude all the cruft that comes with it.
+    By excluding it we stay in control of versions and dependencies
+    -->
+
+    <dependency>
+      <groupId>net.java.dev.jets3t</groupId>
+      <artifactId>jets3t</artifactId>
+      <version>${jets3t.version}</version>
+      <scope>optional</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <!--Kosmos filesystem
+    http://kosmosfs.sourceforge.net/
+    This is not in the central repository
+    -->
+    <!--
+        <dependency>
+          <groupId>org.kosmix</groupId>
+          <artifactId>kfs</artifactId>
+          <version>0.1</version>
+          <scope>optional</scope>
+        </dependency>
+    -->
+
+    <!--
+     http://xmlenc.sourceforge.net/
+     "The xmlenc library is a fast stream-based XML output library for Java."
+    -->
+    <dependency>
+      <groupId>xmlenc</groupId>
+      <artifactId>xmlenc</artifactId>
+      <version>0.52</version>
+      <scope>optional</scope>
+    </dependency>
+  </dependencies>
+</project>

+ 0 - 34
ivy/hadoop-examples-pom-template.xml

@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
- <!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-examples</artifactId>
-  <packaging>jar</packaging>
-  <version>@version</version>
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-core</artifactId>
-      <version>@version</version>
-    </dependency>
-  </dependencies>
-</project>

+ 0 - 34
ivy/hadoop-streaming-pom-template.xml

@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
- <!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-streaming</artifactId>
-  <packaging>jar</packaging>
-  <version>@version</version>
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-core</artifactId>
-      <version>@version</version>
-    </dependency>
-  </dependencies>
-</project>

+ 0 - 53
ivy/hadoop-test-pom-template.xml

@@ -1,53 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-test</artifactId>
-  <packaging>jar</packaging>
-  <version>@version</version>
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-core</artifactId>
-      <version>@version</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.ftpserver</groupId>
-      <artifactId>ftplet-api</artifactId>
-      <version>1.0.0</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.mina</groupId>
-      <artifactId>mina-core</artifactId>
-      <version>2.0.0-M5</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.ftpserver</groupId>
-      <artifactId>ftpserver-core</artifactId>
-      <version>1.0.0</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.ftpserver</groupId>
-      <artifactId>ftpserver-deprecated</artifactId>
-      <version>1.0.0-M2</version>
-    </dependency>
-  </dependencies>
-</project>

+ 0 - 34
ivy/hadoop-tools-pom-template.xml

@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
- <!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-tools</artifactId>
-  <packaging>jar</packaging>
-  <version>@version</version>
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-core</artifactId>
-      <version>@version</version>
-    </dependency>
-  </dependencies>
-</project>

+ 0 - 4
ivy/libraries.properties

@@ -18,7 +18,6 @@ hadoop.version=0.20.0
 
 #These are the versions of our dependencies (in alphabetical order)
 apacheant.version=1.7.0
-ant-task.version=2.0.10
 
 checkstyle.version=4.2
 
@@ -58,8 +57,6 @@ kfs.version=0.1
 log4j.version=1.2.15
 lucene-core.version=2.3.1
 
-mockito-all.version=1.8.2
-
 oro.version=2.0.8
 
 rats-lib.version=0.5.1
@@ -70,6 +67,5 @@ servlet-api.version=2.5
 slf4j-api.version=1.4.3
 slf4j-log4j12.version=1.4.3
 
-wagon-http.version=1.0-beta-2
 xmlenc.version=0.52
 xerces.version=1.4.4

File diff suppressed because it is too large
+ 0 - 11
lib/jdiff/hadoop_0.20.1.xml


File diff suppressed because it is too large
+ 0 - 11
lib/jdiff/hadoop_0.20.2.xml


+ 1 - 1
src/ant/org/apache/hadoop/ant/condition/DfsBaseConditional.java

@@ -56,7 +56,7 @@ public abstract class DfsBaseConditional extends org.apache.hadoop.ant.DfsTask
 
   protected int postCmd(int exit_code) {
     exit_code = super.postCmd(exit_code);
-    result = exit_code == 0;
+    result = exit_code == 1;
     return exit_code;
   }
 

+ 2 - 8
src/c++/libhdfs/hdfs.c

@@ -492,7 +492,7 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
         if (!blockSize) {
             if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration, 
                              HADOOP_CONF, "getLong", "(Ljava/lang/String;J)J",
-                             jStrBlockSize, (jlong)67108864)) {
+                             jStrBlockSize, 67108864)) {
                 errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                            "FileSystem::%s(%s)", method,
                                            signature);
@@ -2071,18 +2071,12 @@ hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
 
 void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
 {
-    //Free the mName, mOwner, and mGroup
+    //Free the mName
     int i;
     for (i=0; i < numEntries; ++i) {
         if (hdfsFileInfo[i].mName) {
             free(hdfsFileInfo[i].mName);
         }
-        if (hdfsFileInfo[i].mOwner) {
-            free(hdfsFileInfo[i].mOwner);
-        }
-        if (hdfsFileInfo[i].mGroup) {
-            free(hdfsFileInfo[i].mGroup);
-        }
     }
 
     //Free entire block

+ 1 - 1
src/c++/libhdfs/hdfsJniHelper.c

@@ -222,7 +222,7 @@ int invokeMethod(JNIEnv *env, RetVal *retval, Exc *exc, MethType methType,
 }
 
 jarray constructNewArrayString(JNIEnv *env, Exc *exc, const char **elements, int size) {
-  const char *className = "java/lang/String";
+  const char *className = "Ljava/lang/String;";
   jobjectArray result;
   int i;
   jclass arrCls = (*env)->FindClass(env, className);

+ 2 - 0
src/c++/libhdfs/hdfsJniHelper.h

@@ -30,6 +30,8 @@
 
 #define PATH_SEPARATOR ':'
 
+#define USER_CLASSPATH "/home/y/libexec/hadoop/conf:/home/y/libexec/hadoop/lib/hadoop-0.1.0.jar"
+
 
 /** Denote the method we want to invoke as STATIC or INSTANCE */
 typedef enum {

+ 0 - 2
src/c++/pipes/api/hadoop/Pipes.hh

@@ -31,8 +31,6 @@
 #include <string>
 #endif
 
-#include <stdint.h>
-
 namespace HadoopPipes {
 
 /**

+ 0 - 1
src/c++/pipes/impl/HadoopPipes.cc

@@ -28,7 +28,6 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <string.h>
 #include <strings.h>
 #include <sys/socket.h>
 #include <pthread.h>

+ 0 - 1
src/c++/utils/api/hadoop/SerialUtils.hh

@@ -19,7 +19,6 @@
 #define HADOOP_SERIAL_UTILS_HH
 
 #include <string>
-#include <stdint.h>
 
 namespace HadoopUtils {
 

+ 0 - 1
src/c++/utils/impl/SerialUtils.cc

@@ -22,7 +22,6 @@
 #include <rpc/types.h>
 #include <rpc/xdr.h>
 #include <string>
-#include <string.h>
 
 using std::string;
 

+ 0 - 2
src/c++/utils/impl/StringUtils.cc

@@ -21,8 +21,6 @@
 #include <errno.h>
 #include <stdint.h>
 #include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
 #include <strings.h>
 #include <sys/time.h>
 

+ 3 - 4
src/contrib/build-contrib.xml

@@ -86,7 +86,6 @@
       <include name="**/*.jar" />
     </fileset>
     <path refid="${ant.project.name}.common-classpath"/>
-    <pathelement path="${clover.jar}"/>
   </path>
 
   <!-- the unit test classpath -->
@@ -174,7 +173,7 @@
   <target name="jar" depends="compile" unless="skip.contrib">
     <echo message="contrib: ${name}"/>
     <jar
-      jarfile="${build.dir}/hadoop-${name}-${version}.jar"
+      jarfile="${build.dir}/hadoop-${version}-${name}.jar"
       basedir="${build.classes}"      
     />
   </target>
@@ -186,7 +185,7 @@
   <target name="jar-examples" depends="compile-examples"
           if="examples.available" unless="skip.contrib">
     <echo message="contrib: ${name}"/>
-    <jar jarfile="${build.dir}/hadoop-${name}-examples-${version}.jar">
+    <jar jarfile="${build.dir}/hadoop-${version}-${name}-examples.jar">
       <fileset dir="${build.classes}">
       </fileset>
       <fileset dir="${build.examples}">
@@ -201,7 +200,7 @@
     <mkdir dir="${dist.dir}/contrib/${name}"/>
     <copy todir="${dist.dir}/contrib/${name}" includeEmptyDirs="false" flatten="true">
       <fileset dir="${build.dir}">
-        <include name="hadoop-${name}-${version}.jar" />
+        <include name="hadoop-${version}-${name}.jar" />
       </fileset>
     </copy>
   </target>

+ 3 - 2
src/contrib/data_join/build.xml

@@ -28,8 +28,9 @@ to call at top-level: ant deploy-contrib compile-core-test
   <!-- Override jar target to specify main class -->
   <target name="jar" depends="compile">
     <jar
-      jarfile="${build.dir}/hadoop-${name}-${version}.jar"
-      basedir="${build.classes}">      
+      jarfile="${build.dir}/hadoop-${version}-${name}.jar"
+      basedir="${build.classes}"      
+    >
   	<manifest>
 	    <attribute name="Main-Class" value="org.apache.hadoop.contrib.utils.join.DataJoinJob"/>
 	</manifest>

+ 2 - 2
src/contrib/eclipse-plugin/build.xml

@@ -66,10 +66,10 @@
   <!-- Override jar target to specify manifest -->
   <target name="jar" depends="compile" unless="skip.contrib">
     <mkdir dir="${build.dir}/lib"/>
-    <copy file="${hadoop.root}/build/hadoop-core-${version}.jar" tofile="${build.dir}/lib/hadoop-core.jar" verbose="true"/>
+    <copy file="${hadoop.root}/build/hadoop-${version}-core.jar" tofile="${build.dir}/lib/hadoop-core.jar" verbose="true"/>
     <copy file="${hadoop.root}/build/ivy/lib/Hadoop/common/commons-cli-${commons-cli.version}.jar"  todir="${build.dir}/lib" verbose="true"/>
     <jar
-      jarfile="${build.dir}/hadoop-${name}-${version}.jar"
+      jarfile="${build.dir}/hadoop-${version}-${name}.jar"
       manifest="${root}/META-INF/MANIFEST.MF">
       <fileset dir="${build.dir}" includes="classes/ lib/"/>
       <fileset dir="${root}" includes="resources/ plugin.xml"/>

+ 2 - 2
src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java

@@ -32,7 +32,7 @@ import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy;
 import org.eclipse.jdt.core.IJavaProject;
 import org.eclipse.jdt.core.IType;
 import org.eclipse.jdt.core.JavaCore;
-import org.eclipse.jdt.debug.ui.launchConfigurations.JavaApplicationLaunchShortcut;
+import org.eclipse.jdt.internal.debug.ui.launcher.JavaApplicationLaunchShortcut;
 import org.eclipse.jdt.launching.IJavaLaunchConfigurationConstants;
 import org.eclipse.jdt.launching.IRuntimeClasspathEntry;
 import org.eclipse.jdt.launching.JavaRuntime;
@@ -64,7 +64,7 @@ public class HadoopApplicationLaunchShortcut extends
     // Find an existing or create a launch configuration (Standard way)
     ILaunchConfiguration iConf =
         super.findLaunchConfiguration(type, configType);
-    if (iConf == null) iConf = super.createConfiguration(type);
+
     ILaunchConfigurationWorkingCopy iConfWC;
     try {
       /*

+ 2 - 2
src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java

@@ -159,9 +159,9 @@ public class RunOnHadoopWizard extends Wizard {
 
     // Write it to the disk file
     try {
-      // File confFile = File.createTempFile("core-site-", ".xml",
+      // File confFile = File.createTempFile("hadoop-site-", ".xml",
       // confDir);
-      File confFile = new File(confDir, "core-site.xml");
+      File confFile = new File(confDir, "hadoop-site.xml");
       FileOutputStream fos = new FileOutputStream(confFile);
       conf.writeXml(fos);
       fos.close();
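Note: the hunk above switches the Run-on-Hadoop wizard back to writing its generated cluster settings into hadoop-site.xml, the single pre-split configuration file. For illustration only (not part of this commit), a minimal sketch of writing and re-reading such a file with the 0.20 Configuration API; the directory, file name and property values are placeholders:

    import java.io.File;
    import java.io.FileOutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class ConfRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.default.name", "hdfs://localhost:9000");   // placeholder values
        conf.set("mapred.job.tracker", "localhost:9001");

        // Write the settings out as an XML config file, as the wizard does.
        File confDir = new File(System.getProperty("java.io.tmpdir"), "hadoop-conf");
        confDir.mkdirs();
        File confFile = new File(confDir, "hadoop-site.xml");
        FileOutputStream fos = new FileOutputStream(confFile);
        conf.writeXml(fos);
        fos.close();

        // A fresh Configuration can then pick the file up as an extra resource.
        Configuration reloaded = new Configuration();
        reloaded.addResource(new Path(confFile.getAbsolutePath()));
        System.out.println(reloaded.get("mapred.job.tracker"));
      }
    }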

+ 2 - 2
src/contrib/failmon/build.xml

@@ -21,7 +21,7 @@
 
   <import file="../build-contrib.xml"/>
 
-  <property name="jarfile" value="${build.dir}/hadoop-${name}-${version}.jar"/>
+  <property name="jarfile" value="${build.dir}/${name}.jar"/>
 
   <target name="jar" depends="compile" unless="skip.contrib">
     <!-- Make sure that the hadoop jar has been created -->
@@ -113,7 +113,7 @@
     <delete file="${name}.jar"/>
 
     <move file="${name}.tar.gz" todir="${build.dir}"/>
-    <echo message= "${hadoop.root}/build/contrib/failmon/hadoop-${name}-${version}.jar"/>
+    <echo message= "${hadoop.root}/build/contrib/failmon/${name}.jar"/>
     
   </target>
   

+ 45 - 59
src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerServlet.java

@@ -18,9 +18,7 @@
 
 package org.apache.hadoop.mapred;
 
-import java.io.ByteArrayOutputStream;
 import java.io.IOException;
-import java.io.OutputStream;
 import java.io.PrintWriter;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
@@ -124,12 +122,7 @@ public class FairSchedulerServlet extends HttpServlet {
     }
     // Print out the normal response
     response.setContentType("text/html");
-
-    // Because the client may read arbitrarily slow, and we hold locks while
-    // the servlet output, we want to write to our own buffer which we know
-    // won't block.
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    PrintWriter out = new PrintWriter(baos);
+    PrintWriter out = new PrintWriter(response.getOutputStream());
     String hostname = StringUtils.simpleHostname(
         jobTracker.getJobTrackerMachine());
     out.print("<html><head>");
@@ -144,11 +137,6 @@ public class FairSchedulerServlet extends HttpServlet {
     showAdminForm(out, advancedView);
     out.print("</body></html>\n");
     out.close();
-
-    // Flush our buffer to the real servlet output
-    OutputStream servletOut = response.getOutputStream();
-    baos.writeTo(servletOut);
-    servletOut.close();
   }
 
   /**
@@ -218,53 +206,51 @@ public class FairSchedulerServlet extends HttpServlet {
     out.print("<th>Finished</th><th>Running</th><th>Fair Share</th>" +
         (advancedView ? "<th>Weight</th><th>Deficit</th><th>minReduces</th>" : ""));
     out.print("</tr>\n");
-    synchronized (jobTracker) {
-      Collection<JobInProgress> runningJobs = jobTracker.getRunningJobs();
-      synchronized (scheduler) {
-        for (JobInProgress job: runningJobs) {
-          JobProfile profile = job.getProfile();
-          JobInfo info = scheduler.infos.get(job);
-          if (info == null) { // Job finished, but let's show 0's for info
-            info = new JobInfo();
-          }
-          out.print("<tr>\n");
-          out.printf("<td>%s</td>\n", DATE_FORMAT.format(
-                       new Date(job.getStartTime())));
-          out.printf("<td><a href=\"jobdetails.jsp?jobid=%s\">%s</a></td>",
-                     profile.getJobID(), profile.getJobID());
-          out.printf("<td>%s</td>\n", profile.getUser());
-          out.printf("<td>%s</td>\n", profile.getJobName());
-          out.printf("<td>%s</td>\n", generateSelect(
-                       scheduler.getPoolManager().getPoolNames(),
-                       scheduler.getPoolManager().getPoolName(job),
-                       "/scheduler?setPool=<CHOICE>&jobid=" + profile.getJobID() +
-                       (advancedView ? "&advanced" : "")));
-          out.printf("<td>%s</td>\n", generateSelect(
-                       Arrays.asList(new String[]
-                         {"VERY_LOW", "LOW", "NORMAL", "HIGH", "VERY_HIGH"}),
-                       job.getPriority().toString(),
-                       "/scheduler?setPriority=<CHOICE>&jobid=" + profile.getJobID() +
-                       (advancedView ? "&advanced" : "")));
-          out.printf("<td>%d / %d</td><td>%d</td><td>%8.1f</td>\n",
-                     job.finishedMaps(), job.desiredMaps(), info.runningMaps,
-                     info.mapFairShare);
-          if (advancedView) {
-            out.printf("<td>%8.1f</td>\n", info.mapWeight);
-            out.printf("<td>%s</td>\n", info.neededMaps > 0 ?
-                       (info.mapDeficit / 1000) + "s" : "--");
-            out.printf("<td>%d</td>\n", info.minMaps);
-          }
-          out.printf("<td>%d / %d</td><td>%d</td><td>%8.1f</td>\n",
-                     job.finishedReduces(), job.desiredReduces(), info.runningReduces,
-                     info.reduceFairShare);
-          if (advancedView) {
-            out.printf("<td>%8.1f</td>\n", info.reduceWeight);
-            out.printf("<td>%s</td>\n", info.neededReduces > 0 ?
-                       (info.reduceDeficit / 1000) + "s" : "--");
-            out.printf("<td>%d</td>\n", info.minReduces);
-          }
-          out.print("</tr>\n");
+    Collection<JobInProgress> runningJobs = jobTracker.getRunningJobs();
+    synchronized (scheduler) {
+      for (JobInProgress job: runningJobs) {
+        JobProfile profile = job.getProfile();
+        JobInfo info = scheduler.infos.get(job);
+        if (info == null) { // Job finished, but let's show 0's for info
+          info = new JobInfo();
+        }
+        out.print("<tr>\n");
+        out.printf("<td>%s</td>\n", DATE_FORMAT.format(
+            new Date(job.getStartTime())));
+        out.printf("<td><a href=\"jobdetails.jsp?jobid=%s\">%s</a></td>",
+            profile.getJobID(), profile.getJobID());
+        out.printf("<td>%s</td>\n", profile.getUser());
+        out.printf("<td>%s</td>\n", profile.getJobName());
+        out.printf("<td>%s</td>\n", generateSelect(
+            scheduler.getPoolManager().getPoolNames(),
+            scheduler.getPoolManager().getPoolName(job),
+            "/scheduler?setPool=<CHOICE>&jobid=" + profile.getJobID() +
+            (advancedView ? "&advanced" : "")));
+        out.printf("<td>%s</td>\n", generateSelect(
+            Arrays.asList(new String[]
+                {"VERY_LOW", "LOW", "NORMAL", "HIGH", "VERY_HIGH"}),
+            job.getPriority().toString(),
+            "/scheduler?setPriority=<CHOICE>&jobid=" + profile.getJobID() +
+            (advancedView ? "&advanced" : "")));
+        out.printf("<td>%d / %d</td><td>%d</td><td>%8.1f</td>\n",
+            job.finishedMaps(), job.desiredMaps(), info.runningMaps,
+            info.mapFairShare);
+        if (advancedView) {
+          out.printf("<td>%8.1f</td>\n", info.mapWeight);
+          out.printf("<td>%s</td>\n", info.neededMaps > 0 ?
+              (info.mapDeficit / 1000) + "s" : "--");
+          out.printf("<td>%d</td>\n", info.minMaps);
+        }
+        out.printf("<td>%d / %d</td><td>%d</td><td>%8.1f</td>\n",
+            job.finishedReduces(), job.desiredReduces(), info.runningReduces,
+            info.reduceFairShare);
+        if (advancedView) {
+          out.printf("<td>%8.1f</td>\n", info.reduceWeight);
+          out.printf("<td>%s</td>\n", info.neededReduces > 0 ?
+              (info.reduceDeficit / 1000) + "s" : "--");
+          out.printf("<td>%d</td>\n", info.minReduces);
         }
+        out.print("</tr>\n");
       }
     }
     out.print("</table>\n");
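Note: the left-hand side of the hunk above (the code this branch drops) renders the whole page into an in-memory buffer while the JobTracker and scheduler locks are held, and only copies it to the servlet output stream afterwards, so a slow HTTP client can never stall the locked section. A hedged sketch of that buffer-then-flush pattern in isolation; BufferedReportServlet, renderPage and schedulerLock are made-up names, not Hadoop code:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.io.PrintWriter;
    import javax.servlet.http.HttpServlet;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    public class BufferedReportServlet extends HttpServlet {
      private final Object schedulerLock = new Object();   // stands in for the real locks

      @Override
      public void doGet(HttpServletRequest request, HttpServletResponse response)
          throws IOException {
        response.setContentType("text/html");

        // Render into a local buffer while holding the lock; writing to a
        // ByteArrayOutputStream can never block on the client.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        PrintWriter out = new PrintWriter(baos);
        synchronized (schedulerLock) {
          renderPage(out);                // hypothetical page generator
        }
        out.close();

        // The buffered page goes to the client only after the lock is released.
        OutputStream servletOut = response.getOutputStream();
        baos.writeTo(servletOut);
        servletOut.close();
      }

      private void renderPage(PrintWriter out) {
        out.print("<html><body>report goes here</body></html>\n");
      }
    }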

+ 0 - 4
src/contrib/hdfsproxy/ivy.xml

@@ -30,10 +30,6 @@
       name="commons-logging"
       rev="${commons-logging.version}"
       conf="common->default"/>
-    <dependency org="commons-cli"
-      name="commons-cli"
-      rev="${commons-cli.version}"
-      conf="common->default"/>
     <dependency org="log4j"
       name="log4j"
       rev="${log4j.version}"

+ 1 - 1
src/contrib/index/build.xml

@@ -25,7 +25,7 @@
   <target name="jar" depends="compile" unless="skip.contrib">
     <echo message="contrib: ${name}"/>
     <jar
-      jarfile="${build.dir}/hadoop-${name}-${version}.jar"
+      jarfile="${build.dir}/hadoop-${version}-${name}.jar"
       basedir="${build.classes}"
     >
       <manifest>

+ 4 - 2
src/contrib/streaming/build.xml

@@ -27,8 +27,10 @@ to call at top-level: ant deploy-contrib compile-core-test
 
   <!-- Override jar target to specify main class -->
   <target name="jar" depends="compile">
-      <jar jarfile="${build.dir}/hadoop-${name}-${version}.jar"
-      basedir="${build.classes}">
+    <jar
+      jarfile="${build.dir}/hadoop-${version}-${name}.jar"
+      basedir="${build.classes}"      
+    >
   	<manifest>
 	    <attribute name="Main-Class" value="org.apache.hadoop.streaming.HadoopStreaming"/>
 	</manifest>

+ 1 - 7
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java

@@ -385,11 +385,7 @@ public abstract class PipeMapRed {
           if (now-lastStdoutReport > reporterOutDelay_) {
             lastStdoutReport = now;
             String hline = "Records R/W=" + numRecRead_ + "/" + numRecWritten_;
-            if (!processProvidedStatus_) {
-              reporter.setStatus(hline);
-            } else {
-              reporter.progress();
-            }
+            reporter.setStatus(hline);
             logprintln(hline);
             logflush();
           }
@@ -450,7 +446,6 @@ public abstract class PipeMapRed {
             if (matchesCounter(lineStr)) {
               incrCounter(lineStr);
             } else if (matchesStatus(lineStr)) {
-              processProvidedStatus_ = true;
               setStatus(lineStr);
             } else {
               LOG.warn("Cannot parse reporter line: " + lineStr);
@@ -676,5 +671,4 @@ public abstract class PipeMapRed {
   String LOGNAME;
   PrintStream log_;
 
-  volatile boolean processProvidedStatus_ = false;
 }
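Note: the PipeMapRed hunks above handle the streaming reporter protocol: lines a task writes to its stderr in the form reporter:counter:<group>,<name>,<amount> or reporter:status:<message> are picked out (matchesCounter/matchesStatus) and turned into counter increments or task status updates. For illustration only, a minimal stand-alone mapper that speaks this protocol; the group and counter names are invented:

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;

    // Usable as a streaming mapper: echoes stdin to stdout and reports
    // progress back to the framework over stderr.
    public class ReporterDemoMapper {
      public static void main(String[] args) throws IOException {
        System.err.println("reporter:status:starting echo");
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
        String line;
        long records = 0;
        while ((line = in.readLine()) != null) {
          System.out.println(line);                // normal map output
          records++;
          // Increments a (made-up) counter by 1.
          System.err.println("reporter:counter:DemoGroup,RecordsSeen,1");
        }
        System.err.println("reporter:status:done after " + records + " records");
      }
    }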

+ 3 - 5
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java

@@ -118,8 +118,6 @@ public class StreamJob implements Tool {
       return submitAndMonitorJob();
     }catch (IllegalArgumentException ex) {
       //ignore, since log will already be printed
-      // print the log in debug mode.
-      LOG.debug("Error in streaming job", ex);
       return 1;
     }
   }
@@ -344,13 +342,13 @@ public class StreamJob implements Tool {
     return OptionBuilder.withDescription(desc).create(name);
   }
   
-  private void validate(final List<String> values) 
+  private static void validate(final List<String> values) 
   throws IllegalArgumentException {
     for (String file : values) {
       File f = new File(file);  
       if (!f.canRead()) {
-        fail("File: " + f.getAbsolutePath() 
-          + " does not exist, or is not readable."); 
+        throw new IllegalArgumentException("File : " + f.getAbsolutePath() 
+                                           + " is not readable."); 
       }
     }
   }

+ 2 - 11
src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java

@@ -32,16 +32,8 @@ public class StderrApp
    * postWriteLines to stderr.
    */
   public static void go(int preWriteLines, int sleep, int postWriteLines) throws IOException {
-    go(preWriteLines, sleep, postWriteLines, false);
-  }
-  
-  public static void go(int preWriteLines, int sleep, int postWriteLines, boolean status) throws IOException {
     BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
     String line;
-    
-    if (status) {
-      System.err.println("reporter:status:starting echo");
-    }      
        
     while (preWriteLines > 0) {
       --preWriteLines;
@@ -65,14 +57,13 @@ public class StderrApp
 
   public static void main(String[] args) throws IOException {
     if (args.length < 3) {
-      System.err.println("Usage: StderrApp PREWRITE SLEEP POSTWRITE [STATUS]");
+      System.err.println("Usage: StderrApp PREWRITE SLEEP POSTWRITE");
       return;
     }
     int preWriteLines = Integer.parseInt(args[0]);
     int sleep = Integer.parseInt(args[1]);
     int postWriteLines = Integer.parseInt(args[2]);
-    boolean status = args.length > 3 ? Boolean.parseBoolean(args[3]) : false;
     
-    go(preWriteLines, sleep, postWriteLines, status);
+    go(preWriteLines, sleep, postWriteLines);
   }
 }

+ 1 - 2
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java

@@ -30,12 +30,11 @@ public class TestGzipInput extends TestStreaming
 {
 
   public TestGzipInput() throws IOException {
-    INPUT_FILE = new File(TEST_DIR, "input.txt.gz");
+    INPUT_FILE = new File("input.txt.gz");
   }
   
   protected void createInput() throws IOException
   {
-    assertTrue("Creating " + TEST_DIR, TEST_DIR.mkdirs());
     GZIPOutputStream out = new GZIPOutputStream(
                                                 new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
     out.write(input.getBytes("UTF-8"));

+ 51 - 33
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java

@@ -24,7 +24,6 @@ import java.io.IOException;
 import java.io.DataOutputStream;
 import java.io.InputStreamReader;
 import java.io.BufferedReader;
-import java.util.Arrays;
 import java.util.zip.ZipEntry;
 import java.util.jar.JarOutputStream;
 import java.util.zip.ZipOutputStream;
@@ -44,11 +43,10 @@ public class TestMultipleArchiveFiles extends TestStreaming
 {
 
   private StreamJob job;
-  private String INPUT_DIR = "multiple-archive-files/";
-  private String INPUT_FILE = INPUT_DIR + "input.txt";
-  private String CACHE_ARCHIVE_1 = INPUT_DIR + "cacheArchive1.zip";
+  private String INPUT_FILE = "input.txt";
+  private String CACHE_ARCHIVE_1 = "cacheArchive1.zip";
   private File CACHE_FILE_1 = null;
-  private String CACHE_ARCHIVE_2 = INPUT_DIR + "cacheArchive2.zip";
+  private String CACHE_ARCHIVE_2 = "cacheArchive2.zip";
   private File CACHE_FILE_2 = null;
   private String expectedOutput = null;
   private String OUTPUT_DIR = "out";
@@ -60,23 +58,27 @@ public class TestMultipleArchiveFiles extends TestStreaming
   private String strNamenode = null;
   private String namenode = null;
 
-  public TestMultipleArchiveFiles() throws Exception {
+  public TestMultipleArchiveFiles() throws IOException {
     CACHE_FILE_1 = new File("cacheArchive1");
     CACHE_FILE_2 = new File("cacheArchive2");
     input = "HADOOP";
     expectedOutput = "HADOOP\t\nHADOOP\t\n";
-    conf = new Configuration();      
-    dfs = new MiniDFSCluster(conf, 1, true, null);      
-    fileSys = dfs.getFileSystem();
-    namenode = fileSys.getUri().getAuthority();
-    mr  = new MiniMRCluster(1, namenode, 3);
-    strJobTracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
-    strNamenode = "fs.default.name=" + namenode;
+    try {
+      conf = new Configuration();      
+      dfs = new MiniDFSCluster(conf, 1, true, null);      
+      fileSys = dfs.getFileSystem();
+      namenode = fileSys.getUri().getAuthority();
+      mr  = new MiniMRCluster(1, namenode, 3);
+      strJobTracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
+      strNamenode = "fs.default.name=" + namenode;
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
   }
   
   protected void createInput() throws IOException
   {
-    fileSys.delete(new Path(INPUT_DIR), true);
+
     DataOutputStream dos = fileSys.create(new Path(INPUT_FILE));
     String inputFileString = "symlink1/cacheArchive1\nsymlink2/cacheArchive2";
     dos.write(inputFileString.getBytes("UTF-8"));
@@ -100,9 +102,14 @@ public class TestMultipleArchiveFiles extends TestStreaming
   }
 
   protected String[] genArgs() {
-    String workDir = fileSys.getWorkingDirectory().toString() + "/";
-    String cache1 = workDir + CACHE_ARCHIVE_1 + "#symlink1";
-    String cache2 = workDir + CACHE_ARCHIVE_2 + "#symlink2";
+    String cacheArchiveString1 = null;
+    String cacheArchiveString2 = null;
+    try {
+      cacheArchiveString1 = fileSys.getUri().toString()+fileSys.getWorkingDirectory().toString()+"/"+CACHE_ARCHIVE_1+"#symlink1";
+      cacheArchiveString2 = fileSys.getUri().toString()+fileSys.getWorkingDirectory().toString()+"/"+CACHE_ARCHIVE_2+"#symlink2";
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
 
     return new String[] {
       "-input", INPUT_FILE.toString(),
@@ -110,28 +117,39 @@ public class TestMultipleArchiveFiles extends TestStreaming
       "-mapper", "xargs cat", 
       "-reducer", "cat",
       "-jobconf", "mapred.reduce.tasks=1",
-      "-cacheArchive", cache1, 
-      "-cacheArchive", cache2,
+      "-cacheArchive", cacheArchiveString1, 
+      "-cacheArchive", cacheArchiveString2,
       "-jobconf", strNamenode,
       "-jobconf", strJobTracker,
       "-jobconf", "stream.tmpdir=" + System.getProperty("test.build.data","/tmp")
     };
   }
 
-  public void testCommandLine() throws IOException {
-   createInput();
-   job = new StreamJob(genArgs(), true);
-   if(job.go() != 0) {
-     throw new IOException("Job Failed");
-   }
-   StringBuffer output = new StringBuffer(256);
-   Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
-                                         new Path(OUTPUT_DIR)));
-   for (int i = 0; i < fileList.length; i++){
-     if (fileList[i].getName().equals("_logs")) continue;
-     output.append(StreamUtil.slurpHadoop(fileList[i], fileSys));
-   }
-   assertEquals(expectedOutput, output.toString());
+  public void testCommandLine() {
+    try {
+      createInput();
+      job = new StreamJob(genArgs(), true);
+      if(job.go() != 0) {
+        throw new Exception("Job Failed");
+      }
+      StringBuffer output = new StringBuffer(256);
+      Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
+                                            new Path(OUTPUT_DIR)));
+      for (int i = 0; i < fileList.length; i++){
+        BufferedReader bread =
+          new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
+        output.append(bread.readLine());
+        output.append("\n");
+        output.append(bread.readLine());
+        output.append("\n");
+      }
+      assertEquals(expectedOutput, output.toString());
+    } catch (Exception e) {
+      e.printStackTrace();
+    } finally {
+      CACHE_FILE_1.delete();
+      CACHE_FILE_2.delete();
+    }
   }
 
   public static void main(String[]args) throws Exception
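Note: genArgs() above builds -cacheArchive arguments of the form <hdfs-uri>#<symlink>, so each unpacked archive shows up in the task working directory under the given link name. As a hedged, programmatic counterpart (not what the test runs), roughly what that option corresponds to in the 0.20 DistributedCache API; the namenode address and paths are placeholders:

    import java.net.URI;
    import org.apache.hadoop.filecache.DistributedCache;
    import org.apache.hadoop.mapred.JobConf;

    public class CacheArchiveSetup {
      public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf();
        // The "#symlink1" fragment asks for a link of that name in the task cwd.
        DistributedCache.addCacheArchive(
            new URI("hdfs://namenode:8020/user/test/cacheArchive1.zip#symlink1"), conf);
        DistributedCache.addCacheArchive(
            new URI("hdfs://namenode:8020/user/test/cacheArchive2.zip#symlink2"), conf);
        // Symlink creation must be switched on explicitly.
        DistributedCache.createSymlink(conf);
      }
    }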

+ 68 - 64
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java

@@ -50,76 +50,80 @@ public class TestMultipleCachefiles extends TestCase
   {
   }
 
-  public void testMultipleCachefiles() throws Exception
+  public void testMultipleCachefiles()
   {
-    boolean mayExit = false;
-    MiniMRCluster mr = null;
-    MiniDFSCluster dfs = null; 
-    try{
-      Configuration conf = new Configuration();
-      dfs = new MiniDFSCluster(conf, 1, true, null);
-      FileSystem fileSys = dfs.getFileSystem();
-      String namenode = fileSys.getName();
-      mr  = new MiniMRCluster(1, namenode, 3);
-      // During tests, the default Configuration will use a local mapred
-      // So don't specify -config or -cluster
-      String strJobtracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
-      String strNamenode = "fs.default.name=" + namenode;
-      String argv[] = new String[] {
-        "-input", INPUT_FILE,
-        "-output", OUTPUT_DIR,
-        "-mapper", map,
-        "-reducer", reduce,
-        //"-verbose",
-        //"-jobconf", "stream.debug=set"
-        "-jobconf", strNamenode,
-        "-jobconf", strJobtracker,
-        "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
-        "-jobconf", "mapred.child.java.opts=-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
-                    "-Dbuild.test=" + System.getProperty("build.test") + " " +
-                    conf.get("mapred.child.java.opts",""),
-        "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#" + mapString,
-        "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE_2 + "#" + mapString2
-      };
+    try {
+      boolean mayExit = false;
+      MiniMRCluster mr = null;
+      MiniDFSCluster dfs = null; 
+      try{
+        Configuration conf = new Configuration();
+        dfs = new MiniDFSCluster(conf, 1, true, null);
+        FileSystem fileSys = dfs.getFileSystem();
+        String namenode = fileSys.getName();
+        mr  = new MiniMRCluster(1, namenode, 3);
+        // During tests, the default Configuration will use a local mapred
+        // So don't specify -config or -cluster
+        String strJobtracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
+        String strNamenode = "fs.default.name=" + namenode;
+        String argv[] = new String[] {
+          "-input", INPUT_FILE,
+          "-output", OUTPUT_DIR,
+          "-mapper", map,
+          "-reducer", reduce,
+          //"-verbose",
+          //"-jobconf", "stream.debug=set"
+          "-jobconf", strNamenode,
+          "-jobconf", strJobtracker,
+          "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
+          "-jobconf", "mapred.child.java.opts=-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
+                      "-Dbuild.test=" + System.getProperty("build.test") + " " +
+                      conf.get("mapred.child.java.opts",""),
+          "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#" + mapString,
+          "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE_2 + "#" + mapString2
+        };
 
-      fileSys.delete(new Path(OUTPUT_DIR));
-      
-      DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
-      file.writeBytes(mapString + "\n");
-      file.writeBytes(mapString2 + "\n");
-      file.close();
-      file = fileSys.create(new Path(CACHE_FILE));
-      file.writeBytes(cacheString);
-      file.close();
-      file = fileSys.create(new Path(CACHE_FILE_2));
-      file.writeBytes(cacheString2);
-      file.close();
+        fileSys.delete(new Path(OUTPUT_DIR));
         
-      job = new StreamJob(argv, mayExit);     
-      job.go();
+        DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
+        file.writeBytes(mapString + "\n");
+        file.writeBytes(mapString2 + "\n");
+        file.close();
+        file = fileSys.create(new Path(CACHE_FILE));
+        file.writeBytes(cacheString);
+        file.close();
+        file = fileSys.create(new Path(CACHE_FILE_2));
+        file.writeBytes(cacheString2);
+        file.close();
+          
+        job = new StreamJob(argv, mayExit);     
+        job.go();
 
-      fileSys = dfs.getFileSystem();
-      String line = null;
-      String line2 = null;
-      Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
-                                   new Path(OUTPUT_DIR),
-                                   new OutputLogFilter()));
-      for (int i = 0; i < fileList.length; i++){
-        System.out.println(fileList[i].toString());
-        BufferedReader bread =
-          new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
-        line = bread.readLine();
-        System.out.println(line);
-        line2 = bread.readLine();
-        System.out.println(line2);
+	fileSys = dfs.getFileSystem();
+        String line = null;
+        String line2 = null;
+        Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
+                                     new Path(OUTPUT_DIR),
+                                     new OutputLogFilter()));
+        for (int i = 0; i < fileList.length; i++){
+          System.out.println(fileList[i].toString());
+          BufferedReader bread =
+            new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
+          line = bread.readLine();
+          System.out.println(line);
+          line2 = bread.readLine();
+          System.out.println(line2);
+        }
+        assertEquals(cacheString + "\t", line);
+        assertEquals(cacheString2 + "\t", line2);
+      } finally{
+        if (dfs != null) { dfs.shutdown(); }
+        if (mr != null) { mr.shutdown();}
       }
-      assertEquals(cacheString + "\t", line);
-      assertEquals(cacheString2 + "\t", line2);
-    } finally{
-      if (dfs != null) { dfs.shutdown(); }
-      if (mr != null) { mr.shutdown();}
+      
+    } catch(Exception e) {
+      failTrace(e);
     }
-    
   }
 
   void failTrace(Exception e)

+ 10 - 1
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamAggregate.java

@@ -70,7 +70,7 @@ public class TestStreamAggregate extends TestCase
     };
   }
   
-  public void testCommandLine() throws Exception
+  public void testCommandLine()
   {
     try {
       try {
@@ -91,6 +91,8 @@ public class TestStreamAggregate extends TestCase
       System.err.println("outEx1=" + outputExpect);
       System.err.println("  out1=" + output);
       assertEquals(outputExpect, output);
+    } catch(Exception e) {
+      failTrace(e);
     } finally {
       File outFileCRC = new File(OUTPUT_DIR, ".part-00000.crc").getAbsoluteFile();
       INPUT_FILE.delete();
@@ -99,6 +101,13 @@ public class TestStreamAggregate extends TestCase
     }
   }
 
+  private void failTrace(Exception e)
+  {
+    StringWriter sw = new StringWriter();
+    e.printStackTrace(new PrintWriter(sw));
+    fail(sw.toString());
+  }
+
   public static void main(String[]args) throws Exception
   {
     new TestStreaming().testCommandLine();

+ 10 - 1
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamDataProtocol.java

@@ -78,7 +78,7 @@ public class TestStreamDataProtocol extends TestCase
     };
   }
   
-  public void testCommandLine() throws Exception
+  public void testCommandLine()
   {
     try {
       try {
@@ -100,6 +100,8 @@ public class TestStreamDataProtocol extends TestCase
       System.err.println("  out1=" + output);
       System.err.println("  equals=" + outputExpect.compareTo(output));
       assertEquals(outputExpect, output);
+    } catch(Exception e) {
+      failTrace(e);
     } finally {
       File outFileCRC = new File(OUTPUT_DIR, ".part-00000.crc").getAbsoluteFile();
       INPUT_FILE.delete();
@@ -108,6 +110,13 @@ public class TestStreamDataProtocol extends TestCase
     }
   }
 
+  private void failTrace(Exception e)
+  {
+    StringWriter sw = new StringWriter();
+    e.printStackTrace(new PrintWriter(sw));
+    fail(sw.toString());
+  }
+
   public static void main(String[]args) throws Exception
   {
     new TestStreamDataProtocol().testCommandLine();

+ 10 - 1
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamReduceNone.java

@@ -69,7 +69,7 @@ public class TestStreamReduceNone extends TestCase
     };
   }
   
-  public void testCommandLine() throws Exception
+  public void testCommandLine()
   {
     String outFileName = "part-00000";
     File outFile = null;
@@ -91,6 +91,8 @@ public class TestStreamReduceNone extends TestCase
       System.err.println("outEx1=" + outputExpect);
       System.err.println("  out1=" + output);
       assertEquals(outputExpect, output);
+    } catch(Exception e) {
+      failTrace(e);
     } finally {
       outFile.delete();
       File outFileCRC = new File(OUTPUT_DIR, "."+outFileName+".crc").getAbsoluteFile();
@@ -100,6 +102,13 @@ public class TestStreamReduceNone extends TestCase
     }
   }
 
+  private void failTrace(Exception e)
+  {
+    StringWriter sw = new StringWriter();
+    e.printStackTrace(new PrintWriter(sw));
+    fail(sw.toString());
+  }
+
   public static void main(String[]args) throws Exception
   {
     new TestStreamReduceNone().testCommandLine();

+ 3 - 1
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java

@@ -58,7 +58,7 @@ public class TestStreamXmlRecordReader extends TestStreaming
     };
   }
 
-  public void testCommandLine() throws IOException {
+  public void testCommandLine() {
     try {
       try {
         OUTPUT_DIR.getAbsoluteFile().delete();
@@ -71,6 +71,8 @@ public class TestStreamXmlRecordReader extends TestStreaming
       String output = StreamUtil.slurp(outFile);
       outFile.delete();
       assertEquals(input, output);
+    } catch (Exception e) {
+      e.printStackTrace();
     } finally {
       INPUT_FILE.delete();
       File outFileCRC = new File(OUTPUT_DIR, ".part-00000.crc").getAbsoluteFile();

+ 21 - 11
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java

@@ -104,16 +104,24 @@ public class TestStreamedMerge extends TestCase {
     return c;
   }
 
-  void lsr() throws Exception  {
-    System.out.println("lsr /");
-    ToolRunner.run(conf_, new FsShell(), new String[]{ "-lsr", "/" });
+  void lsr() {
+    try {
+      System.out.println("lsr /");
+      ToolRunner.run(conf_, new FsShell(), new String[]{ "-lsr", "/" });
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
   }
 
-  void printSampleInput() throws IOException {
-    System.out.println("cat /input/part-00");
-    String content = StreamUtil.slurpHadoop(new Path("/input/part-00"), fs_);
-    System.out.println(content);
-    System.out.println("cat done.");
+  void printSampleInput() {
+    try {
+      System.out.println("cat /input/part-00");
+      String content = StreamUtil.slurpHadoop(new Path("/input/part-00"), fs_);
+      System.out.println(content);
+      System.out.println("cat done.");
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
   }
 
   void callStreaming(String argSideOutput, boolean inputTagged) throws IOException {
@@ -202,7 +210,7 @@ public class TestStreamedMerge extends TestCase {
     StringBuffer buf_;
   }
 
-  public void testMain() throws Exception {
+  public void testMain() throws IOException {
     boolean success = false;
     String base = new File(".").getAbsolutePath();
     System.setProperty("hadoop.log.dir", base + "/logs");
@@ -220,6 +228,8 @@ public class TestStreamedMerge extends TestCase {
       }
       doAllTestJobs();
       success = true;
+    } catch (IOException io) {
+      io.printStackTrace();
     } finally {
       try {
         fs_.close();
@@ -233,14 +243,14 @@ public class TestStreamedMerge extends TestCase {
     }
   }
 
-  void doAllTestJobs() throws Exception 
+  void doAllTestJobs() throws IOException
   {
     goSocketTagged(true, false);
     goSocketTagged(false, false);
     goSocketTagged(true, true);
   }
   
-  void goSocketTagged(boolean socket, boolean inputTagged) throws Exception {
+  void goSocketTagged(boolean socket, boolean inputTagged) throws IOException {
     System.out.println("***** goSocketTagged: " + socket + ", " + inputTagged);
     String expect = createInputs(inputTagged);
     lsr();

+ 27 - 24
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java

@@ -29,9 +29,8 @@ public class TestStreaming extends TestCase
 
   // "map" command: grep -E (red|green|blue)
   // reduce command: uniq
-  protected File TEST_DIR;
-  protected File INPUT_FILE;
-  protected File OUTPUT_DIR;
+  protected File INPUT_FILE = new File("input.txt");
+  protected File OUTPUT_DIR = new File("out");
   protected String input = "roses.are.red\nviolets.are.blue\nbunnies.are.pink\n";
   // map behaves like "/usr/bin/tr . \\n"; (split words into lines)
   protected String map = StreamUtil.makeJavaCommand(TrApp.class, new String[]{".", "\\n"});
@@ -47,16 +46,10 @@ public class TestStreaming extends TestCase
     UtilTest utilTest = new UtilTest(getClass().getName());
     utilTest.checkUserDir();
     utilTest.redirectIfAntJunit();
-    TEST_DIR = new File(getClass().getName()).getAbsoluteFile();
-    OUTPUT_DIR = new File(TEST_DIR, "out");
-    INPUT_FILE = new File(TEST_DIR, "input.txt");
   }
 
   protected void createInput() throws IOException
   {
-    if (!TEST_DIR.exists()) {
-      assertTrue("Creating " + TEST_DIR, TEST_DIR.mkdirs());
-    }
     DataOutputStream out = new DataOutputStream(
                                                 new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
     out.write(input.getBytes("UTF-8"));
@@ -76,23 +69,33 @@ public class TestStreaming extends TestCase
     };
   }
   
-  public void testCommandLine() throws Exception
+  public void testCommandLine() throws IOException
   {
-    UtilTest.recursiveDelete(TEST_DIR);
+    try {
+      try {
+        OUTPUT_DIR.getAbsoluteFile().delete();
+      } catch (Exception e) {
+      }
+
+      createInput();
+      boolean mayExit = false;
 
-    createInput();
-    boolean mayExit = false;
- 
-    // During tests, the default Configuration will use a local mapred
-    // So don't specify -config or -cluster
-    job = new StreamJob(genArgs(), mayExit);      
-    job.go();
-    File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
-    String output = StreamUtil.slurp(outFile);
-    outFile.delete();
-    System.err.println("outEx1=" + outputExpect);
-    System.err.println("  out1=" + output);
-    assertEquals(outputExpect, output);
+      // During tests, the default Configuration will use a local mapred
+      // So don't specify -config or -cluster
+      job = new StreamJob(genArgs(), mayExit);      
+      job.go();
+      File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
+      String output = StreamUtil.slurp(outFile);
+      outFile.delete();
+      System.err.println("outEx1=" + outputExpect);
+      System.err.println("  out1=" + output);
+      assertEquals(outputExpect, output);
+    } finally {
+      File outFileCRC = new File(OUTPUT_DIR, ".part-00000.crc").getAbsoluteFile();
+      INPUT_FILE.delete();
+      outFileCRC.delete();
+      OUTPUT_DIR.getAbsoluteFile().delete();
+    }
   }
 
   public static void main(String[]args) throws Exception

+ 5 - 3
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCounters.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.streaming;
 
 import java.io.File;
 import java.io.IOException;
-import org.apache.hadoop.fs.FileUtil;
+
 import org.apache.hadoop.mapred.Counters;
 import org.apache.hadoop.mapred.Counters.Counter;
 import org.apache.hadoop.mapred.Counters.Group;
@@ -38,7 +38,7 @@ public class TestStreamingCounters extends TestStreaming {
   {
     try {
       try {
-        FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
+        OUTPUT_DIR.getAbsoluteFile().delete();
       } catch (Exception e) {
       }
 
@@ -62,8 +62,10 @@ public class TestStreamingCounters extends TestStreaming {
       assertNotNull("Counter", counter);
       assertEquals(3, counter.getCounter());
     } finally {
+      File outFileCRC = new File(OUTPUT_DIR, ".part-00000.crc").getAbsoluteFile();
       INPUT_FILE.delete();
-      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
+      outFileCRC.delete();
+      OUTPUT_DIR.getAbsoluteFile().delete();
     }
   }
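Note: the counter assertions above use the old org.apache.hadoop.mapred Counters API. A minimal sketch of pulling one counter value out of a finished job; readCounter and its arguments are illustrative helpers, not part of the test:

    import java.io.IOException;
    import org.apache.hadoop.mapred.Counters;
    import org.apache.hadoop.mapred.RunningJob;

    public class CounterCheck {
      /** Illustrative helper: read one counter value from a completed job. */
      public static long readCounter(RunningJob job, String group, String name)
          throws IOException {
        Counters counters = job.getCounters();
        Counters.Group g = counters.getGroup(group);       // group name is caller-supplied
        Counters.Counter c = g.getCounterForName(name);
        return c.getCounter();
      }
    }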
   

+ 28 - 38
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingExitStatus.java

@@ -24,7 +24,6 @@ import java.util.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.util.StringUtils;
 
 /**
  * This class tests if hadoopStreaming fails a job when the mapper or
@@ -33,11 +32,8 @@ import org.apache.hadoop.util.StringUtils;
  */
 public class TestStreamingExitStatus extends TestCase
 {
-  protected File TEST_DIR =
-    new File("TestStreamingExitStatus").getAbsoluteFile();
-
-  protected File INPUT_FILE = new File(TEST_DIR, "input.txt");
-  protected File OUTPUT_DIR = new File(TEST_DIR, "out");  
+  protected File INPUT_FILE = new File("input.txt");
+  protected File OUTPUT_DIR = new File("out");  
 
   protected String failingTask = StreamUtil.makeJavaCommand(FailApp.class, new String[]{"true"});
   protected String echoTask = StreamUtil.makeJavaCommand(FailApp.class, new String[]{"false"});
@@ -45,6 +41,7 @@ public class TestStreamingExitStatus extends TestCase
   public TestStreamingExitStatus() throws IOException {
     UtilTest utilTest = new UtilTest(getClass().getName());
     utilTest.checkUserDir();
+    utilTest.redirectIfAntJunit();
   }
 
   protected String[] genArgs(boolean exitStatusIsFailure, boolean failMap) {
@@ -60,58 +57,51 @@ public class TestStreamingExitStatus extends TestCase
   }
 
   public void setUp() throws IOException {
-    UtilTest.recursiveDelete(TEST_DIR);
-    assertTrue(TEST_DIR.mkdirs());
-
+    UtilTest.recursiveDelete(INPUT_FILE);
+    UtilTest.recursiveDelete(OUTPUT_DIR);
+    
     FileOutputStream out = new FileOutputStream(INPUT_FILE.getAbsoluteFile());
     out.write("hello\n".getBytes());
     out.close();
   }
 
-  private static String join(CharSequence separator, Iterable<String> strings) {
-    StringBuilder sb = new StringBuilder();
-    boolean first = true;
-    for (String s : strings) {
-      if (first) {
-        first = false;
+  public void runStreamJob(boolean exitStatusIsFailure, boolean failMap) {
+    try {
+      boolean mayExit = false;
+      int returnStatus = 0;
+
+      StreamJob job = new StreamJob(genArgs(exitStatusIsFailure, failMap), mayExit);
+      returnStatus = job.go();
+      
+      if (exitStatusIsFailure) {
+        assertEquals("Streaming Job failure code expected", /*job not successful:*/1, returnStatus);
       } else {
-        sb.append(separator);
+        assertEquals("Streaming Job expected to succeed", 0, returnStatus);
       }
-      sb.append(s);
-    }
-    return sb.toString();
-  }
-
-  public void runStreamJob(boolean exitStatusIsFailure, boolean failMap) throws Exception {
-    boolean mayExit = false;
-    int returnStatus = 0;
-    String args[] = genArgs(exitStatusIsFailure, failMap);
-    System.err.println("Testing streaming command line:\n" +
-               join(" ", Arrays.asList(args)));
-    StreamJob job = new StreamJob(genArgs(exitStatusIsFailure, failMap), mayExit);
-    returnStatus = job.go();
-    
-    if (exitStatusIsFailure) {
-      assertEquals("Streaming Job failure code expected", /*job not successful:*/1, returnStatus);
-    } else {
-      assertEquals("Streaming Job expected to succeed", 0, returnStatus);
+    } catch (Exception e) {
+      failTrace(e);
     }
   }
   
-  public void testMapFailOk() throws Exception {
+  public void testMapFailOk() {
     runStreamJob(false, true);
   }
   
-  public void testMapFailNotOk() throws Exception {
+  public void testMapFailNotOk() {
     runStreamJob(true, true);
   }
   
-  public void testReduceFailOk() throws Exception {
+  public void testReduceFailOk() {
     runStreamJob(false, false);
   }
   
-  public void testReduceFailNotOk() throws Exception {
+  public void testReduceFailNotOk() {
     runStreamJob(true, false);
   }  
   
+  protected void failTrace(Exception e) {
+    StringWriter sw = new StringWriter();
+    e.printStackTrace(new PrintWriter(sw));
+    fail(sw.toString());
+  }
 }

+ 10 - 1
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingKeyValue.java

@@ -72,7 +72,7 @@ public class TestStreamingKeyValue extends TestCase
     };
   }
   
-  public void testCommandLine() throws Exception
+  public void testCommandLine()
   {
     String outFileName = "part-00000";
     File outFile = null;
@@ -94,6 +94,8 @@ public class TestStreamingKeyValue extends TestCase
       System.err.println("outEx1=" + outputExpect);
       System.err.println("  out1=" + output);
       assertEquals(outputExpect, output);
+    } catch(Exception e) {
+      failTrace(e);
     } finally {
       outFile.delete();
       File outFileCRC = new File(OUTPUT_DIR,
@@ -104,6 +106,13 @@ public class TestStreamingKeyValue extends TestCase
     }
   }
 
+  private void failTrace(Exception e)
+  {
+    StringWriter sw = new StringWriter();
+    e.printStackTrace(new PrintWriter(sw));
+    fail(sw.toString());
+  }
+
   public static void main(String[]args) throws Exception
   {
     new TestStreamingKeyValue().testCommandLine();

+ 10 - 1
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingSeparator.java

@@ -85,7 +85,7 @@ public class TestStreamingSeparator extends TestCase
     };
   }
   
-  public void testCommandLine() throws Exception
+  public void testCommandLine()
   {
     try {
       try {
@@ -106,6 +106,8 @@ public class TestStreamingSeparator extends TestCase
       System.err.println("outEx1=" + outputExpect);
       System.err.println("  out1=" + output);
       assertEquals(outputExpect, output);
+    } catch(Exception e) {
+      failTrace(e);
     } finally {
       File outFileCRC = new File(OUTPUT_DIR, ".part-00000.crc").getAbsoluteFile();
       INPUT_FILE.delete();
@@ -114,6 +116,13 @@ public class TestStreamingSeparator extends TestCase
     }
   }
 
+  private void failTrace(Exception e)
+  {
+    StringWriter sw = new StringWriter();
+    e.printStackTrace(new PrintWriter(sw));
+    fail(sw.toString());
+  }
+
   public static void main(String[]args) throws Exception
   {
     new TestStreamingSeparator().testCommandLine();

+ 0 - 101
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java

@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.streaming;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.File;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.hadoop.mapred.TaskReport;
-
-/**
- * Tests for the ability of a streaming task to set the status
- * by writing "reporter:status:" lines to stderr. Uses MiniMR
- * since the local jobtracker doesn't track status.
- */
-public class TestStreamingStatus extends TestCase {
-  private static String TEST_ROOT_DIR =
-    new File(System.getProperty("test.build.data","/tmp"))
-    .toURI().toString().replace(' ', '+');
-  protected String INPUT_FILE = TEST_ROOT_DIR + "/input.txt";
-  protected String OUTPUT_DIR = TEST_ROOT_DIR + "/out";
-  protected String input = "roses.are.red\nviolets.are.blue\nbunnies.are.pink\n";
-  protected String map = StreamUtil.makeJavaCommand(StderrApp.class, new String[]{"3", "0", "0", "true"});
-
-  protected String[] genArgs(int jobtrackerPort) {
-    return new String[] {
-      "-input", INPUT_FILE,
-      "-output", OUTPUT_DIR,
-      "-mapper", map,
-      "-jobconf", "mapred.map.tasks=1",
-      "-jobconf", "mapred.reduce.tasks=0",      
-      "-jobconf", "keep.failed.task.files=true",
-      "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
-      "-jobconf", "mapred.job.tracker=localhost:"+jobtrackerPort,
-      "-jobconf", "fs.default.name=file:///"
-    };
-  }
-  
-  public void makeInput(FileSystem fs) throws IOException {
-    Path inFile = new Path(INPUT_FILE);
-    DataOutputStream file = fs.create(inFile);
-    file.writeBytes(input);
-    file.close();
-  }
-
-  public void clean(FileSystem fs) {
-    try {
-      Path outDir = new Path(OUTPUT_DIR);
-      fs.delete(outDir, true);
-    } catch (Exception e) {}
-    try {
-      Path inFile = new Path(INPUT_FILE);    
-      fs.delete(inFile, false);
-    } catch (Exception e) {}
-  }
-  
-  public void testStreamingStatus() throws Exception {
-    MiniMRCluster mr = null;
-    FileSystem fs = null;
-    try {
-      mr = new MiniMRCluster(1, "file:///", 3);
-
-      Path inFile = new Path(INPUT_FILE);
-      fs = inFile.getFileSystem(mr.createJobConf());
-      clean(fs);
-      makeInput(fs);
-      
-      StreamJob job = new StreamJob();
-      int failed = job.run(genArgs(mr.getJobTrackerPort()));
-      assertEquals(0, failed);
-
-      TaskReport[] reports = job.jc_.getMapTaskReports(job.jobId_);
-      assertEquals(1, reports.length);
-      assertEquals("starting echo", reports[0].getState());
-    } finally {
-      if (fs != null) { clean(fs); }
-      if (mr != null) { mr.shutdown(); }
-    }
-  }
-}
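
The deleted TestStreamingStatus exercised the streaming status protocol: a task sets the state shown by the JobTracker by writing "reporter:status:<message>" lines to stderr (the test expected the state "starting echo"), and counters can be bumped the same way with "reporter:counter:<group>,<counter>,<amount>" lines. A minimal identity mapper using that protocol, offered only as a sketch (class name and counter names are invented):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    public class StatusReportingMapper {
      public static void main(String[] args) throws Exception {
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
        // Update the task status string picked up by the framework.
        System.err.println("reporter:status:starting echo");
        String line;
        while ((line = in.readLine()) != null) {
          System.out.println(line);                            // identity map
          System.err.println("reporter:counter:demo,lines,1"); // custom counter
        }
      }
    }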

+ 20 - 12
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStderr.java

@@ -71,35 +71,43 @@ public class TestStreamingStderr extends TestCase
   }
 
   public void runStreamJob(String baseName, boolean hasInput,
-                           int preLines, int duringLines, int postLines)
-    throws Exception {
-    File input = setupInput(baseName, hasInput);
-    File output = setupOutput(baseName);
-    boolean mayExit = false;
-    int returnStatus = 0;
+                           int preLines, int duringLines, int postLines) {
+    try {
+      File input = setupInput(baseName, hasInput);
+      File output = setupOutput(baseName);
+      boolean mayExit = false;
+      int returnStatus = 0;
 
-    StreamJob job = new StreamJob(genArgs(input, output, preLines, duringLines, postLines), mayExit);
-    returnStatus = job.go();
-    assertEquals("StreamJob success", 0, returnStatus);
+      StreamJob job = new StreamJob(genArgs(input, output, preLines, duringLines, postLines), mayExit);
+      returnStatus = job.go();
+      assertEquals("StreamJob success", 0, returnStatus);
+    } catch (Exception e) {
+      failTrace(e);
+    }
   }
 
   // This test will fail by blocking forever if the stderr isn't
   // consumed by Hadoop for tasks that don't have any input.
-  public void testStderrNoInput() throws Exception {
+  public void testStderrNoInput() throws IOException {
     runStreamJob("stderr-pre", false, 10000, 0, 0);
   }
 
   // Streaming should continue to read stderr even after all input has
   // been consumed.
-  public void testStderrAfterOutput() throws Exception {
+  public void testStderrAfterOutput() throws IOException {
     runStreamJob("stderr-post", false, 0, 0, 10000);
   }
 
   // This test should produce a task timeout if stderr lines aren't
   // counted as progress. This won't actually work until
   // LocalJobRunner supports timeouts.
-  public void testStderrCountsAsProgress() throws Exception {
+  public void testStderrCountsAsProgress() throws IOException {
     runStreamJob("stderr-progress", true, 10, 1000, 0);
   }
   
+  protected void failTrace(Exception e) {
+    StringWriter sw = new StringWriter();
+    e.printStackTrace(new PrintWriter(sw));
+    fail(sw.toString());
+  }
 }

+ 66 - 54
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java

@@ -47,68 +47,80 @@ public class TestSymLink extends TestCase
   {
   }
 
-  public void testSymLink() throws Exception
+  public void testSymLink()
   {
-    boolean mayExit = false;
-    MiniMRCluster mr = null;
-    MiniDFSCluster dfs = null; 
-    try{
-      Configuration conf = new Configuration();
-      dfs = new MiniDFSCluster(conf, 1, true, null);
-      FileSystem fileSys = dfs.getFileSystem();
-      String namenode = fileSys.getName();
-      mr  = new MiniMRCluster(1, namenode, 3);
-      // During tests, the default Configuration will use a local mapred
-      // So don't specify -config or -cluster
-      String strJobtracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
-      String strNamenode = "fs.default.name=" + namenode;
-      String argv[] = new String[] {
-        "-input", INPUT_FILE,
-        "-output", OUTPUT_DIR,
-        "-mapper", map,
-        "-reducer", reduce,
-        //"-verbose",
-        //"-jobconf", "stream.debug=set"
-        "-jobconf", strNamenode,
-        "-jobconf", strJobtracker,
-        "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
-        "-jobconf", "mapred.child.java.opts=-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
-                    "-Dbuild.test=" + System.getProperty("build.test") + " " +
-                    conf.get("mapred.child.java.opts",""),
-        "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#testlink"
-      };
+    try {
+      boolean mayExit = false;
+      MiniMRCluster mr = null;
+      MiniDFSCluster dfs = null; 
+      try{
+        Configuration conf = new Configuration();
+        dfs = new MiniDFSCluster(conf, 1, true, null);
+        FileSystem fileSys = dfs.getFileSystem();
+        String namenode = fileSys.getName();
+        mr  = new MiniMRCluster(1, namenode, 3);
+        // During tests, the default Configuration will use a local mapred
+        // So don't specify -config or -cluster
+        String strJobtracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
+        String strNamenode = "fs.default.name=" + namenode;
+        String argv[] = new String[] {
+          "-input", INPUT_FILE,
+          "-output", OUTPUT_DIR,
+          "-mapper", map,
+          "-reducer", reduce,
+          //"-verbose",
+          //"-jobconf", "stream.debug=set"
+          "-jobconf", strNamenode,
+          "-jobconf", strJobtracker,
+          "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
+          "-jobconf", "mapred.child.java.opts=-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
+                      "-Dbuild.test=" + System.getProperty("build.test") + " " +
+                      conf.get("mapred.child.java.opts",""),
+          "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#testlink"
+        };
 
-      fileSys.delete(new Path(OUTPUT_DIR));
-      
-      DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
-      file.writeBytes(mapString);
-      file.close();
-      file = fileSys.create(new Path(CACHE_FILE));
-      file.writeBytes(cacheString);
-      file.close();
+        fileSys.delete(new Path(OUTPUT_DIR));
         
-      job = new StreamJob(argv, mayExit);      
-      job.go();
+        DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
+        file.writeBytes(mapString);
+        file.close();
+        file = fileSys.create(new Path(CACHE_FILE));
+        file.writeBytes(cacheString);
+        file.close();
+          
+        job = new StreamJob(argv, mayExit);      
+        job.go();
 
-      fileSys = dfs.getFileSystem();
-      String line = null;
-      Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
-                                              new Path(OUTPUT_DIR),
-                                              new OutputLogFilter()));
-      for (int i = 0; i < fileList.length; i++){
-        System.out.println(fileList[i].toString());
-        BufferedReader bread =
-          new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
-        line = bread.readLine();
-        System.out.println(line);
+        fileSys = dfs.getFileSystem();
+        String line = null;
+        Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
+                                                new Path(OUTPUT_DIR),
+                                                new OutputLogFilter()));
+        for (int i = 0; i < fileList.length; i++){
+          System.out.println(fileList[i].toString());
+          BufferedReader bread =
+            new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
+          line = bread.readLine();
+          System.out.println(line);
+        }
+        assertEquals(cacheString + "\t", line);
+      } finally{
+        if (dfs != null) { dfs.shutdown(); }
+        if (mr != null) { mr.shutdown();}
       }
-      assertEquals(cacheString + "\t", line);
-    } finally{
-      if (dfs != null) { dfs.shutdown(); }
-      if (mr != null) { mr.shutdown();}
+      
+    } catch(Exception e) {
+      failTrace(e);
     }
   }
 
+  void failTrace(Exception e)
+  {
+    StringWriter sw = new StringWriter();
+    e.printStackTrace(new PrintWriter(sw));
+    fail(sw.toString());
+  }
+
   public static void main(String[]args) throws Exception
   {
     new TestStreaming().testCommandLine();
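
The "#testlink" fragment on the -cacheFile argument above asks the framework to symlink the distributed file under that name in each task's working directory. The same effect achieved programmatically through DistributedCache, as a sketch (the namenode address and HDFS path are placeholders):

    import java.net.URI;
    import org.apache.hadoop.filecache.DistributedCache;
    import org.apache.hadoop.mapred.JobConf;

    public class CacheSymlinkDemo {
      public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf();
        DistributedCache.createSymlink(conf);   // honor the #fragment as a symlink name
        DistributedCache.addCacheFile(
            new URI("hdfs://namenode:8020/cache/lookup.txt#testlink"), conf);
        // A task can then open the cached file simply as new java.io.File("testlink").
      }
    }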

+ 1 - 1
src/contrib/thriftfs/build.xml

@@ -34,7 +34,7 @@ to call at top-level: ant deploy-contrib compile-core-test
   <!-- Override jar target to specify main class -->
   <target name="jar" depends="compile">
     <jar
-      jarfile="${build.dir}/hadoop-${name}-${version}.jar"
+      jarfile="${build.dir}/hadoop-${version}-${name}.jar"
       basedir="${build.classes}"      
     >
   	<manifest>

+ 1 - 1
src/contrib/vaidya/build.xml

@@ -47,7 +47,7 @@
 	<!-- ====================================================== -->
 	<target name="jar" depends="compile" unless="skip.contrib">
 		<echo message="contrib: ${name}" />
-		<jar jarfile="${build.dir}/hadoop-${name}-${version}.jar">
+		<jar jarfile="${build.dir}/hadoop-${version}-${name}.jar">
 			<fileset dir="${build.classes}" />
 			<fileset dir="${basedir}/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests">
 				<include name="postex_diagnosis_tests.xml" />

+ 0 - 6
src/core/core-default.xml

@@ -168,12 +168,6 @@
   <description>The filesystem for Hadoop archives. </description>
 </property>
 
-<property>
-  <name>fs.har.impl.disable.cache</name>
-  <value>true</value>
-  <description>Don't cache 'har' filesystem instances.</description>
-</property>
-
 <property>
   <name>fs.checkpoint.dir</name>
   <value>${hadoop.tmp.dir}/dfs/namesecondary</value>

+ 3 - 4
src/core/org/apache/hadoop/conf/Configuration.java

@@ -42,7 +42,6 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.StringTokenizer;
 import java.util.WeakHashMap;
-import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -163,8 +162,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * List of default Resources. Resources are loaded in the order of the list 
    * entries
    */
-  private static final CopyOnWriteArrayList<String> defaultResources =
-    new CopyOnWriteArrayList<String>();
+  private static final ArrayList<String> defaultResources = 
+    new ArrayList<String>();
   
   static{
     //print deprecation warning if hadoop-site.xml is found in classpath
@@ -1251,7 +1250,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     return sb.toString();
   }
 
-  private void toString(List resources, StringBuffer sb) {
+  private void toString(ArrayList resources, StringBuffer sb) {
     ListIterator i = resources.listIterator();
     while (i.hasNext()) {
       if (i.nextIndex() != 0) {
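
The defaultResources list reverts here from CopyOnWriteArrayList to a plain ArrayList. The difference shows up when one thread iterates the default resources while another registers a new one: a copy-on-write list hands each iterator a stable snapshot, whereas an unsynchronized ArrayList can throw ConcurrentModificationException. A stand-alone JDK illustration, not Hadoop code:

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    public class SnapshotIterationDemo {
      public static void main(String[] args) throws Exception {
        final List<String> resources = new CopyOnWriteArrayList<String>();
        resources.add("core-default.xml");
        Thread writer = new Thread(new Runnable() {
          public void run() {
            resources.add("core-site.xml");      // simulates a concurrent addResource()
          }
        });
        writer.start();
        for (String r : resources) {             // iterates a snapshot, never throws
          System.out.println(r);
        }
        writer.join();
      }
    }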

+ 0 - 5
src/core/org/apache/hadoop/fs/FileSystem.java

@@ -187,11 +187,6 @@ public abstract class FileSystem extends Configured implements Closeable {
         return get(defaultUri, conf);              // return default
       }
     }
-    
-    String disableCacheName = String.format("fs.%s.impl.disable.cache", scheme);
-    if (conf.getBoolean(disableCacheName, false)) {
-      return createFileSystem(uri, conf);
-    }
 
     return CACHE.get(uri, conf);
   }
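
Together with the fs.har.impl.disable.cache entry dropped from core-default.xml above, the lines removed from FileSystem.get() provided a per-scheme switch, fs.<scheme>.impl.disable.cache, for bypassing the FileSystem instance cache. A sketch of how the switch behaves in a tree that still carries the removed check, here applied to the local file system:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class UncachedFsDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean("fs.file.impl.disable.cache", true);
        FileSystem a = FileSystem.get(URI.create("file:///"), conf);
        FileSystem b = FileSystem.get(URI.create("file:///"), conf);
        // With the removed check present, every get() builds a fresh instance.
        System.out.println("same instance? " + (a == b));
      }
    }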

+ 3 - 3
src/core/org/apache/hadoop/fs/FsShellPermissions.java

@@ -182,7 +182,7 @@ class FsShellPermissions {
                                 new FsPermission((short)newperms));
         } catch (IOException e) {
           System.err.println(getName() + ": changing permissions of '" + 
-                             file.getPath() + "':" + e.getMessage().split("\n")[0]);
+                             file.getPath() + "':" + e.getMessage());
         }
       }
     }
@@ -223,7 +223,7 @@ class FsShellPermissions {
       }
       if (owner == null && group == null) {
         throw new IOException("'" + ownerStr + "' does not specify " +
-                              " owner or group.");
+                              " onwer or group.");
       }
     }
 
@@ -240,7 +240,7 @@ class FsShellPermissions {
           srcFs.setOwner(file.getPath(), newOwner, newGroup);
         } catch (IOException e) {
           System.err.println(getName() + ": changing ownership of '" + 
-                             file.getPath() + "':" + e.getMessage().split("\n")[0]);
+                             file.getPath() + "':" + e.getMessage());
 
         }
       }

+ 14 - 4
src/core/org/apache/hadoop/fs/HarFileSystem.java

@@ -301,8 +301,19 @@ public class HarFileSystem extends FilterFileSystem {
     }
 
     URI tmpURI = fsPath.toUri();
+    fsPath = new Path(tmpURI.getPath());
     //change this to Har uri 
-    return new Path(uri.getScheme(), harAuth, tmpURI.getPath());
+    URI tmp = null;
+    try {
+      tmp = new URI(uri.getScheme(), harAuth, fsPath.toString(),
+                    tmpURI.getQuery(), tmpURI.getFragment());
+    } catch(URISyntaxException ue) {
+      LOG.error("Error in URI ", ue);
+    }
+    if (tmp != null) {
+      return new Path(tmp.toString());
+    }
+    return null;
   }
   
   /**
@@ -414,13 +425,12 @@ public class HarFileSystem extends FilterFileSystem {
       // do nothing just a read.
     }
     FSDataInputStream aIn = fs.open(archiveIndex);
-    LineReader aLin;
+    LineReader aLin = new LineReader(aIn, getConf());
     String retStr = null;
     // now start reading the real index file
+     read = 0;
     for (Store s: stores) {
-      read = 0;
       aIn.seek(s.begin);
-      aLin = new LineReader(aIn, getConf());
       while (read + s.begin < s.end) {
         int tmp = aLin.readLine(line);
         read += tmp;
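
The lines added in the first hunk above rebuild the har path with the five-argument java.net.URI constructor so that any query or fragment on the original path survives the scheme and authority swap. The JDK call in isolation (the authority string is made up):

    import java.net.URI;

    public class HarUriDemo {
      public static void main(String[] args) throws Exception {
        URI underlying = new URI("hdfs", "namenode:8020",
                                 "/user/foo/archive.har/dir/part-00000", null, null);
        URI har = new URI("har", "hdfs-namenode:8020", underlying.getPath(),
                          underlying.getQuery(), underlying.getFragment());
        // Prints har://hdfs-namenode:8020/user/foo/archive.har/dir/part-00000
        System.out.println(har);
      }
    }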

+ 0 - 1
src/core/org/apache/hadoop/http/HttpServer.java

@@ -26,7 +26,6 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.nio.channels.ServerSocketChannel;
 
 import javax.servlet.ServletException;

+ 3 - 1
src/core/org/apache/hadoop/io/BooleanWritable.java

@@ -100,7 +100,9 @@ public class BooleanWritable implements WritableComparable {
 
     public int compare(byte[] b1, int s1, int l1,
                        byte[] b2, int s2, int l2) {
-      return compareBytes(b1, s1, l1, b2, s2, l2);
+      boolean a = (readInt(b1, s1) == 1) ? true : false;
+      boolean b = (readInt(b2, s2) == 1) ? true : false;
+      return ((a == b) ? 0 : (a == false) ? -1 : 1);
     }
   }
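
For context on the comparator above: BooleanWritable.write() serializes its value with DataOutput.writeBoolean(), i.e. as a single byte, so a raw comparator for this type receives one-byte key buffers. A quick stand-alone check, assuming the Hadoop core classes are on the classpath:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.io.BooleanWritable;

    public class BooleanWritableSizeCheck {
      public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        new BooleanWritable(true).write(out);
        out.flush();
        System.out.println("serialized length = " + bytes.size());  // prints 1
      }
    }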
 

+ 3 - 28
src/core/org/apache/hadoop/io/WritableComparator.java

@@ -38,38 +38,13 @@ public class WritableComparator implements RawComparator {
     new HashMap<Class, WritableComparator>(); // registry
 
   /** Get a comparator for a {@link WritableComparable} implementation. */
-  public static synchronized 
-  WritableComparator get(Class<? extends WritableComparable> c) {
+  public static synchronized WritableComparator get(Class<? extends WritableComparable> c) {
     WritableComparator comparator = comparators.get(c);
-    if (comparator == null) {
-      // force the static initializers to run
-      forceInit(c);
-      // look to see if it is defined now
-      comparator = comparators.get(c);
-      // if not, use the generic one
-      if (comparator == null) {
-        comparator = new WritableComparator(c, true);
-        comparators.put(c, comparator);
-      }
-    }
+    if (comparator == null)
+      comparator = new WritableComparator(c, true);
     return comparator;
   }
 
-  /**
-   * Force initialization of the static members.
-   * As of Java 5, referencing a class doesn't force it to initialize. Since
-   * this class requires that the classes be initialized to declare their
-   * comparators, we force that initialization to happen.
-   * @param cls the class to initialize
-   */
-  private static void forceInit(Class<?> cls) {
-    try {
-      Class.forName(cls.getName(), true, cls.getClassLoader());
-    } catch (ClassNotFoundException e) {
-      throw new IllegalArgumentException("Can't initialize class " + cls, e);
-    }
-  } 
-
   /** Register an optimized comparator for a {@link WritableComparable}
    * implementation. */
   public static synchronized void define(Class c,
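
The forceInit() logic removed above existed because a Writable's optimized raw comparator is normally registered from a static initializer, which only runs once the class is loaded; get() therefore loaded the class before falling back to the generic comparator. The registration pattern itself looks like the sketch below (TimestampWritable is an invented example, not a Hadoop class):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.hadoop.io.WritableComparable;
    import org.apache.hadoop.io.WritableComparator;

    public class TimestampWritable implements WritableComparable<TimestampWritable> {
      private long millis;

      public void write(DataOutput out) throws IOException { out.writeLong(millis); }
      public void readFields(DataInput in) throws IOException { millis = in.readLong(); }
      public int compareTo(TimestampWritable o) {
        return (millis < o.millis) ? -1 : (millis == o.millis) ? 0 : 1;
      }

      /** Raw comparator that orders serialized keys without deserializing them. */
      public static class Comparator extends WritableComparator {
        public Comparator() { super(TimestampWritable.class); }
        public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
          long t1 = readLong(b1, s1);
          long t2 = readLong(b2, s2);
          return (t1 < t2) ? -1 : (t1 == t2) ? 0 : 1;
        }
      }

      static {  // runs only when the class is initialized
        WritableComparator.define(TimestampWritable.class, new Comparator());
      }
    }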

+ 2 - 2
src/core/org/apache/hadoop/io/compress/GzipCodec.java

@@ -161,7 +161,7 @@ public class GzipCodec extends DefaultCodec {
   public Class<? extends Compressor> getCompressorType() {
     return ZlibFactory.isNativeZlibLoaded(conf)
       ? GzipZlibCompressor.class
-      : null;
+      : BuiltInZlibDeflater.class;
   }
 
   public CompressionInputStream createInputStream(InputStream in) 
@@ -192,7 +192,7 @@ public class GzipCodec extends DefaultCodec {
   public Class<? extends Decompressor> getDecompressorType() {
     return ZlibFactory.isNativeZlibLoaded(conf)
       ? GzipZlibDecompressor.class
-      : null;
+      : BuiltInZlibInflater.class;
   }
 
   public String getDefaultExtension() {
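
Whichever branch of getCompressorType()/getDecompressorType() is in effect, the codec is normally consumed through its stream factory methods, which fall back to java.util.zip when the native zlib binding is absent. A small usage sketch (the output file name is arbitrary):

    import java.io.FileOutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.CompressionOutputStream;
    import org.apache.hadoop.io.compress.GzipCodec;
    import org.apache.hadoop.util.ReflectionUtils;

    public class GzipWriteDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        GzipCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
        CompressionOutputStream out =
            codec.createOutputStream(new FileOutputStream("demo.txt.gz"));
        out.write("hello, gzip\n".getBytes("UTF-8"));
        out.finish();   // writes the gzip trailer
        out.close();
      }
    }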

+ 6 - 130
src/core/org/apache/hadoop/io/file/tfile/TFile.java

@@ -668,10 +668,10 @@ public class TFile {
    * TFile Reader. Users may only read TFiles by creating TFile.Reader.Scanner.
    * objects. A scanner may scan the whole TFile ({@link Reader#createScanner()}
    * ) , a portion of TFile based on byte offsets (
-   * {@link Reader#createScannerByByteRange(long, long)}), or a portion of TFile with keys
+   * {@link Reader#createScanner(long, long)}), or a portion of TFile with keys
    * fall in a certain key range (for sorted TFile only,
-   * {@link Reader#createScannerByKey(byte[], byte[])} or
-   * {@link Reader#createScannerByKey(RawComparable, RawComparable)}).
+   * {@link Reader#createScanner(byte[], byte[])} or
+   * {@link Reader#createScanner(RawComparable, RawComparable)}).
    */
   public static class Reader implements Closeable {
     // The underlying BCFile reader.
@@ -985,16 +985,6 @@ public class TFile {
       return new Location(blkIndex, 0);
     }
 
-    Location getLocationByRecordNum(long recNum) throws IOException {
-      checkTFileDataIndex();
-      return tfileIndex.getLocationByRecordNum(recNum);
-    }
-
-    long getRecordNumByLocation(Location location) throws IOException {
-      checkTFileDataIndex();
-      return tfileIndex.getRecordNumByLocation(location);      
-    }
-    
     int compareKeys(byte[] a, int o1, int l1, byte[] b, int o2, int l2) {
       if (!isSorted()) {
         throw new RuntimeException("Cannot compare keys for unsorted TFiles.");
@@ -1025,21 +1015,6 @@ public class TFile {
       return new Location(blockIndex, 0);
     }
 
-    /**
-     * Get the RecordNum for the first key-value pair in a compressed block
-     * whose byte offset in the TFile is greater than or equal to the specified
-     * offset.
-     * 
-     * @param offset
-     *          the user supplied offset.
-     * @return the RecordNum to the corresponding entry. If no such entry
-     *         exists, it returns the total entry count.
-     * @throws IOException
-     */
-    public long getRecordNumNear(long offset) throws IOException {
-      return getRecordNumByLocation(getLocationNear(offset));
-    }
-    
     /**
      * Get a sample key that is within a block whose starting offset is greater
      * than or equal to the specified offset.
@@ -1082,7 +1057,7 @@ public class TFile {
      *         contains zero key-value pairs even if length is positive.
      * @throws IOException
      */
-    public Scanner createScannerByByteRange(long offset, long length) throws IOException {
+    public Scanner createScanner(long offset, long length) throws IOException {
       return new Scanner(this, offset, offset + length);
     }
 
@@ -1098,31 +1073,10 @@ public class TFile {
      * @return The actual coverage of the returned scanner will cover all keys
      *         greater than or equal to the beginKey and less than the endKey.
      * @throws IOException
-     * 
-     * @deprecated Use {@link #createScannerByKey(byte[], byte[])} instead.
      */
-    @Deprecated
     public Scanner createScanner(byte[] beginKey, byte[] endKey)
-      throws IOException {
-      return createScannerByKey(beginKey, endKey);
-    }
-    
-    /**
-     * Get a scanner that covers a portion of TFile based on keys.
-     * 
-     * @param beginKey
-     *          Begin key of the scan (inclusive). If null, scan from the first
-     *          key-value entry of the TFile.
-     * @param endKey
-     *          End key of the scan (exclusive). If null, scan up to the last
-     *          key-value entry of the TFile.
-     * @return The actual coverage of the returned scanner will cover all keys
-     *         greater than or equal to the beginKey and less than the endKey.
-     * @throws IOException
-     */
-    public Scanner createScannerByKey(byte[] beginKey, byte[] endKey)
         throws IOException {
-      return createScannerByKey((beginKey == null) ? null : new ByteArray(beginKey,
+      return createScanner((beginKey == null) ? null : new ByteArray(beginKey,
           0, beginKey.length), (endKey == null) ? null : new ByteArray(endKey,
           0, endKey.length));
     }
@@ -1139,31 +1093,9 @@ public class TFile {
      * @return The actual coverage of the returned scanner will cover all keys
      *         greater than or equal to the beginKey and less than the endKey.
      * @throws IOException
-     * 
-     * @deprecated Use {@link #createScannerByKey(RawComparable, RawComparable)}
-     *             instead.
      */
-    @Deprecated
     public Scanner createScanner(RawComparable beginKey, RawComparable endKey)
         throws IOException {
-      return createScannerByKey(beginKey, endKey);
-    }
-
-    /**
-     * Get a scanner that covers a specific key range.
-     * 
-     * @param beginKey
-     *          Begin key of the scan (inclusive). If null, scan from the first
-     *          key-value entry of the TFile.
-     * @param endKey
-     *          End key of the scan (exclusive). If null, scan up to the last
-     *          key-value entry of the TFile.
-     * @return The actual coverage of the returned scanner will cover all keys
-     *         greater than or equal to the beginKey and less than the endKey.
-     * @throws IOException
-     */
-    public Scanner createScannerByKey(RawComparable beginKey, RawComparable endKey)
-        throws IOException {
       if ((beginKey != null) && (endKey != null)
           && (compareKeys(beginKey, endKey) >= 0)) {
         return new Scanner(this, beginKey, beginKey);
@@ -1171,27 +1103,6 @@ public class TFile {
       return new Scanner(this, beginKey, endKey);
     }
 
-    /**
-     * Create a scanner that covers a range of records.
-     * 
-     * @param beginRecNum
-     *          The RecordNum for the first record (inclusive).
-     * @param endRecNum
-     *          The RecordNum for the last record (exclusive). To scan the whole
-     *          file, either specify endRecNum==-1 or endRecNum==getEntryCount().
-     * @return The TFile scanner that covers the specified range of records.
-     * @throws IOException
-     */
-    public Scanner createScannerByRecordNum(long beginRecNum, long endRecNum)
-        throws IOException {
-      if (beginRecNum < 0) beginRecNum = 0;
-      if (endRecNum < 0 || endRecNum > getEntryCount()) {
-        endRecNum = getEntryCount();
-      }
-      return new Scanner(this, getLocationByRecordNum(beginRecNum),
-          getLocationByRecordNum(endRecNum));
-    }
-
     /**
      * The TFile Scanner. The Scanner has an implicit cursor, which, upon
      * creation, points to the first key-value pair in the scan range. If the
@@ -1611,15 +1522,6 @@ public class TFile {
         return new Entry();
       }
 
-      /**
-       * Get the RecordNum corresponding to the entry pointed by the cursor.
-       * @return The RecordNum corresponding to the entry pointed by the cursor.
-       * @throws IOException
-       */
-      public long getRecordNum() throws IOException {
-        return reader.getRecordNumByLocation(currentLocation);
-      }
-      
       /**
        * Internal API. Comparing the key at cursor to user-specified key.
        * 
@@ -2118,10 +2020,8 @@ public class TFile {
     final static String BLOCK_NAME = "TFile.index";
     private ByteArray firstKey;
     private final ArrayList<TFileIndexEntry> index;
-    private final ArrayList<Long> recordNumIndex;
     private final BytesComparator comparator;
-    private long sum = 0;
-    
+
     /**
      * For reading from file.
      * 
@@ -2130,7 +2030,6 @@ public class TFile {
     public TFileIndex(int entryCount, DataInput in, BytesComparator comparator)
         throws IOException {
       index = new ArrayList<TFileIndexEntry>(entryCount);
-      recordNumIndex = new ArrayList<Long>(entryCount);
       int size = Utils.readVInt(in); // size for the first key entry.
       if (size > 0) {
         byte[] buffer = new byte[size];
@@ -2152,8 +2051,6 @@ public class TFile {
               new TFileIndexEntry(new DataInputStream(new ByteArrayInputStream(
                   buffer, 0, size)));
           index.add(idx);
-          sum += idx.entries();
-          recordNumIndex.add(sum);
         }
       } else {
         if (entryCount != 0) {
@@ -2185,12 +2082,6 @@ public class TFile {
       return ret;
     }
 
-    /**
-     * @param key
-     *          input key.
-     * @return the ID of the first block that contains key > input key. Or -1
-     *         if no such block exists.
-     */
     public int upperBound(RawComparable key) {
       if (comparator == null) {
         throw new RuntimeException("Cannot search in unsorted TFile");
@@ -2212,26 +2103,13 @@ public class TFile {
      */
     public TFileIndex(BytesComparator comparator) {
       index = new ArrayList<TFileIndexEntry>();
-      recordNumIndex = new ArrayList<Long>();
       this.comparator = comparator;
     }
 
     public RawComparable getFirstKey() {
       return firstKey;
     }
-    
-    public Reader.Location getLocationByRecordNum(long recNum) {
-      int idx = Utils.upperBound(recordNumIndex, recNum);
-      long lastRecNum = (idx == 0)? 0: recordNumIndex.get(idx-1);
-      return new Reader.Location(idx, recNum-lastRecNum);
-    }
 
-    public long getRecordNumByLocation(Reader.Location location) {
-      int blkIndex = location.getBlockIndex();
-      long lastRecNum = (blkIndex == 0) ? 0: recordNumIndex.get(blkIndex-1);
-      return lastRecNum + location.getRecordIndex();
-    }
-    
     public void setFirstKey(byte[] key, int offset, int length) {
       firstKey = new ByteArray(new byte[length]);
       System.arraycopy(key, offset, firstKey.buffer(), 0, length);
@@ -2246,8 +2124,6 @@ public class TFile {
 
     public void addEntry(TFileIndexEntry keyEntry) {
       index.add(keyEntry);
-      sum += keyEntry.entries();
-      recordNumIndex.add(sum);
     }
 
     public TFileIndexEntry getEntry(int bid) {
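
For orientation, the scanner factories being renamed above all hang off TFile.Reader, and the files they read are produced with TFile.Writer. A minimal sketch of writing a sorted TFile with gzip-compressed blocks, under the assumption that the 0.20-era Writer signature (stream, minimum block size, compression name, comparator name, conf) applies; path and sizes are arbitrary:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.file.tfile.TFile;

    public class TFileWriteDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        FSDataOutputStream fsdos = fs.create(new Path("/tmp/demo.tfile"));
        // 64KB minimum block size, "gz" block compression, byte-wise key ordering
        TFile.Writer writer = new TFile.Writer(fsdos, 64 * 1024, "gz", "memcmp", conf);
        writer.append("key-1".getBytes("UTF-8"), "value-1".getBytes("UTF-8"));
        writer.append("key-2".getBytes("UTF-8"), "value-2".getBytes("UTF-8"));
        writer.close();
        fsdos.close();
      }
    }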

+ 5 - 9
src/core/org/apache/hadoop/ipc/Client.java

@@ -347,12 +347,10 @@ public class Client {
     private void handleConnectionFailure(
         int curRetries, int maxRetries, IOException ioe) throws IOException {
       // close the current connection
-      if (socket != null) {
-        try {
-          socket.close();
-        } catch (IOException e) {
-          LOG.warn("Not able to close a socket", e);
-        }
+      try {
+        socket.close();
+      } catch (IOException e) {
+        LOG.warn("Not able to close a socket", e);
       }
       // set socket to null so that the next call to setupIOstreams
       // can start the process of connect all over again.
@@ -505,18 +503,16 @@ public class Client {
         if (LOG.isDebugEnabled())
           LOG.debug(getName() + " got value #" + id);
 
-        Call call = calls.get(id);
+        Call call = calls.remove(id);
 
         int state = in.readInt();     // read call status
         if (state == Status.SUCCESS.state) {
           Writable value = ReflectionUtils.newInstance(valueClass, conf);
           value.readFields(in);                 // read value
           call.setValue(value);
-          calls.remove(id);
         } else if (state == Status.ERROR.state) {
           call.setException(new RemoteException(WritableUtils.readString(in),
                                                 WritableUtils.readString(in)));
-          calls.remove(id);
         } else if (state == Status.FATAL.state) {
           // Close the connection
           markClosed(new RemoteException(WritableUtils.readString(in), 

+ 2 - 6
src/core/org/apache/hadoop/ipc/Server.java

@@ -88,9 +88,7 @@ public abstract class Server {
   /**
    * How many calls/handler are allowed in the queue.
    */
-  private static final int IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT = 100;
-  private static final String  IPC_SERVER_HANDLER_QUEUE_SIZE_KEY = 
-                                            "ipc.server.handler.queue.size";
+  private static final int MAX_QUEUE_SIZE_PER_HANDLER = 100;
   
   public static final Log LOG = LogFactory.getLog(Server.class);
 
@@ -1018,9 +1016,7 @@ public abstract class Server {
     this.paramClass = paramClass;
     this.handlerCount = handlerCount;
     this.socketSendBufferSize = 0;
-    this.maxQueueSize = handlerCount * conf.getInt(
-                                IPC_SERVER_HANDLER_QUEUE_SIZE_KEY,
-                                IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT);
+    this.maxQueueSize = handlerCount * MAX_QUEUE_SIZE_PER_HANDLER;
     this.callQueue  = new LinkedBlockingQueue<Call>(maxQueueSize); 
     this.maxIdleTime = 2*conf.getInt("ipc.client.connection.maxidletime", 1000);
     this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10);
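
The lines removed in this hunk sized the IPC call queue from the ipc.server.handler.queue.size key (default 100 per handler); the restored code hard-codes that factor. The configurable form's arithmetic, reduced to a stand-alone snippet:

    import org.apache.hadoop.conf.Configuration;

    public class IpcQueueSizeDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        int handlerCount = 10;                         // example handler count
        int maxQueueSize = handlerCount *
            conf.getInt("ipc.server.handler.queue.size", 100);
        System.out.println("call queue bound = " + maxQueueSize);
      }
    }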

+ 3 - 199
src/docs/releasenotes.html

@@ -2,7 +2,7 @@
 <html>
 <head>
 <META http-equiv="Content-Type" content="text/html; charset=UTF-8">
-<title>Hadoop 0.20.3 Release Notes</title>
+<title>Hadoop 0.20.1 Release Notes</title>
 <STYLE type="text/css">
 		H1 {font-family: sans-serif}
 		H2 {font-family: sans-serif; margin-left: 7mm}
@@ -10,206 +10,10 @@
 	</STYLE>
 </head>
 <body>
-<h1>Hadoop 0.20.3 Release Notes</h1>
+<h1>Hadoop 0.20.1 Release Notes</h1>
 		These release notes include new developer and user-facing incompatibilities, features, and major improvements. The table below is sorted by Component.
 
-
-<a name="changes"></a>
-<h2>Changes Since Hadoop 0.20.2</h2>
-            
-<h3>Bug</h3>
-<ul>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6485'>HADOOP-6485</a>] - Trash fails on Windows</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6602'>HADOOP-6602</a>] - ClassLoader (Configuration#setClassLoader) in new Job API (0.20) does not work</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6625'>HADOOP-6625</a>] - Hadoop 0.20 doesn't generate hadoop-test pom, existing pom has bad dependencies, doesnt build javadoc,sources jar</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6665'>HADOOP-6665</a>] - DFSadmin commands setQuota and setSpaceQuota allowed when NameNode is in safemode.  </li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6688'>HADOOP-6688</a>] - FileSystem.delete(...) implementations should not throw FileNotFoundException</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6701'>HADOOP-6701</a>] -  Incorrect exit codes for "dfs -chown", "dfs -chgrp" - <b>Release Comment</b>: Commands chmod, chown and chgrp now returns non zero exit code and an error message on failure instead of returning zero.</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6702'>HADOOP-6702</a>] - Incorrect exit codes for "dfs -chown", "dfs -chgrp"  when input is given in wildcard format.</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6724'>HADOOP-6724</a>] - IPC doesn't properly handle IOEs thrown by socket factory</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6760'>HADOOP-6760</a>] - WebServer shouldn't increase port number in case of negative port setting caused by Jetty's race</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6808'>HADOOP-6808</a>] - Document steps to enable {File|Ganglia}Context for kerberos metrics</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6833'>HADOOP-6833</a>] - IPC leaks call parameters when exceptions thrown</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6848'>HADOOP-6848</a>] - FsShell have resource leak</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6881'>HADOOP-6881</a>] - The efficient comparators aren't always used except for BytesWritable and Text</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6923'>HADOOP-6923</a>] - Native Libraries do not load if a different platform signature is returned from org.apache.hadoop.util.PlatformName</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6924'>HADOOP-6924</a>] - Build fails with non-Sun JREs due to different pathing to the operating system architecture shared libraries</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6928'>HADOOP-6928</a>] - Fix BooleanWritable comparator in 0.20</li>
-</ul>
-    
-<h3>Improvement</h3>
-<ul>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6574'>HADOOP-6574</a>] - Commit HADOOP-6414:Add command line help for -expunge command.    to Hadoop 0.20 </li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6816'>HADOOP-6816</a>] - enhance FsShell.dus() to include sum of totalSize</li>
-</ul>
-    
-<h3>New Feature</h3>
-<ul>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6382'>HADOOP-6382</a>] - publish hadoop jars to apache mvn repo. - <b>Release Comment</b>: The hadoop jars are renamed  from previous hadoop-<version>-<name>.jar to hadoop-<name>-<version>.jar. Applications and documentation need to be updated to use the new file naming scheme. </li>
-</ul>
-                    
-<h3>Task</h3>
-<ul>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6844'>HADOOP-6844</a>] - Update docs to reflect new jar names</li>
-</ul>
-        
-<h3>Test</h3>
-<ul>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6637'>HADOOP-6637</a>] - Benchmark overhead of RPC session establishment </li>
-</ul>
-        
-
-<h2>Changes Since Hadoop 0.20.1</h2>
-
-<h3>Common</h3>
-<h4>        Bug
-</h4>
-<ul>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-4802'>HADOOP-4802</a>] -         RPC Server send buffer retains size of largest response ever sent 
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-5611'>HADOOP-5611</a>] -         C++ libraries do not build on Debian Lenny
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-5612'>HADOOP-5612</a>] -         Some c++ scripts are not chmodded before ant execution
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-5623'>HADOOP-5623</a>] -         Streaming: process provided status messages are overwritten every 10 seoncds
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-5759'>HADOOP-5759</a>] -         IllegalArgumentException when CombineFileInputFormat is used as job InputFormat
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6097'>HADOOP-6097</a>] -         Multiple bugs w/ Hadoop archives
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6231'>HADOOP-6231</a>] -         Allow caching of filesystem instances to be disabled on a per-instance basis
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6269'>HADOOP-6269</a>] -         Missing synchronization for defaultResources in Configuration.addResource
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6315'>HADOOP-6315</a>] -         GzipCodec should not represent BuiltInZlibInflater as decompressorType
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6386'>HADOOP-6386</a>] -         NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6428'>HADOOP-6428</a>] -         HttpServer sleeps with negative values
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6460'>HADOOP-6460</a>] -         Namenode runs of out of memory due to memory leak in ipc Server
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6498'>HADOOP-6498</a>] -         IPC client  bug may cause rpc call hang
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6506'>HADOOP-6506</a>] -         Failing tests prevent the rest of test targets from execution.
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6524'>HADOOP-6524</a>] -         Contrib tests are failing Clover'ed build
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6575'>HADOOP-6575</a>] -         Tests do not run on 0.20 branch
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6576'>HADOOP-6576</a>] -         TestStreamingStatus is failing on 0.20 branch
-</li>
-</ul>
-
-<h4>        Task
-</h4>
-<ul>
-<li>[<a href='https://issues.apache.org/jira/browse/HADOOP-6328'>HADOOP-6328</a>] -         Hadoop 0.20 Docs - backport changes for streaming and m/r tutorial docs
-</li>
-</ul>
-
-
-<h3>HDFS</h3>
-<h4>        Bug
-</h4>
-<ul>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-101'>HDFS-101</a>] -         DFS write pipeline : DFSClient sometimes does not detect second datanode failure 
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-185'>HDFS-185</a>] -         Chown , chgrp , chmod operations allowed when namenode is in safemode .
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-187'>HDFS-187</a>] -         TestStartup fails if hdfs is running in the same machine
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-495'>HDFS-495</a>] -         Hadoop FSNamesystem startFileInternal() getLease() has bug
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-579'>HDFS-579</a>] -         HADOOP-3792 update of DfsTask incomplete
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-596'>HDFS-596</a>] -         Memory leak in libhdfs: hdfsFreeFileInfo() in libhdfs does not free memory for mOwner and mGroup
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-677'>HDFS-677</a>] -         Rename failure due to quota results in deletion of src directory
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-723'>HDFS-723</a>] -         Deadlock in DFSClient#DFSOutputStream
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-732'>HDFS-732</a>] -         HDFS files are ending up truncated
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-734'>HDFS-734</a>] -         TestDatanodeBlockScanner times out in branch 0.20
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-745'>HDFS-745</a>] -         TestFsck timeout on 0.20.
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-761'>HDFS-761</a>] -         Failure to process rename operation from edits log due to quota verification
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-781'>HDFS-781</a>] -         Metrics PendingDeletionBlocks is not decremented
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-793'>HDFS-793</a>] -         DataNode should first receive the whole packet ack message before it constructs and sends its own ack message for the packet
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-795'>HDFS-795</a>] -         DFS Write pipeline does not detect defective datanode correctly in some cases (HADOOP-3339)
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-872'>HDFS-872</a>] -         DFSClient 0.20.1 is incompatible with HDFS 0.20.2
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-927'>HDFS-927</a>] -         DFSInputStream retries too many times for new block locations
-</li>
-</ul>
-
-<h4>        Test
-</h4>
-<ul>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-784'>HDFS-784</a>] -         TestFsck times out on branch 0.20.1
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-907'>HDFS-907</a>] -         Add  tests for getBlockLocations and totalLoad metrics. 
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/HDFS-919'>HDFS-919</a>] -         Create test to validate the BlocksVerified metric
-</li>
-</ul>
-
-
-<h3>MapReduce</h3>
-<h4>        Bug
-</h4>
-<ul>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-112'>MAPREDUCE-112</a>] -         Reduce Input Records and Reduce Output Records counters are not being set when using the new Mapreduce reducer API
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-433'>MAPREDUCE-433</a>] -         TestReduceFetch failed.
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-826'>MAPREDUCE-826</a>] -         harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-979'>MAPREDUCE-979</a>] -         JobConf.getMemoryFor{Map|Reduce}Task doesn't fallback to newer config knobs when mapred.taskmaxvmem is set to DISABLED_MEMORY_LIMIT of -1
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-1010'>MAPREDUCE-1010</a>] -         Adding tests for changes in archives.
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-1068'>MAPREDUCE-1068</a>] -         In hadoop-0.20.0 streaming job do not throw proper verbose error message if file is not present
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-1070'>MAPREDUCE-1070</a>] -         Deadlock in FairSchedulerServlet
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-1112'>MAPREDUCE-1112</a>] -         Fix CombineFileInputFormat for hadoop 0.20
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-1147'>MAPREDUCE-1147</a>] -         Map output records counter missing for map-only jobs in new API
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-1163'>MAPREDUCE-1163</a>] -         hdfsJniHelper.h: Yahoo! specific paths are encoded
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-1182'>MAPREDUCE-1182</a>] -         Reducers fail with OutOfMemoryError while copying Map outputs
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-1251'>MAPREDUCE-1251</a>] -         c++ utils doesn't compile
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-1328'>MAPREDUCE-1328</a>] -         contrib/index  - modify build / ivy files as appropriate 
-</li>
-</ul>
-
-<h4>        Improvement
-</h4>
-<ul>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-623'>MAPREDUCE-623</a>] -         Resolve javac warnings in mapred
-</li>
-</ul>
-
-<h4>        New Feature
-</h4>
-<ul>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-1145'>MAPREDUCE-1145</a>] -         Multiple Outputs doesn't work with new API in 0.20 branch
-</li>
-<li>[<a href='https://issues.apache.org/jira/browse/MAPREDUCE-1170'>MAPREDUCE-1170</a>] -         MultipleInputs doesn't work with new API in 0.20 branch
-</li>
-</ul>
-
-
+		<a name="changes"></a>
 <h2>Changes Since Hadoop 0.20.0</h2>
 
 <h3>Common</h3>

+ 2 - 4
src/examples/org/apache/hadoop/examples/PiEstimator.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.examples;
 
 import java.io.IOException;
 import java.math.BigDecimal;
-import java.math.RoundingMode;
 import java.util.Iterator;
 
 import org.apache.hadoop.conf.Configured;
@@ -311,11 +310,10 @@ public class PiEstimator extends Configured implements Tool {
       }
 
       //compute estimated value
-      final BigDecimal numTotal
-          = BigDecimal.valueOf(numMaps).multiply(BigDecimal.valueOf(numPoints));
       return BigDecimal.valueOf(4).setScale(20)
           .multiply(BigDecimal.valueOf(numInside.get()))
-          .divide(numTotal, RoundingMode.HALF_UP);
+          .divide(BigDecimal.valueOf(numMaps))
+          .divide(BigDecimal.valueOf(numPoints));
     } finally {
       fs.delete(TMP_DIR, true);
     }
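
The removed lines compute the combined sample count and divide by it with an explicit RoundingMode; the restored chain of exact divide() calls throws ArithmeticException whenever a quotient has a non-terminating decimal expansion (for instance with a map count of 3). The JDK behaviour in isolation:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class BigDecimalDivideDemo {
      public static void main(String[] args) {
        BigDecimal scaled = BigDecimal.valueOf(4).setScale(20)
            .multiply(BigDecimal.valueOf(2));                   // 8 at scale 20
        BigDecimal maps = BigDecimal.valueOf(3);
        System.out.println(scaled.divide(maps, RoundingMode.HALF_UP)); // rounded result
        try {
          System.out.println(scaled.divide(maps));              // exact division
        } catch (ArithmeticException e) {
          System.out.println("exact divide failed: " + e.getMessage());
        }
      }
    }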

+ 30 - 71
src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

@@ -29,7 +29,6 @@ import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus;
 import org.apache.hadoop.hdfs.protocol.*;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -186,7 +185,9 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
     // dfs.write.packet.size is an internal config variable
     this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024);
-    this.maxBlockAcquireFailures = getMaxBlockAcquireFailures(conf);
+    this.maxBlockAcquireFailures = 
+                          conf.getInt("dfs.client.max.block.acquire.failures",
+                                      MAX_BLOCK_ACQUIRE_FAILURES);
     
     try {
       this.ugi = UnixUserGroupInformation.login(conf, true);
@@ -216,11 +217,6 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     }
   }
 
-  static int getMaxBlockAcquireFailures(Configuration conf) {
-    return conf.getInt("dfs.client.max.block.acquire.failures",
-                       MAX_BLOCK_ACQUIRE_FAILURES);
-  }
-
   private void checkOpen() throws IOException {
     if (!clientRunning) {
       IOException result = new IOException("Filesystem closed");
@@ -306,7 +302,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     return hints;
   }
 
-  static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
+  private static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
       String src, long start, long length) throws IOException {
     try {
       return namenode.getBlockLocations(src, start, length);
@@ -658,28 +654,25 @@ public class DFSClient implements FSConstants, java.io.Closeable {
       final int timeout = 3000 * datanodes.length + socketTimeout;
       boolean done = false;
       for(int j = 0; !done && j < datanodes.length; j++) {
-        Socket sock = null;
-        DataOutputStream out = null;
-        DataInputStream in = null;
-        
-        try {
-          //connect to a datanode
-          sock = socketFactory.createSocket();
-          NetUtils.connect(sock,
-              NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
-          sock.setSoTimeout(timeout);
+        //connect to a datanode
+        final Socket sock = socketFactory.createSocket();
+        NetUtils.connect(sock, 
+                         NetUtils.createSocketAddr(datanodes[j].getName()),
+                         timeout);
+        sock.setSoTimeout(timeout);
 
-          out = new DataOutputStream(
-              new BufferedOutputStream(NetUtils.getOutputStream(sock), 
-                                       DataNode.SMALL_BUFFER_SIZE));
-          in = new DataInputStream(NetUtils.getInputStream(sock));
+        DataOutputStream out = new DataOutputStream(
+            new BufferedOutputStream(NetUtils.getOutputStream(sock), 
+                                     DataNode.SMALL_BUFFER_SIZE));
+        DataInputStream in = new DataInputStream(NetUtils.getInputStream(sock));
 
+        // get block MD5
+        try {
           if (LOG.isDebugEnabled()) {
             LOG.debug("write to " + datanodes[j].getName() + ": "
-                + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
+                + DataTransferProtocol.OP_BLOCK_CHECKSUM +
+                ", block=" + block);
           }
-
-          // get block MD5
           out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
           out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
           out.writeLong(block.getBlockId());
@@ -1030,17 +1023,10 @@ public class DFSClient implements FSConstants, java.io.Closeable {
       }
     }
 
-    void close() {
-      while (true) {
-        String src;
-        OutputStream out;
-        synchronized (this) {
-          if (pendingCreates.isEmpty()) {
-            return;
-          }
-          src = pendingCreates.firstKey();
-          out = pendingCreates.remove(src);
-        }
+    synchronized void close() {
+      while (!pendingCreates.isEmpty()) {
+        String src = pendingCreates.firstKey();
+        OutputStream out = pendingCreates.remove(src);
         if (out != null) {
           try {
             out.close();
@@ -1456,18 +1442,6 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     private Block currentBlock = null;
     private long pos = 0;
     private long blockEnd = -1;
-
-    /**
-     * This variable tracks the number of failures since the start of the
-     * most recent user-facing operation. That is to say, it should be reset
-     * whenever the user makes a call on this stream, and if at any point
-     * during the retry logic, the failure count exceeds a threshold,
-     * the errors will be thrown back to the operation.
-     *
-     * Specifically this counts the number of times the client has gone
-     * back to the namenode to get a new list of block locations, and is
-     * capped at maxBlockAcquireFailures
-     */
     private int failures = 0;
 
     /* XXX Use of CocurrentHashMap is temp fix. Need to fix 
@@ -1760,8 +1734,6 @@ public class DFSClient implements FSConstants, java.io.Closeable {
       if (closed) {
         throw new IOException("Stream closed");
       }
-      failures = 0;
-
       if (pos < getFileLength()) {
         int retries = 2;
         while (retries > 0) {
@@ -1905,7 +1877,6 @@ public class DFSClient implements FSConstants, java.io.Closeable {
       if (closed) {
         throw new IOException("Stream closed");
       }
-      failures = 0;
       long filelen = getFileLength();
       if ((position < 0) || (position >= filelen)) {
         return -1;
@@ -2418,24 +2389,17 @@ public class DFSClient implements FSConstants, java.io.Closeable {
       public void run() {
 
         this.setName("ResponseProcessor for block " + block);
-        PipelineAck ack = new PipelineAck();
   
         while (!closed && clientRunning && !lastPacketInBlock) {
           // process responses from datanodes.
           try {
-            // read an ack from the pipeline
-            ack.readFields(blockReplyStream, targets.length);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("DFSClient " + ack);
-            }
-            long seqno = ack.getSeqno();
-            if (seqno == PipelineAck.HEART_BEAT.getSeqno()) {
+            // verify seqno from datanode
+            long seqno = blockReplyStream.readLong();
+            LOG.debug("DFSClient received ack for seqno " + seqno);
+            if (seqno == -1) {
               continue;
             } else if (seqno == -2) {
-              // This signifies that some pipeline node failed to read downstream
-              // and therefore has no idea what sequence number the message corresponds
-              // to. So, we don't try to match it up with an ack.
-              assert ! ack.isSuccess();
+              // no nothing
             } else {
               Packet one = null;
               synchronized (ackQueue) {
@@ -2451,7 +2415,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
 
             // processes response status from all datanodes.
             for (int i = 0; i < targets.length && clientRunning; i++) {
-              short reply = ack.getReply(i);
+              short reply = blockReplyStream.readShort();
               if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                 errorIndex = i; // first bad datanode
                 throw new IOException("Bad response " + reply +
@@ -3181,13 +3145,8 @@ public class DFSClient implements FSConstants, java.io.Closeable {
      */
     @Override
     public void close() throws IOException {
-      if (closed) {
-        IOException e = lastException;
-        if (e == null)
-          return;
-        else
-          throw e;
-      }
+      if(closed)
+        return;
       closeInternal();
       leasechecker.remove(src);
       

+ 0 - 92
src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java

@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 
 /**
  * 
@@ -59,95 +56,6 @@ public interface DataTransferProtocol {
   public static final int OP_STATUS_ERROR_EXISTS = 4;  
   public static final int OP_STATUS_CHECKSUM_OK = 5;  
 
-  /* seqno for a heartbeat packet */
-  public static final int HEARTBEAT_SEQNO = -1;
 
-  /** reply **/
-  public static class PipelineAck {
-    private long seqno;
-    private short replies[];
-    final public static PipelineAck HEART_BEAT =
-      new PipelineAck(HEARTBEAT_SEQNO, new short[0]);
 
-    /** default constructor **/
-    public PipelineAck() {
-    }
-
-    /**
-     * Constructor
-     * @param seqno sequence number
-     * @param replies an array of replies
-     */
-    public PipelineAck(long seqno, short[] replies) {
-      this.seqno = seqno;
-      this.replies = replies;
-    }
-
-    /**
-     * Get the sequence number
-     * @return the sequence number
-     */
-    public long getSeqno() {
-      return seqno;
-    }
-
-    /**
-     * get the ith reply
-     * @return the the ith reply
-     */
-    public short getReply(int i) {
-      return replies[i];
-    }
-
-    /**
-     * Check if this ack contains error status
-     * @return true if all statuses are SUCCESS
-     */
-    public boolean isSuccess() {
-      for (short reply : replies) {
-        if (reply != OP_STATUS_SUCCESS) {
-          return false;
-        }
-      }
-      return true;
-    }
-
-    public void readFields(DataInput in, int numRepliesExpected)
-      throws IOException {
-      assert numRepliesExpected > 0;
-
-      seqno = in.readLong();
-      if (seqno == HEARTBEAT_SEQNO) {
-        // Heartbeat doesn't forward any replies
-        replies = new short[0];
-      } else {
-        replies = new short[numRepliesExpected];
-        for (int i=0; i < replies.length; i++) {
-          replies[i] = in.readShort();
-        }
-      }
-    }
-
-    public void write(DataOutput out) throws IOException {
-      out.writeLong(seqno);
-      for(short reply : replies) {
-        out.writeShort(reply);
-      }
-    }
-
-    @Override //Object
-    public String toString() {
-      StringBuilder ack = new StringBuilder("Replies for seqno ");
-      ack.append( seqno ).append( " are" );
-      for(short reply : replies) {
-        ack.append(" ");
-        if (reply == OP_STATUS_SUCCESS) {
-          ack.append("SUCCESS");
-        } else {
-          ack.append("FAILED");
-        }
-      }
-      return ack.toString();
-    }
-  }
 }

+ 4 - 93
src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
 
 
@@ -294,119 +295,30 @@ public abstract class Storage extends StorageInfo {
         throw new IOException("Cannot create directory " + curDir);
     }
 
-    /**
-     * Directory {@code current} contains latest files defining
-     * the file system meta-data.
-     * 
-     * @return the directory path
-     */
     public File getCurrentDir() {
       return new File(root, STORAGE_DIR_CURRENT);
     }
-
-    /**
-     * File {@code VERSION} contains the following fields:
-     * <ol>
-     * <li>node type</li>
-     * <li>layout version</li>
-     * <li>namespaceID</li>
-     * <li>fs state creation time</li>
-     * <li>other fields specific for this node type</li>
-     * </ol>
-     * The version file is always written last during storage directory updates.
-     * The existence of the version file indicates that all other files have
-     * been successfully written in the storage directory, the storage is valid
-     * and does not need to be recovered.
-     * 
-     * @return the version file path
-     */
     public File getVersionFile() {
       return new File(new File(root, STORAGE_DIR_CURRENT), STORAGE_FILE_VERSION);
     }
-
-    /**
-     * File {@code VERSION} from the {@code previous} directory.
-     * 
-     * @return the previous version file path
-     */
     public File getPreviousVersionFile() {
       return new File(new File(root, STORAGE_DIR_PREVIOUS), STORAGE_FILE_VERSION);
     }
-
-    /**
-     * Directory {@code previous} contains the previous file system state,
-     * which the system can be rolled back to.
-     * 
-     * @return the directory path
-     */
     public File getPreviousDir() {
       return new File(root, STORAGE_DIR_PREVIOUS);
     }
-
-    /**
-     * {@code previous.tmp} is a transient directory, which holds
-     * current file system state while the new state is saved into the new
-     * {@code current} during upgrade.
-     * If the saving succeeds {@code previous.tmp} will be moved to
-     * {@code previous}, otherwise it will be renamed back to 
-     * {@code current} by the recovery procedure during startup.
-     * 
-     * @return the directory path
-     */
     public File getPreviousTmp() {
       return new File(root, STORAGE_TMP_PREVIOUS);
     }
-
-    /**
-     * {@code removed.tmp} is a transient directory, which holds
-     * current file system state while the previous state is moved into
-     * {@code current} during rollback.
-     * If the moving succeeds {@code removed.tmp} will be removed,
-     * otherwise it will be renamed back to 
-     * {@code current} by the recovery procedure during startup.
-     * 
-     * @return the directory path
-     */
     public File getRemovedTmp() {
       return new File(root, STORAGE_TMP_REMOVED);
     }
-
-    /**
-     * {@code finalized.tmp} is a transient directory, which holds
-     * the {@code previous} file system state while it is being removed
-     * in response to the finalize request.
-     * Finalize operation will remove {@code finalized.tmp} when completed,
-     * otherwise the removal will resume upon the system startup.
-     * 
-     * @return the directory path
-     */
     public File getFinalizedTmp() {
       return new File(root, STORAGE_TMP_FINALIZED);
     }
-
-    /**
-     * {@code lastcheckpoint.tmp} is a transient directory, which holds
-     * current file system state while the new state is saved into the new
-     * {@code current} during regular namespace updates.
-     * If the saving succeeds {@code lastcheckpoint.tmp} will be moved to
-     * {@code previous.checkpoint}, otherwise it will be renamed back to 
-     * {@code current} by the recovery procedure during startup.
-     * 
-     * @return the directory path
-     */
     public File getLastCheckpointTmp() {
       return new File(root, STORAGE_TMP_LAST_CKPT);
     }
-
-    /**
-     * {@code previous.checkpoint} is a directory, which holds the previous
-     * (before the last save) state of the storage directory.
-     * The directory is created as a reference only, it does not play role
-     * in state recovery procedures, and is recycled automatically, 
-     * but it may be useful for manual recovery of a stale state of the system.
-     * 
-     * @return the directory path
-     */
     public File getPreviousCheckpoint() {
       return new File(root, STORAGE_PREVIOUS_CKPT);
     }
@@ -417,9 +329,8 @@ public abstract class Storage extends StorageInfo {
      * @param startOpt a startup option.
      *  
      * @return state {@link StorageState} of the storage directory 
-     * @throws InconsistentFSStateException if directory state is not 
-     * consistent and cannot be recovered.
-     * @throws IOException
+     * @throws {@link InconsistentFSStateException} if directory state is not 
+     * consistent and cannot be recovered 
      */
     public StorageState analyzeStorage(StartupOption startOpt) throws IOException {
       assert root != null : "root is null";
@@ -618,7 +529,7 @@ public abstract class Storage extends StorageInfo {
         file.close();
         return null;
       } catch(IOException e) {
-        LOG.error("Cannot create lock on " + lockF, e);
+        LOG.info(StringUtils.stringifyException(e));
         file.close();
         throw e;
       }
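
The javadoc removed above documented the storage layout that Storage manages: current (with its VERSION file written last), previous for rollback, the transient previous.tmp, removed.tmp, finalized.tmp and lastcheckpoint.tmp directories, and previous.checkpoint kept for manual recovery. A small sketch of that layout, with directory names taken from the removed comments and a hypothetical helper class:

import java.io.File;

class StorageLayout {
  private final File root;

  StorageLayout(File root) { this.root = root; }

  File current()            { return new File(root, "current"); }              // latest metadata
  File version()            { return new File(current(), "VERSION"); }         // written last; its presence marks a valid state
  File previous()           { return new File(root, "previous"); }             // rollback target
  File previousTmp()        { return new File(root, "previous.tmp"); }         // transient during upgrade
  File removedTmp()         { return new File(root, "removed.tmp"); }          // transient during rollback
  File finalizedTmp()       { return new File(root, "finalized.tmp"); }        // transient during finalize
  File lastCheckpointTmp()  { return new File(root, "lastcheckpoint.tmp"); }   // transient during a namespace save
  File previousCheckpoint() { return new File(root, "previous.checkpoint"); }  // previous saved state, useful for manual recovery
}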

+ 47 - 60
src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java

@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
@@ -774,13 +773,8 @@ class BlockReceiver implements java.io.Closeable, FSConstants {
               // send a heartbeat if it is time.
               now = System.currentTimeMillis();
               if (now - lastHeartbeat > datanode.socketTimeout/2) {
-                PipelineAck.HEART_BEAT.write(replyOut);  // send heart beat
+                replyOut.writeLong(-1); // send heartbeat
                 replyOut.flush();
-                if (LOG.isDebugEnabled()) {
-                  LOG.debug("PacketResponder " + numTargets +
-                            " for block " + block + 
-                            " sent a heartbeat");
-                }
                 lastHeartbeat = now;
               }
             }
@@ -820,8 +814,8 @@ class BlockReceiver implements java.io.Closeable, FSConstants {
               lastPacket = true;
             }
 
-            new PipelineAck(expected, new short[]{
-                DataTransferProtocol.OP_STATUS_SUCCESS}).write(replyOut);
+            replyOut.writeLong(expected);
+            replyOut.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
             replyOut.flush();
         } catch (Exception e) {
           if (running) {
@@ -851,41 +845,23 @@ class BlockReceiver implements java.io.Closeable, FSConstants {
       while (running && datanode.shouldRun && !lastPacketInBlock) {
 
         try {
+            short op = DataTransferProtocol.OP_STATUS_SUCCESS;
             boolean didRead = false;
-
-            /**
-             * Sequence number -2 is a special value that is used when
-             * a DN fails to read an ack from a downstream. In this case,
-             * it needs to tell the client that there's been an error downstream
-             * but has no valid sequence number to use. Thus, -2 is used
-             * as an UNKNOWN value.
-             */
             long expected = -2;
-
-            PipelineAck ack = new PipelineAck();
             try { 
-              // read an ack from downstream datanode
-              ack.readFields(mirrorIn, numTargets);
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("PacketResponder " + numTargets + " got " + ack);
-              }
-              long seqno = ack.getSeqno();
+              // read seqno from downstream datanode
+              long seqno = mirrorIn.readLong();
               didRead = true;
-              if (seqno == PipelineAck.HEART_BEAT.getSeqno()) {
-                ack.write(replyOut); // send keepalive
+              if (seqno == -1) {
+                replyOut.writeLong(-1); // send keepalive
                 replyOut.flush();
+                LOG.debug("PacketResponder " + numTargets + " got -1");
                 continue;
               } else if (seqno == -2) {
-                // A downstream node must have failed to read an ack. We need
-                // to forward this on.
-                assert ! ack.isSuccess();
+                LOG.debug("PacketResponder " + numTargets + " got -2");
               } else {
-                if (seqno < 0) {
-                  throw new IOException("Received an invalid negative sequence number. "
-                                        + "Ack = " + ack);
-                }
-                assert seqno >= 0;
-
+                LOG.debug("PacketResponder " + numTargets + " got seqno = " + 
+                    seqno);
                 Packet pkt = null;
                 synchronized (this) {
                   while (running && datanode.shouldRun && ackQueue.size() == 0) {
@@ -900,6 +876,7 @@ class BlockReceiver implements java.io.Closeable, FSConstants {
                   pkt = ackQueue.removeFirst();
                   expected = pkt.seqno;
                   notifyAll();
+                  LOG.debug("PacketResponder " + numTargets + " seqno = " + seqno);
                   if (seqno != expected) {
                     throw new IOException("PacketResponder " + numTargets +
                                           " for block " + block +
@@ -932,6 +909,10 @@ class BlockReceiver implements java.io.Closeable, FSConstants {
               continue;
             }
             
+            if (!didRead) {
+              op = DataTransferProtocol.OP_STATUS_ERROR;
+            }
+            
             // If this is the last packet in block, then close block
             // file and finalize the block before responding success
             if (lastPacketInBlock && !receiver.finalized) {
@@ -954,37 +935,43 @@ class BlockReceiver implements java.io.Closeable, FSConstants {
               }
             }
 
-            // construct my ack message.
-            short[] replies = new short[1 + numTargets];
-            if (!didRead) { // no ack is read
-              replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
-              // Fill all downstream nodes with ERROR - the client will
-              // eject the first node with ERROR status (our mirror)
-              for (int i = 1; i < replies.length; i++) {
-                replies[i] = DataTransferProtocol.OP_STATUS_ERROR;
-              }
-            } else {
-              replies = new short[1+numTargets];
-              replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
-              for (int i=0; i<numTargets; i++) {
-                replies[i+1] = ack.getReply(i);
+            // send my status back to upstream datanode
+            replyOut.writeLong(expected); // send seqno upstream
+            replyOut.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
+
+            LOG.debug("PacketResponder " + numTargets + 
+                      " for block " + block +
+                      " responded my status " +
+                      " for seqno " + expected);
+
+            // forward responses from downstream datanodes.
+            for (int i = 0; i < numTargets && datanode.shouldRun; i++) {
+              try {
+                if (op == DataTransferProtocol.OP_STATUS_SUCCESS) {
+                  op = mirrorIn.readShort();
+                  if (op != DataTransferProtocol.OP_STATUS_SUCCESS) {
+                    LOG.debug("PacketResponder for block " + block +
+                              ": error code received from downstream " +
+                              " datanode[" + i + "] " + op);
+                  }
+                }
+              } catch (Throwable e) {
+                op = DataTransferProtocol.OP_STATUS_ERROR;
               }
+              replyOut.writeShort(op);
             }
-            PipelineAck replyAck = new PipelineAck(expected, replies);
- 
-            // send my ack back to upstream datanode
-            replyAck.write(replyOut);
             replyOut.flush();
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("PacketResponder " + numTargets +
-                        " for block " + block +
-                        " responded an ack: " + replyAck);
-            }
+            LOG.debug("PacketResponder " + block + " " + numTargets + 
+                      " responded other status " + " for seqno " + expected);
 
+            // If we were unable to read the seqno from downstream, then stop.
+            if (expected == -2) {
+              running = false;
+            }
             // If we forwarded an error response from a downstream datanode
             // and we are acting on behalf of a client, then we quit. The 
             // client will drive the recovery mechanism.
-            if (!replyAck.isSuccess() && receiver.clientName.length() > 0) {
+            if (op == DataTransferProtocol.OP_STATUS_ERROR && receiver.clientName.length() > 0) {
               running = false;
             }
         } catch (IOException e) {
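
With PipelineAck gone, the PacketResponder in this hunk goes back to writing its own seqno and status upstream and then relaying one status short per downstream datanode, substituting an error code once a downstream read fails. A hedged sketch of that forwarding pattern, with illustrative names rather than the real BlockReceiver internals:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class FlatResponderSketch {
  static final short SUCCESS = 0;  // illustrative values only
  static final short ERROR = 1;

  static void respond(DataInputStream mirrorIn, DataOutputStream replyOut,
                      long expectedSeqno, int numTargets) throws IOException {
    replyOut.writeLong(expectedSeqno);  // my seqno goes upstream first
    replyOut.writeShort(SUCCESS);       // followed by my own status

    short op = SUCCESS;
    for (int i = 0; i < numTargets; i++) {
      try {
        if (op == SUCCESS) {
          op = mirrorIn.readShort();    // status from downstream datanode i
        }
      } catch (IOException e) {
        op = ERROR;                     // downstream read failed: report errors from here on
      }
      replyOut.writeShort(op);          // forward (or substitute) the status
    }
    replyOut.flush();
  }
}

Once op turns into an error it sticks, so everything at or below the failure is reported as failed and, per the comment kept in the hunk, the client drives pipeline recovery.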

+ 140 - 196
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -25,6 +25,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.permission.*;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.MetricsUtil;
+import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -46,6 +49,8 @@ class FSDirectory implements FSConstants, Closeable {
   final INodeDirectoryWithQuota rootDir;
   FSImage fsImage;  
   private boolean ready = false;
+  // Metrics record
+  private MetricsRecord directoryMetrics = null;
 
   /** Access an existing dfs name directory. */
   FSDirectory(FSNamesystem ns, Configuration conf) {
@@ -60,6 +65,13 @@ class FSDirectory implements FSConstants, Closeable {
         Integer.MAX_VALUE, -1);
     this.fsImage = fsImage;
     namesystem = ns;
+    initialize(conf);
+  }
+    
+  private void initialize(Configuration conf) {
+    MetricsContext metricsContext = MetricsUtil.getContext("dfs");
+    directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory");
+    directoryMetrics.setTag("sessionId", conf.get("session.id"));
   }
 
   void loadFSImage(Collection<File> dataDirs,
@@ -73,7 +85,7 @@ class FSDirectory implements FSConstants, Closeable {
     }
     try {
       if (fsImage.recoverTransitionRead(dataDirs, editsDirs, startOpt)) {
-        fsImage.saveNamespace(true);
+        fsImage.saveFSImage();
       }
       FSEditLog editLog = fsImage.getEditLog();
       assert editLog != null : "editLog must be initialized";
@@ -91,8 +103,8 @@ class FSDirectory implements FSConstants, Closeable {
   }
 
   private void incrDeletedFileCount(int count) {
-    if (namesystem != null)
-      NameNode.getNameNodeMetrics().numFilesDeleted.inc(count);
+    directoryMetrics.incrMetric("files_deleted", count);
+    directoryMetrics.update();
   }
     
   /**
@@ -250,8 +262,8 @@ class FSDirectory implements FSConstants, Closeable {
       INodeFile fileNode = (INodeFile) inodes[inodes.length-1];
 
       // check quota limits and updated space consumed
-      updateCount(inodes, inodes.length-1, 0,
-          fileNode.getPreferredBlockSize()*fileNode.getReplication(), true);
+      updateCount(inodes, inodes.length-1, 0, 
+                  fileNode.getPreferredBlockSize()*fileNode.getReplication());
       
       // associate the new list of blocks with this file
       namesystem.blocksMap.addINode(block, fileNode);
@@ -351,11 +363,9 @@ class FSDirectory implements FSConstants, Closeable {
       // check the validation of the source
       if (srcInodes[srcInodes.length-1] == null) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-            + "failed to rename " + src + " to " + dst
-            + " because source does not exist");
+                                     +"failed to rename "+src+" to "+dst+ " because source does not exist");
         return false;
-      } 
-      if (srcInodes.length == 1) {
+      } else if (srcInodes.length == 1) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
             +"failed to rename "+src+" to "+dst+ " because source is the root");
         return false;
@@ -364,78 +374,71 @@ class FSDirectory implements FSConstants, Closeable {
         dst += Path.SEPARATOR + new Path(src).getName();
       }
       
-      // check the validity of the destination
-      if (dst.equals(src)) {
-        return true;
+      // remove source
+      INode srcChild = null;
+      try {
+        srcChild = removeChild(srcInodes, srcInodes.length-1);
+      } catch (IOException e) {
+        // srcChild == null; go to next if statement
       }
-      // dst cannot be directory or a file under src
-      if (dst.startsWith(src) && 
-          dst.charAt(src.length()) == Path.SEPARATOR_CHAR) {
+      if (srcChild == null) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-            + "failed to rename " + src + " to " + dst
-            + " because destination starts with src");
+            +"failed to rename "+src+" to "+dst+ " because the source can not be removed");
         return false;
       }
+
+      String srcChildName = srcChild.getLocalName();
       
+      // check the validity of the destination
+      INode dstChild = null;
+      QuotaExceededException failureByQuota = null;
+
       byte[][] dstComponents = INode.getPathComponents(dst);
       INode[] dstInodes = new INode[dstComponents.length];
       rootDir.getExistingPathINodes(dstComponents, dstInodes);
-      if (dstInodes[dstInodes.length-1] != null) {
+      if (dstInodes[dstInodes.length-1] != null) { //check if destination exists
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
                                      +"failed to rename "+src+" to "+dst+ 
                                      " because destination exists");
-        return false;
-      }
-      if (dstInodes[dstInodes.length-2] == null) {
+      } else if (dstInodes[dstInodes.length-2] == null) { // check if its parent exists
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
             +"failed to rename "+src+" to "+dst+ 
-            " because destination's parent does not exist");
-        return false;
+            " because destination's parent does not exists");
       }
-      
-      // Ensure dst has quota to accommodate rename
-      verifyQuotaForRename(srcInodes,dstInodes);
-      
-      INode dstChild = null;
-      INode srcChild = null;
-      String srcChildName = null;
-      try {
-        // remove src
-        srcChild = removeChild(srcInodes, srcInodes.length-1);
-        if (srcChild == null) {
-          NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-              + "failed to rename " + src + " to " + dst
-              + " because the source can not be removed");
-          return false;
-        }
-        srcChildName = srcChild.getLocalName();
+      else {
+        // add to the destination
         srcChild.setLocalName(dstComponents[dstInodes.length-1]);
-        
-        // add src to the destination
-        dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1,
-            srcChild, -1, false);
-        if (dstChild != null) {
-          srcChild = null;
-          if (NameNode.stateChangeLog.isDebugEnabled()) {
-            NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: " + src
-                    + " is renamed to " + dst);
-          }
-          // update modification time of dst and the parent of src
-          srcInodes[srcInodes.length-2].setModificationTime(timestamp);
-          dstInodes[dstInodes.length-2].setModificationTime(timestamp);
-          return true;
+        try {
+          // add it to the namespace
+          dstChild = addChild(dstInodes, dstInodes.length-1, srcChild, false);
+        } catch (QuotaExceededException qe) {
+          failureByQuota = qe;
+        }
+      }
+      if (dstChild != null) {
+        if (NameNode.stateChangeLog.isDebugEnabled()) {
+          NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
+            +src+" is renamed to "+dst);
         }
-      } finally {
-        if (dstChild == null && srcChild != null) {
+
+        // update modification time of dst and the parent of src
+        srcInodes[srcInodes.length-2].setModificationTime(timestamp);
+        dstInodes[dstInodes.length-2].setModificationTime(timestamp);
+        return true;
+      } else {
+        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
+            +"failed to rename "+src+" to "+dst);
+        try {
           // put it back
           srcChild.setLocalName(srcChildName);
-          addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, srcChild, -1,
-              false);
+          addChild(srcInodes, srcInodes.length-1, srcChild, false);
+        } catch (IOException ignored) {}
+        if (failureByQuota != null) {
+          throw failureByQuota;
+        } else {
+          return false;
         }
       }
-      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-          +"failed to rename "+src+" to "+dst);
-      return false;
     }
   }
 
@@ -480,7 +483,7 @@ class FSDirectory implements FSConstants, Closeable {
       // check disk quota
       long dsDelta = (replication - oldReplication[0]) *
            (fileNode.diskspaceConsumed()/oldReplication[0]);
-      updateCount(inodes, inodes.length-1, 0, dsDelta, true);
+      updateCount(inodes, inodes.length-1, 0, dsDelta);
 
       fileNode.setReplication(replication);
       fileBlocks = fileNode.getBlocks();
@@ -557,19 +560,17 @@ class FSDirectory implements FSConstants, Closeable {
   /**
    * Remove the file from management, return blocks
    */
-  boolean delete(String src) {
+  INode delete(String src) {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "+src);
     }
     waitForReady();
     long now = FSNamesystem.now();
-    int filesRemoved = unprotectedDelete(src, now);
-    if (filesRemoved <= 0) {
-      return false;
+    INode deletedNode = unprotectedDelete(src, now);
+    if (deletedNode != null) {
+      fsImage.getEditLog().logDelete(src, now);
     }
-    incrDeletedFileCount(filesRemoved);
-    fsImage.getEditLog().logDelete(src, now);
-    return true;
+    return deletedNode;
   }
   
   /** Return if a directory is empty or not **/
@@ -594,9 +595,9 @@ class FSDirectory implements FSConstants, Closeable {
    * @param src a string representation of a path to an inode
    * @param modificationTime the time the inode is removed
    * @param deletedBlocks the place holder for the blocks to be removed
-   * @return the number of inodes deleted; 0 if no inodes are deleted.
+   * @return if the deletion succeeds
    */ 
-  int unprotectedDelete(String src, long modificationTime) {
+  INode unprotectedDelete(String src, long modificationTime) {
     src = normalizePath(src);
 
     synchronized (rootDir) {
@@ -606,12 +607,12 @@ class FSDirectory implements FSConstants, Closeable {
       if (targetNode == null) { // non-existent src
         NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
             +"failed to remove "+src+" because it does not exist");
-        return 0;
+        return null;
       } else if (inodes.length == 1) { // src is the root
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " +
             "failed to remove " + src +
             " because the root is not allowed to be deleted");
-        return 0;
+        return null;
       } else {
         try {
           // Remove the node from the namespace
@@ -621,16 +622,17 @@ class FSDirectory implements FSConstants, Closeable {
           // GC all the blocks underneath the node.
           ArrayList<Block> v = new ArrayList<Block>();
           int filesRemoved = targetNode.collectSubtreeBlocksAndClear(v);
+          incrDeletedFileCount(filesRemoved);
           namesystem.removePathAndBlocks(src, v);
           if (NameNode.stateChangeLog.isDebugEnabled()) {
             NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
               +src+" is removed");
           }
-          return filesRemoved;
+          return targetNode;
         } catch (IOException e) {
           NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " +
               "failed to remove " + src + " because " + e.getMessage());
-          return 0;
+          return null;
         }
       }
     }
@@ -639,9 +641,19 @@ class FSDirectory implements FSConstants, Closeable {
   /**
    * Replaces the specified inode with the specified one.
    */
-  void replaceNode(String path, INodeFile oldnode, INodeFile newnode)
-      throws IOException {
+  void replaceNode(String path, INodeFile oldnode, INodeFile newnode) 
+                                                   throws IOException {
+    replaceNode(path, oldnode, newnode, true);
+  }
+  
+  /**
+   * @see #replaceNode(String, INodeFile, INodeFile)
+   */
+  private void replaceNode(String path, INodeFile oldnode, INodeFile newnode,
+                           boolean updateDiskspace) throws IOException {    
     synchronized (rootDir) {
+      long dsOld = oldnode.diskspaceConsumed();
+      
       //
       // Remove the node from the namespace 
       //
@@ -658,6 +670,18 @@ class FSDirectory implements FSConstants, Closeable {
       
       rootDir.addNode(path, newnode); 
 
+      //check if disk space needs to be updated.
+      long dsNew = 0;
+      if (updateDiskspace && (dsNew = newnode.diskspaceConsumed()) != dsOld) {
+        try {
+          updateSpaceConsumed(path, 0, dsNew-dsOld);
+        } catch (QuotaExceededException e) {
+          // undo
+          replaceNode(path, newnode, oldnode, false);
+          throw e;
+        }
+      }
+      
       int index = 0;
       for (Block b : newnode.getBlocks()) {
         BlockInfo info = namesystem.blocksMap.addINode(b, newnode);
@@ -804,7 +828,7 @@ class FSDirectory implements FSConstants, Closeable {
         throw new FileNotFoundException(path + 
                                         " does not exist under rootDir.");
       }
-      updateCount(inodes, len-1, nsDelta, dsDelta, true);
+      updateCount(inodes, len-1, nsDelta, dsDelta);
     }
   }
   
@@ -814,11 +838,10 @@ class FSDirectory implements FSConstants, Closeable {
    * @param numOfINodes the number of inodes to update starting from index 0
    * @param nsDelta the delta change of namespace
    * @param dsDelta the delta change of diskspace
-   * @param checkQuota if true then check if quota is exceeded
    * @throws QuotaExceededException if the new count violates any quota limit
    */
   private void updateCount(INode[] inodes, int numOfINodes, 
-                           long nsDelta, long dsDelta, boolean checkQuota)
+                           long nsDelta, long dsDelta)
                            throws QuotaExceededException {
     if (!ready) {
       //still intializing. do not check or update quotas.
@@ -827,27 +850,28 @@ class FSDirectory implements FSConstants, Closeable {
     if (numOfINodes>inodes.length) {
       numOfINodes = inodes.length;
     }
-    if (checkQuota) {
-      verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null);
-    }
-    for(int i = 0; i < numOfINodes; i++) {
-      if (inodes[i].isQuotaSet()) { // a directory with quota
-        INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; 
-        node.updateNumItemsInTree(nsDelta, dsDelta);
-      }
-    }
-  }
-  
-  /** 
-   * update quota of each inode and check to see if quota is exceeded. 
-   * See {@link #updateCount(INode[], int, long, long, boolean)}
-   */ 
-  private void updateCountNoQuotaCheck(INode[] inodes, int numOfINodes, 
-                           long nsDelta, long dsDelta) {
+    // check existing components in the path  
+    int i=0;
     try {
-      updateCount(inodes, numOfINodes, nsDelta, dsDelta, false);
+      for(; i < numOfINodes; i++) {
+        if (inodes[i].isQuotaSet()) { // a directory with quota
+          INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; 
+          node.updateNumItemsInTree(nsDelta, dsDelta);
+        }
+      }
     } catch (QuotaExceededException e) {
-      NameNode.LOG.warn("FSDirectory.updateCountNoQuotaCheck - unexpected ", e);
+      e.setPathName(getFullPathName(inodes, i));
+      // undo updates
+      for( ; i-- > 0; ) {
+        try {
+          if (inodes[i].isQuotaSet()) { // a directory with quota
+            INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; 
+            node.updateNumItemsInTree(-nsDelta, -dsDelta);
+          }
+        } catch (IOException ingored) {
+        }
+      }
+      throw e;
     }
   }
   
@@ -907,7 +931,7 @@ class FSDirectory implements FSConstants, Closeable {
           return false;
         }
         // Directory creation also count towards FilesCreated
-        // to match count of FilesDeleted metric. 
+        // to match count of files_deleted metric. 
         if (namesystem != null)
           NameNode.getNameNodeMetrics().numFilesCreated.inc();
         fsImage.getEditLog().logMkDir(cur, inodes[i]);
@@ -941,7 +965,7 @@ class FSDirectory implements FSConstants, Closeable {
       long timestamp) throws QuotaExceededException {
     inodes[pos] = addChild(inodes, pos, 
         new INodeDirectory(name, permission, timestamp),
-        -1, inheritPermission );
+        inheritPermission );
   }
   
   /** Add a node child to the namespace. The full path name of the node is src.
@@ -959,127 +983,48 @@ class FSDirectory implements FSConstants, Closeable {
                       inheritPermission);
     }
   }
-
-  /**
-   * Verify quota for adding or moving a new INode with required 
-   * namespace and diskspace to a given position.
-   *  
-   * @param inodes INodes corresponding to a path
-   * @param pos position where a new INode will be added
-   * @param nsDelta needed namespace
-   * @param dsDelta needed diskspace
-   * @param commonAncestor Last node in inodes array that is a common ancestor
-   *          for a INode that is being moved from one location to the other.
-   *          Pass null if a node is not being moved.
-   * @throws QuotaExceededException if quota limit is exceeded.
-   */
-  private void verifyQuota(INode[] inodes, int pos, long nsDelta, long dsDelta,
-      INode commonAncestor) throws QuotaExceededException {
-    if (!ready) {
-      // Do not check quota if edits log is still being processed
-      return;
-    }
-    if (pos>inodes.length) {
-      pos = inodes.length;
-    }
-    int i = pos - 1;
-    try {
-      // check existing components in the path  
-      for(; i >= 0; i--) {
-        if (commonAncestor == inodes[i]) {
-          // Moving an existing node. Stop checking for quota when common
-          // ancestor is reached
-          return;
-        }
-        if (inodes[i].isQuotaSet()) { // a directory with quota
-          INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; 
-          node.verifyQuota(nsDelta, dsDelta);
-        }
-      }
-    } catch (QuotaExceededException e) {
-      e.setPathName(getFullPathName(inodes, i));
-      throw e;
-    }
-  }
   
-  /**
-   * Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
-   * dstInodes[dstInodes.length-1]
-   * 
-   * @param srcInodes directory from where node is being moved.
-   * @param dstInodes directory to where node is moved to.
-   * @throws QuotaExceededException if quota limit is exceeded.
-   */
-  private void verifyQuotaForRename(INode[] srcInodes, INode[]dstInodes)
-      throws QuotaExceededException {
-    if (!ready) {
-      // Do not check quota if edits log is still being processed
-      return;
-    }
-    INode srcInode = srcInodes[srcInodes.length - 1];
-    INode commonAncestor = null;
-    for(int i =0;srcInodes[i] == dstInodes[i]; i++) {
-      commonAncestor = srcInodes[i];
-    }
-    INode.DirCounts counts = new INode.DirCounts();
-    srcInode.spaceConsumedInTree(counts);
-    verifyQuota(dstInodes, dstInodes.length - 1, counts.getNsCount(),
-            counts.getDsCount(), commonAncestor);
+  /** Add a node child to the inodes at index pos. 
+   * Its ancestors are stored at [0, pos-1]. 
+   * QuotaExceededException is thrown if it violates quota limit */
+  private <T extends INode> T addChild(INode[] pathComponents, int pos, T child,
+      boolean inheritPermission) throws QuotaExceededException {
+    return addChild(pathComponents, pos, child, -1, inheritPermission);
   }
   
   /** Add a node child to the inodes at index pos. 
    * Its ancestors are stored at [0, pos-1]. 
    * QuotaExceededException is thrown if it violates quota limit */
-  private <T extends INode> T addChild(INode[] pathComponents, int pos,
-      T child, long childDiskspace, boolean inheritPermission,
-      boolean checkQuota) throws QuotaExceededException {
+  private <T extends INode> T addChild(INode[] pathComponents, int pos, T child,
+       long childDiskspace, boolean inheritPermission) throws QuotaExceededException {
     INode.DirCounts counts = new INode.DirCounts();
     child.spaceConsumedInTree(counts);
     if (childDiskspace < 0) {
       childDiskspace = counts.getDsCount();
     }
-    updateCount(pathComponents, pos, counts.getNsCount(), childDiskspace,
-        checkQuota);
+    updateCount(pathComponents, pos, counts.getNsCount(), childDiskspace);
     T addedNode = ((INodeDirectory)pathComponents[pos-1]).addChild(
         child, inheritPermission);
     if (addedNode == null) {
-      updateCount(pathComponents, pos, -counts.getNsCount(), 
-          -childDiskspace, true);
+      updateCount(pathComponents, pos, 
+                  -counts.getNsCount(), -childDiskspace);
     }
     return addedNode;
   }
-
-  private <T extends INode> T addChild(INode[] pathComponents, int pos,
-      T child, long childDiskspace, boolean inheritPermission)
-      throws QuotaExceededException {
-    return addChild(pathComponents, pos, child, childDiskspace,
-        inheritPermission, true);
-  }
-  
-  private <T extends INode> T addChildNoQuotaCheck(INode[] pathComponents,
-      int pos, T child, long childDiskspace, boolean inheritPermission) {
-    T inode = null;
-    try {
-      inode = addChild(pathComponents, pos, child, childDiskspace,
-          inheritPermission, false);
-    } catch (QuotaExceededException e) {
-      NameNode.LOG.warn("FSDirectory.addChildNoQuotaCheck - unexpected", e); 
-    }
-    return inode;
-  }
   
   /** Remove an inode at index pos from the namespace.
    * Its ancestors are stored at [0, pos-1].
    * Count of each ancestor with quota is also updated.
    * Return the removed node; null if the removal fails.
    */
-  private INode removeChild(INode[] pathComponents, int pos) {
+  private INode removeChild(INode[] pathComponents, int pos)
+  throws QuotaExceededException {
     INode removedNode = 
       ((INodeDirectory)pathComponents[pos-1]).removeChild(pathComponents[pos]);
     if (removedNode != null) {
       INode.DirCounts counts = new INode.DirCounts();
       removedNode.spaceConsumedInTree(counts);
-      updateCountNoQuotaCheck(pathComponents, pos,
+      updateCount(pathComponents, pos,
                   -counts.getNsCount(), -counts.getDsCount());
     }
     return removedNode;
@@ -1129,6 +1074,7 @@ class FSDirectory implements FSConstants, Closeable {
    * @param dir the root of the tree that represents the directory
    * @param counters counters for name space and disk space
    * @param nodesInPath INodes for the each of components in the path.
+   * @return the size of the tree
    */
   private static void updateCountForINodeWithQuota(INodeDirectory dir, 
                                                INode.DirCounts counts,
@@ -1210,8 +1156,6 @@ class FSDirectory implements FSConstants, Closeable {
       throw new FileNotFoundException("Directory does not exist: " + srcs);
     } else if (!targetNode.isDirectory()) {
       throw new FileNotFoundException("Cannot set quota on a file: " + srcs);  
-    } else if (targetNode.isRoot() && nsQuota == FSConstants.QUOTA_RESET) {
-      throw new IllegalArgumentException("Cannot clear namespace quota on root.");
     } else { // a directory inode
       INodeDirectory dirNode = (INodeDirectory)targetNode;
       long oldNsQuota = dirNode.getNsQuota();
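
The reverted updateCount above walks the ancestor INodes, applies the namespace and diskspace deltas to every directory with a quota, and on QuotaExceededException rolls back whatever it already applied before rethrowing. A self-contained sketch of that apply-then-undo pattern, using stand-in classes (QuotaNode, QuotaException) rather than the HDFS types:

import java.util.List;

class QuotaException extends Exception {
  QuotaException(String msg) { super(msg); }
}

class QuotaNode {
  long used;
  final long quota;  // negative means unlimited

  QuotaNode(long used, long quota) { this.used = used; this.quota = quota; }

  void update(long delta) throws QuotaException {
    long next = used + delta;
    if (delta > 0 && quota >= 0 && next > quota) {
      throw new QuotaException("quota " + quota + " exceeded: " + next);
    }
    used = next;
  }
}

class QuotaUpdateSketch {
  static void updateCount(List<QuotaNode> ancestors, long delta) throws QuotaException {
    int i = 0;
    try {
      for (; i < ancestors.size(); i++) {
        ancestors.get(i).update(delta);   // apply along the ancestor path
      }
    } catch (QuotaException e) {
      for (; i-- > 0; ) {
        ancestors.get(i).used -= delta;   // undo what was already applied
      }
      throw e;
    }
  }
}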

+ 26 - 68
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -83,7 +83,7 @@ public class FSEditLog {
   private long lastPrintTime;
 
   // is a sync currently running?
-  private volatile boolean isSyncRunning;
+  private boolean isSyncRunning;
 
   // these are statistics counters.
   private long numTransactions;        // number of transactions
@@ -308,13 +308,6 @@ public class FSEditLog {
     return editStreams == null ? 0 : editStreams.size();
   }
 
-  /**
-   * Return the current edit streams. This is for use from tests only!
-   */
-  ArrayList<EditLogOutputStream> getEditStreams() {
-    return editStreams;
-  }
-  
   boolean isOpen() {
     return getNumEditStreams() > 0;
   }
@@ -345,17 +338,33 @@ public class FSEditLog {
   }
 
   public synchronized void createEditLogFile(File name) throws IOException {
-    waitForSyncToFinish();
     EditLogOutputStream eStream = new EditLogFileOutputStream(name);
     eStream.create();
     eStream.close();
   }
 
+  /**
+   * Create edits.new if non existent.
+   */
+  synchronized void createNewIfMissing() throws IOException {
+ for (Iterator<StorageDirectory> it = 
+       fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
+      File newFile = getEditNewFile(it.next());
+      if (!newFile.exists())
+        createEditLogFile(newFile);
+    }
+  }
+  
   /**
    * Shutdown the file store.
    */
   public synchronized void close() throws IOException {
-    waitForSyncToFinish();
+    while (isSyncRunning) {
+      try {
+        wait(1000);
+      } catch (InterruptedException ie) { 
+      }
+    }
     if (editStreams == null) {
       return;
     }
@@ -854,60 +863,9 @@ public class FSEditLog {
       metrics.transactions.inc((end-start));
   }
 
-  /**
-   * Blocks until all ongoing edits have been synced to disk.
-   * This differs from logSync in that it waits for edits that have been
-   * written by other threads, not just edits from the calling thread.
-   *
-   * NOTE: this should be done while holding the FSNamesystem lock, or
-   * else more operations can start writing while this is in progress.
-   */
-  void logSyncAll() throws IOException {
-    // Record the most recent transaction ID as our own id
-    synchronized (this) {
-      TransactionId id = myTransactionId.get();
-      id.txid = txid;
-    }
-    // Then make sure we're synced up to this point
-    logSync();
-  }
-
-  synchronized void waitForSyncToFinish() {
-    while (isSyncRunning) {
-      try {
-        wait(1000);
-      } catch (InterruptedException ie) {}
-    }
-  }
-  
-  /**
-   * Sync all modifications done by this thread.
-   *
-   * The internal concurrency design of this class is as follows:
-   *   - Log items are written synchronized into an in-memory buffer,
-   *     and each assigned a transaction ID.
-   *   - When a thread (client) would like to sync all of its edits, logSync()
-   *     uses a ThreadLocal transaction ID to determine what edit number must
-   *     be synced to.
-   *   - The isSyncRunning volatile boolean tracks whether a sync is currently
-   *     under progress.
-   *
-   * The data is double-buffered within each edit log implementation so that
-   * in-memory writing can occur in parallel with the on-disk writing.
-   *
-   * Each sync occurs in three steps:
-   *   1. synchronized, it swaps the double buffer and sets the isSyncRunning
-   *      flag.
-   *   2. unsynchronized, it flushes the data to storage
-   *   3. synchronized, it resets the flag and notifies anyone waiting on the
-   *      sync.
-   *
-   * The lack of synchronization on step 2 allows other threads to continue
-   * to write into the memory buffer while the sync is in progress.
-   * Because this step is unsynchronized, actions that need to avoid
-   * concurrency with sync() should be synchronized and also call
-   * waitForSyncToFinish() before assuming they are running alone.
-   */
+  //
+  // Sync all modifications done by this thread.
+  //
   public void logSync() throws IOException {
     ArrayList<EditLogOutputStream> errorStreams = null;
     long syncStart = 0;
@@ -917,6 +875,10 @@ public class FSEditLog {
 
     final int numEditStreams;
     synchronized (this) {
+      numEditStreams = editStreams.size();
+      assert numEditStreams > 0 : "no editlog streams";
+      printStatistics(false);
+
       // if somebody is already syncing, then wait
       while (mytxid > synctxid && isSyncRunning) {
         try {
@@ -924,9 +886,6 @@ public class FSEditLog {
         } catch (InterruptedException ie) { 
         }
       }
-      numEditStreams = editStreams.size();
-      assert numEditStreams > 0 : "no editlog streams";
-      printStatistics(false);
 
       //
       // If this transaction was already flushed, then nothing to do
@@ -1159,7 +1118,6 @@ public class FSEditLog {
    * Returns the lastModified time of the edits log.
    */
   synchronized void rollEditLog() throws IOException {
-    waitForSyncToFinish();
     //
     // If edits.new already exists in some directory, verify it
     // exists in all directories.
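
The revert also trades the volatile isSyncRunning flag and the explicit waitForSyncToFinish() helper for an inline wait loop in close(), visible earlier in this hunk. The underlying pattern is plain monitor-based waiting on the sync flag; a minimal stand-alone sketch (SyncGate is a stand-in, not FSEditLog):

class SyncGate {
  private boolean isSyncRunning;

  // Block until no sync is in flight; re-checks every second or on notifyAll().
  synchronized void waitForSyncToFinish() {
    while (isSyncRunning) {
      try {
        wait(1000);
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
    }
  }

  synchronized void beginSync()  { isSyncRunning = true; }
  synchronized void finishSync() { isSyncRunning = false; notifyAll(); }
}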

+ 33 - 157
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -110,7 +110,7 @@ public class FSImage extends Storage {
   }
   
   protected long checkpointTime = -1L;
-  FSEditLog editLog = null;
+  private FSEditLog editLog = null;
   private boolean isUpgradeFinalized = false;
   
   /**
@@ -409,7 +409,12 @@ public class FSImage extends Storage {
       // rename current to tmp
       rename(curDir, tmpDir);
       // save new image
-      saveCurrent(sd);
+      if (!curDir.mkdir())
+        throw new IOException("Cannot create directory " + curDir);
+      saveFSImage(getImageFile(sd, NameNodeFile.IMAGE));
+      editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS));
+      // write version and time files
+      sd.write();
       // rename tmp to previous
       rename(tmpDir, prevDir);
       isUpgradeFinalized = false;
@@ -517,7 +522,7 @@ public class FSImage extends Storage {
     realImage.setStorageInfo(ckptImage);
     fsNamesys.dir.fsImage = realImage;
     // and save it
-    saveNamespace(false);
+    saveFSImage();
   }
 
   void finalizeUpgrade() throws IOException {
@@ -785,24 +790,9 @@ public class FSImage extends Storage {
       throw new IOException("Edits file is not found in " + editsDirs);
 
     // Make sure we are loading image and edits from same checkpoint
-    if (latestNameCheckpointTime > latestEditsCheckpointTime
-        && latestNameSD != latestEditsSD
-        && latestNameSD.getStorageDirType() == NameNodeDirType.IMAGE
-        && latestEditsSD.getStorageDirType() == NameNodeDirType.EDITS) {
-      // This is a rare failure when NN has image-only and edits-only
-      // storage directories, and fails right after saving images,
-      // in some of the storage directories, but before purging edits.
-      // See -NOTE- in saveNamespace().
-      LOG.error("This is a rare failure scenario!!!");
-      LOG.error("Image checkpoint time " + latestNameCheckpointTime +
-                " > edits checkpoint time " + latestEditsCheckpointTime);
-      LOG.error("Name-node will treat the image as the latest state of " +
-                "the namespace. Old edits will be discarded.");
-    } else if (latestNameCheckpointTime != latestEditsCheckpointTime)
-      throw new IOException("Inconsistent storage detected, " +
-                      "image and edits checkpoint times do not match. " +
-                      "image checkpoint time = " + latestNameCheckpointTime +
-                      "edits checkpoint time = " + latestEditsCheckpointTime);
+    if (latestNameCheckpointTime != latestEditsCheckpointTime)
+      throw new IOException("Inconsitent storage detected, " +
+                            "name and edits storage do not match");
     
     // Recover from previous interrrupted checkpoint if any
     needToSave |= recoverInterruptedCheckpoint(latestNameSD, latestEditsSD);
@@ -819,11 +809,7 @@ public class FSImage extends Storage {
         + (FSNamesystem.now() - startTime)/1000 + " seconds.");
     
     // Load latest edits
-    if (latestNameCheckpointTime > latestEditsCheckpointTime)
-      // the image is already current, discard edits
-      needToSave |= true;
-    else // latestNameCheckpointTime == latestEditsCheckpointTime
-      needToSave |= (loadFSEdits(latestEditsSD) > 0);
+    needToSave |= (loadFSEdits(latestEditsSD) > 0);
     
     return needToSave;
   }
@@ -1051,141 +1037,26 @@ public class FSImage extends Storage {
   }
 
   /**
-   * Save the contents of the FS image and create empty edits.
-   * 
-   * In order to minimize the recovery effort in case of failure during
-   * saveNamespace the algorithm reduces discrepancy between directory states
-   * by performing updates in the following order:
-   * <ol>
-   * <li> rename current to lastcheckpoint.tmp for all of them,</li>
-   * <li> save image and recreate edits for all of them,</li>
-   * <li> rename lastcheckpoint.tmp to previous.checkpoint.</li>
-   * </ol>
-   * On stage (2) we first save all images, then recreate edits.
-   * Otherwise the name-node may purge all edits and fail,
-   * in which case the journal will be lost.
+   * Save the contents of the FS image
+   * and create empty edits.
    */
-  void saveNamespace(boolean renewCheckpointTime) throws IOException {
-    editLog.close();
-    if(renewCheckpointTime)
-      this.checkpointTime = FSNamesystem.now();
-
-    // mv current -> lastcheckpoint.tmp
-    for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
-      StorageDirectory sd = it.next();
-      try {
-        moveCurrent(sd);
-      } catch(IOException ie) {
-        LOG.error("Unable to move current for " + sd.getRoot(), ie);
-        processIOError(sd.getRoot());
-      }
-    }
-
-    // save images into current
-    for (Iterator<StorageDirectory> it = dirIterator(NameNodeDirType.IMAGE);
-                                                              it.hasNext();) {
-      StorageDirectory sd = it.next();
-      try {
-        saveCurrent(sd);
-      } catch(IOException ie) {
-        LOG.error("Unable to save image for " + sd.getRoot(), ie);
-        processIOError(sd.getRoot());
-      }
-    }
-
-    // -NOTE-
-    // If NN has image-only and edits-only storage directories and fails here 
-    // the image will have the latest namespace state.
-    // During startup the image-only directories will recover by discarding
-    // lastcheckpoint.tmp, while
-    // the edits-only directories will recover by falling back
-    // to the old state contained in their lastcheckpoint.tmp.
-    // The edits directories should be discarded during startup because their
-    // checkpointTime is older than that of image directories.
-
-    // recreate edits in current
-    for (Iterator<StorageDirectory> it = dirIterator(NameNodeDirType.EDITS);
-                                                              it.hasNext();) {
-      StorageDirectory sd = it.next();
-      try {
-        saveCurrent(sd);
-      } catch(IOException ie) {
-        LOG.error("Unable to save edits for " + sd.getRoot(), ie);
-        processIOError(sd.getRoot());
-      }
-    }
-    // mv lastcheckpoint.tmp -> previous.checkpoint
-    for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
+  public void saveFSImage() throws IOException {
+    editLog.createNewIfMissing();
+    for (Iterator<StorageDirectory> it = 
+                           dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
-      try {
-        moveLastCheckpoint(sd);
-      } catch(IOException ie) {
-        LOG.error("Unable to move last checkpoint for " + sd.getRoot(), ie);
-        processIOError(sd.getRoot());
+      NameNodeDirType dirType = (NameNodeDirType)sd.getStorageDirType();
+      if (dirType.isOfType(NameNodeDirType.IMAGE))
+        saveFSImage(getImageFile(sd, NameNodeFile.IMAGE_NEW));
+      if (dirType.isOfType(NameNodeDirType.EDITS)) {    
+        editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS));
+        File editsNew = getImageFile(sd, NameNodeFile.EDITS_NEW);
+        if (editsNew.exists()) 
+          editLog.createEditLogFile(editsNew);
       }
     }
-    if(!editLog.isOpen()) editLog.open();
     ckptState = CheckpointStates.UPLOAD_DONE;
-  }
-
-  /**
-   * Save current image and empty journal into {@code current} directory.
-   */
-  protected void saveCurrent(StorageDirectory sd) throws IOException {
-    File curDir = sd.getCurrentDir();
-    NameNodeDirType dirType = (NameNodeDirType)sd.getStorageDirType();
-    // save new image or new edits
-    if (!curDir.exists() && !curDir.mkdir())
-      throw new IOException("Cannot create directory " + curDir);
-    if (dirType.isOfType(NameNodeDirType.IMAGE))
-      saveFSImage(getImageFile(sd, NameNodeFile.IMAGE));
-    if (dirType.isOfType(NameNodeDirType.EDITS))
-      editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS));
-    // write version and time files
-    sd.write();
-  }
-
-  /**
-   * Move {@code current} to {@code lastcheckpoint.tmp} and
-   * recreate empty {@code current}.
-   * {@code current} is moved only if it is well formatted,
-   * that is contains VERSION file.
-   * 
-   * @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getLastCheckpointTmp()
-   * @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getPreviousCheckpoint()
-   */
-  protected void moveCurrent(StorageDirectory sd)
-  throws IOException {
-    File curDir = sd.getCurrentDir();
-    File tmpCkptDir = sd.getLastCheckpointTmp();
-    // mv current -> lastcheckpoint.tmp
-    // only if current is formatted - has VERSION file
-    if(sd.getVersionFile().exists()) {
-      assert curDir.exists() : curDir + " directory must exist.";
-      assert !tmpCkptDir.exists() : tmpCkptDir + " directory must not exist.";
-      rename(curDir, tmpCkptDir);
-    }
-    // recreate current
-    if(!curDir.exists() && !curDir.mkdir())
-      throw new IOException("Cannot create directory " + curDir);
-  }
-
-  /**
-   * Move {@code lastcheckpoint.tmp} to {@code previous.checkpoint}
-   * 
-   * @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getPreviousCheckpoint()
-   * @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getLastCheckpointTmp()
-   */
-  protected void moveLastCheckpoint(StorageDirectory sd)
-  throws IOException {
-    File tmpCkptDir = sd.getLastCheckpointTmp();
-    File prevCkptDir = sd.getPreviousCheckpoint();
-    // remove previous.checkpoint
-    if (prevCkptDir.exists())
-      deleteDir(prevCkptDir);
-    // rename lastcheckpoint.tmp -> previous.checkpoint
-    if(tmpCkptDir.exists())
-      rename(tmpCkptDir, prevCkptDir);
+    rollFSImage();
   }
 
   /**
@@ -1215,7 +1086,12 @@ public class FSImage extends Storage {
     sd.clearDirectory(); // create currrent dir
     sd.lock();
     try {
-      saveCurrent(sd);
+      NameNodeDirType dirType = (NameNodeDirType)sd.getStorageDirType();
+      if (dirType.isOfType(NameNodeDirType.IMAGE))
+        saveFSImage(getImageFile(sd, NameNodeFile.IMAGE));
+      if (dirType.isOfType(NameNodeDirType.EDITS))
+        editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS));
+      sd.write();
     } finally {
       sd.unlock();
     }
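
The removed saveNamespace() javadoc spelled out a three-stage save across all storage directories: move current to lastcheckpoint.tmp everywhere, write the new image (and then fresh edits) into a recreated current, and finally promote lastcheckpoint.tmp to previous.checkpoint. A rough sketch of that sequence under stated assumptions (the Saver callback and directory names are illustrative, and per-directory error handling is elided):

import java.io.File;
import java.io.IOException;
import java.util.List;

class CheckpointSaveSketch {
  interface Saver {
    void saveInto(File currentDir) throws IOException;
  }

  static void saveAll(List<File> storageRoots, Saver saver) throws IOException {
    for (File root : storageRoots) {            // 1. stash the existing state
      new File(root, "current").renameTo(new File(root, "lastcheckpoint.tmp"));
      new File(root, "current").mkdir();
    }
    for (File root : storageRoots) {            // 2. write the new state
      saver.saveInto(new File(root, "current"));
    }
    for (File root : storageRoots) {            // 3. promote the old state
      File prev = new File(root, "previous.checkpoint");
      deleteRecursively(prev);
      new File(root, "lastcheckpoint.tmp").renameTo(prev);
    }
  }

  private static void deleteRecursively(File f) {
    File[] children = f.listFiles();
    if (children != null) {
      for (File c : children) deleteRecursively(c);
    }
    f.delete();
  }
}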

+ 5 - 12
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -678,8 +678,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
    */
   public synchronized void setPermission(String src, FsPermission permission
       ) throws IOException {
-    if (isInSafeMode())
-       throw new SafeModeException("Cannot set permission for " + src, safeMode);
     checkOwner(src);
     dir.setPermission(src, permission);
     getEditLog().logSync();
@@ -697,8 +695,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
    */
   public synchronized void setOwner(String src, String username, String group
       ) throws IOException {
-    if (isInSafeMode())
-       throw new SafeModeException("Cannot set owner for " + src, safeMode);
     PermissionChecker pc = checkOwner(src);
     if (!pc.isSuper) {
       if (username != null && !pc.user.equals(username)) {
@@ -763,7 +759,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
     if (length < 0) {
       throw new IOException("Negative length is not supported. File: " + src );
     }
-    final LocatedBlocks ret = getBlockLocationsInternal(src, 
+    final LocatedBlocks ret = getBlockLocationsInternal(src, dir.getFileINode(src),
         offset, length, Integer.MAX_VALUE, doAccessTime);  
     if (auditLog.isInfoEnabled()) {
       logAuditEvent(UserGroupInformation.getCurrentUGI(),
@@ -774,12 +770,12 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
   }
 
   private synchronized LocatedBlocks getBlockLocationsInternal(String src,
+                                                       INodeFile inode,
                                                        long offset, 
                                                        long length,
                                                        int nrBlocksToReturn,
                                                        boolean doAccessTime) 
                                                        throws IOException {
-    INodeFile inode = dir.getFileINode(src);
     if(inode == null) {
       return null;
     }
@@ -1702,7 +1698,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
       checkPermission(src, false, null, FsAction.WRITE, null, FsAction.ALL);
     }
 
-    return dir.delete(src);
+    return dir.delete(src) != null;
   }
 
   void removePathAndBlocks(String src, List<Block> blocks) throws IOException {
@@ -1790,9 +1786,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
    * contract.
    */
   void setQuota(String path, long nsQuota, long dsQuota) throws IOException {
-   if (isInSafeMode())
-      throw new SafeModeException("Cannot set quota on " + path, safeMode); 
-   if (isPermissionEnabled) {
+    if (isPermissionEnabled) {
       checkSuperuserPrivilege();
     }
     
@@ -3579,7 +3573,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
       throw new IOException("Safe mode should be turned ON " +
                             "in order to create namespace image.");
     }
-    getFSImage().saveNamespace(true);
+    getFSImage().saveFSImage();
     LOG.info("New namespace image has been created.");
   }
 
@@ -4370,7 +4364,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
    * @throws IOException
    */
   synchronized void enterSafeMode() throws IOException {
-    getEditLog().logSyncAll();
     if (!isInSafeMode()) {
       safeMode = new SafeModeInfo();
       return;
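
Several hunks in this file remove the same guard: mutating operations (setPermission, setOwner, setQuota) bail out with a SafeModeException while the namenode is in safe mode. The guard itself is a one-line check; a generic sketch with a stand-in class and a plain IOException:

class SafeModeGuardSketch {
  private volatile boolean inSafeMode;

  void checkNotInSafeMode(String action) throws java.io.IOException {
    if (inSafeMode) {
      throw new java.io.IOException("Cannot " + action + ": name node is in safe mode");
    }
  }

  void enterSafeMode() { inSafeMode = true; }
  void leaveSafeMode() { inSafeMode = false; }
}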

+ 0 - 4
src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java

@@ -45,14 +45,10 @@ public class GetImageServlet extends HttpServlet {
       FSImage nnImage = (FSImage)context.getAttribute("name.system.image");
       TransferFsImage ff = new TransferFsImage(pmap, request, response);
       if (ff.getImage()) {
-        response.setHeader(TransferFsImage.CONTENT_LENGTH,
-          String.valueOf(nnImage.getFsImageName().length()));
         // send fsImage
         TransferFsImage.getFileServer(response.getOutputStream(),
                                       nnImage.getFsImageName()); 
       } else if (ff.getEdit()) {
-        response.setHeader(TransferFsImage.CONTENT_LENGTH,
-          String.valueOf(nnImage.getFsEditName().length()));
         // send edits
         TransferFsImage.getFileServer(response.getOutputStream(),
                                       nnImage.getFsEditName());
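
The two removed lines simply advertised the size of the fsimage or edits file via a Content-Length header before streaming it. A generic servlet-side sketch of that idea (not the actual GetImageServlet code; the buffer size and literal header name are assumptions):

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStream;
import javax.servlet.http.HttpServletResponse;

class ImageTransferSketch {
  static void sendFile(HttpServletResponse response, File image) throws IOException {
    response.setHeader("Content-Length", String.valueOf(image.length()));
    FileInputStream in = new FileInputStream(image);
    try {
      OutputStream out = response.getOutputStream();
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) != -1) {
        out.write(buf, 0, n);  // stream the file to the caller
      }
    } finally {
      in.close();
    }
  }
}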

+ 0 - 9
src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -320,15 +320,6 @@ class INodeDirectory extends INode {
         child.computeContentSummary(summary);
       }
     }
-    if (this instanceof INodeDirectoryWithQuota) {
-      // Warn if the cached and computed diskspace values differ
-      INodeDirectoryWithQuota node = (INodeDirectoryWithQuota)this;
-      long space = node.diskspaceConsumed();
-      if (-1 != node.getDsQuota() && space != summary[3]) {
-        NameNode.LOG.warn("Inconsistent diskspace for directory "
-          +getLocalName()+". Cached: "+space+" Computed: "+summary[3]);
-      }
-    }
     summary[2]++;
     return summary;
   }

+ 19 - 13
src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java

@@ -116,10 +116,18 @@ class INodeDirectoryWithQuota extends INodeDirectory {
    * 
    * @param nsDelta the change of the tree size
    * @param dsDelta change to disk space occupied
+   * @throws QuotaExceededException if the changed size is greater 
+   *                                than the quota
    */
-  void updateNumItemsInTree(long nsDelta, long dsDelta) {
-    nsCount += nsDelta;
-    diskspace += dsDelta;
+  void updateNumItemsInTree(long nsDelta, long dsDelta) throws 
+                            QuotaExceededException {
+    long newCount = nsCount + nsDelta;
+    long newDiskspace = diskspace + dsDelta;
+    if (nsDelta>0 || dsDelta>0) {
+      verifyQuota(nsQuota, newCount, dsQuota, newDiskspace);
+    }
+    nsCount = newCount;
+    diskspace = newDiskspace;
   }
   
   /** 
@@ -138,16 +146,14 @@ class INodeDirectoryWithQuota extends INodeDirectory {
   /** Verify if the namespace count disk space satisfies the quota restriction 
    * @throws QuotaExceededException if the given quota is less than the count
    */
-  void verifyQuota(long nsDelta, long dsDelta) throws QuotaExceededException {
-    long newCount = nsCount + nsDelta;
-    long newDiskspace = diskspace + dsDelta;
-    if (nsDelta>0 || dsDelta>0) {
-      if (nsQuota >= 0 && nsQuota < newCount) {
-        throw new NSQuotaExceededException(nsQuota, newCount);
-      }
-      if (dsQuota >= 0 && dsQuota < newDiskspace) {
-        throw new DSQuotaExceededException(dsQuota, newDiskspace);
-      }
+  private static void verifyQuota(long nsQuota, long nsCount, 
+                                  long dsQuota, long diskspace)
+                                  throws QuotaExceededException {
+    if (nsQuota >= 0 && nsQuota < nsCount) {
+      throw new NSQuotaExceededException(nsQuota, nsCount);
+    }
+    if (dsQuota >= 0 && dsQuota < diskspace) {
+      throw new DSQuotaExceededException(dsQuota, diskspace);
     }
   }
 }
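
For reference, a minimal sketch of the quota-check pattern restored in the INodeDirectoryWithQuota hunk above: validate the would-be totals before committing a delta, and only when the delta grows usage. Class and exception names here are hypothetical stand-ins (plain IllegalStateException instead of NSQuotaExceededException/DSQuotaExceededException), not code from the patch.

    // Sketch only: quota bookkeeping with check-before-commit semantics.
    class QuotaTracker {
      private final long nsQuota, dsQuota;   // -1 means "unlimited"
      private long nsCount, diskspace;       // current usage

      QuotaTracker(long nsQuota, long dsQuota) {
        this.nsQuota = nsQuota;
        this.dsQuota = dsQuota;
      }

      /** Apply a delta only if the resulting usage stays within both quotas. */
      synchronized void update(long nsDelta, long dsDelta) {
        long newCount = nsCount + nsDelta;
        long newSpace = diskspace + dsDelta;
        if (nsDelta > 0 || dsDelta > 0) {    // only growth can violate a quota
          if (nsQuota >= 0 && nsQuota < newCount) {
            throw new IllegalStateException("namespace quota exceeded: " + newCount);
          }
          if (dsQuota >= 0 && dsQuota < newSpace) {
            throw new IllegalStateException("diskspace quota exceeded: " + newSpace);
          }
        }
        nsCount = newCount;                  // commit only after validation
        diskspace = newSpace;
      }
    }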

+ 20 - 3
src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -21,6 +21,7 @@ import org.apache.commons.logging.*;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.ipc.*;
@@ -543,13 +544,29 @@ public class SecondaryNameNode implements Runnable {
      */
     void startCheckpoint() throws IOException {
       for(StorageDirectory sd : storageDirs) {
-        moveCurrent(sd);
+        File curDir = sd.getCurrentDir();
+        File tmpCkptDir = sd.getLastCheckpointTmp();
+        assert !tmpCkptDir.exists() : 
+          tmpCkptDir.getName() + " directory must not exist.";
+        if(curDir.exists()) {
+          // rename current to tmp
+          rename(curDir, tmpCkptDir);
+        }
+        if (!curDir.mkdir())
+          throw new IOException("Cannot create directory " + curDir);
       }
     }
 
     void endCheckpoint() throws IOException {
       for(StorageDirectory sd : storageDirs) {
-        moveLastCheckpoint(sd);
+        File tmpCkptDir = sd.getLastCheckpointTmp();
+        File prevCkptDir = sd.getPreviousCheckpoint();
+        // delete previous dir
+        if (prevCkptDir.exists())
+          deleteDir(prevCkptDir);
+        // rename tmp to previous
+        if (tmpCkptDir.exists())
+          rename(tmpCkptDir, prevCkptDir);
       }
     }
 
@@ -572,7 +589,7 @@ public class SecondaryNameNode implements Runnable {
       loadFSImage(FSImage.getImageFile(sdName, NameNodeFile.IMAGE));
       loadFSEdits(sdEdits);
       sig.validateStorageInfo(this);
-      saveNamespace(false);
+      saveFSImage();
     }
   }
 }
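
For reference, a minimal sketch of the checkpoint directory rotation that the SecondaryNameNode hunk above inlines: move current/ aside to a tmp directory before checkpointing, then promote that tmp directory to previous/ afterwards. The helper class, directory names, and recursive delete below are illustrative assumptions, not the patch's own code.

    // Sketch only: current -> lastcheckpoint.tmp -> previous.checkpoint rotation.
    import java.io.File;
    import java.io.IOException;

    class CheckpointDirs {
      /** Before a checkpoint: move current/ aside and recreate it empty. */
      static void start(File storageDir) throws IOException {
        File current = new File(storageDir, "current");
        File tmp = new File(storageDir, "lastcheckpoint.tmp");
        if (tmp.exists()) {
          throw new IOException(tmp + " must not exist");
        }
        if (current.exists() && !current.renameTo(tmp)) {
          throw new IOException("Cannot rename " + current + " to " + tmp);
        }
        if (!current.mkdir()) {
          throw new IOException("Cannot create directory " + current);
        }
      }

      /** After a checkpoint: drop the old previous/ and promote the tmp directory. */
      static void end(File storageDir) throws IOException {
        File tmp = new File(storageDir, "lastcheckpoint.tmp");
        File previous = new File(storageDir, "previous.checkpoint");
        if (previous.exists() && !deleteRecursively(previous)) {
          throw new IOException("Cannot delete " + previous);
        }
        if (tmp.exists() && !tmp.renameTo(previous)) {
          throw new IOException("Cannot rename " + tmp + " to " + previous);
        }
      }

      private static boolean deleteRecursively(File dir) {
        File[] children = dir.listFiles();
        if (children != null) {
          for (File child : children) {
            deleteRecursively(child);
          }
        }
        return dir.delete();
      }
    }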

+ 0 - 28
src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java

@@ -21,7 +21,6 @@ import java.io.*;
 import java.net.*;
 import java.util.Iterator;
 import java.util.Map;
-import java.lang.Math;
 import javax.servlet.http.HttpServletResponse;
 import javax.servlet.http.HttpServletRequest;
 
@@ -33,8 +32,6 @@ import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.ErrorSimulator;
  */
 class TransferFsImage implements FSConstants {
   
-  public final static String CONTENT_LENGTH = "Content-Length";
-  
   private boolean isGetImage;
   private boolean isGetEdit;
   private boolean isPutImage;
@@ -121,16 +118,6 @@ class TransferFsImage implements FSConstants {
         throw new IOException("If this exception is not caught by the " +
             "name-node fs image will be truncated.");
       }
-      
-      if (ErrorSimulator.getErrorSimulation(3)
-          && localfile.getAbsolutePath().contains("fsimage")) {
-          // Test sending image shorter than localfile
-          long len = localfile.length();
-          buf = new byte[(int)Math.min(len/2, BUFFER_SIZE)];
-          // This will read at most half of the image
-          // and the rest of the image will be sent over the wire
-          infile.read(buf);
-      }
       int num = 1;
       while (num > 0) {
         num = infile.read(buf);
@@ -161,15 +148,6 @@ class TransferFsImage implements FSConstants {
     //
     URL url = new URL(str.toString());
     URLConnection connection = url.openConnection();
-    long advertisedSize;
-    String contentLength = connection.getHeaderField(CONTENT_LENGTH);
-    if (contentLength != null) {
-      advertisedSize = Long.parseLong(contentLength);
-    } else {
-      throw new IOException(CONTENT_LENGTH + " header is not provided " +
-                            "by the namenode when trying to fetch " + str);
-    }
-    long received = 0;
     InputStream stream = connection.getInputStream();
     FileOutputStream[] output = null;
 
@@ -184,7 +162,6 @@ class TransferFsImage implements FSConstants {
       while (num > 0) {
         num = stream.read(buf);
         if (num > 0 && localPath != null) {
-          received += num;
           for (int i = 0; i < output.length; i++) {
             output[i].write(buf, 0, num);
           }
@@ -199,11 +176,6 @@ class TransferFsImage implements FSConstants {
           }
         }
       }
-      if (received != advertisedSize) {
-        throw new IOException("File " + str + " received length " + received +
-                              " is not of the advertised size " +
-                              advertisedSize);
-      }
     }
   }
 }
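
For context, a minimal sketch of the Content-Length verification pattern that the GetImageServlet and TransferFsImage hunks above remove on this branch: compare the byte count actually received against the size advertised in the response header and fail on a mismatch. The class name and standalone method are hypothetical; this is not the patch's code.

    // Sketch only: length-checked download over HTTP.
    import java.io.IOException;
    import java.io.InputStream;
    import java.net.URL;
    import java.net.URLConnection;

    class LengthCheckedFetch {
      /** Read a URL fully and fail if the byte count differs from Content-Length. */
      static long fetch(URL url) throws IOException {
        URLConnection conn = url.openConnection();
        String contentLength = conn.getHeaderField("Content-Length");
        if (contentLength == null) {
          throw new IOException("Content-Length header not provided by " + url);
        }
        long advertised = Long.parseLong(contentLength);
        long received = 0;
        byte[] buf = new byte[4096];
        try (InputStream in = conn.getInputStream()) {
          int n;
          while ((n = in.read(buf)) > 0) {
            received += n;                   // a real caller would also write the bytes out
          }
        }
        if (received != advertised) {
          throw new IOException("Received " + received + " bytes, expected " + advertised);
        }
        return received;
      }
    }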

+ 6 - 10
src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java

@@ -48,8 +48,6 @@ public class NameNodeMetrics implements Updater {
     
     private NameNodeActivtyMBean namenodeActivityMBean;
     
-    public MetricsTimeVaryingInt numCreateFileOps = 
-                    new MetricsTimeVaryingInt("CreateFileOps", registry);
     public MetricsTimeVaryingInt numFilesCreated =
                           new MetricsTimeVaryingInt("FilesCreated", registry);
     public MetricsTimeVaryingInt numFilesAppended =
@@ -60,23 +58,21 @@ public class NameNodeMetrics implements Updater {
                     new MetricsTimeVaryingInt("FilesRenamed", registry);
     public MetricsTimeVaryingInt numGetListingOps = 
                     new MetricsTimeVaryingInt("GetListingOps", registry);
+    public MetricsTimeVaryingInt numCreateFileOps = 
+                    new MetricsTimeVaryingInt("CreateFileOps", registry);
     public MetricsTimeVaryingInt numDeleteFileOps = 
                           new MetricsTimeVaryingInt("DeleteFileOps", registry);
-    public MetricsTimeVaryingInt numFilesDeleted = new MetricsTimeVaryingInt(
-        "FilesDeleted", registry, 
-        "Number of files and directories deleted by delete or rename operation");
     public MetricsTimeVaryingInt numFileInfoOps =
                           new MetricsTimeVaryingInt("FileInfoOps", registry);
     public MetricsTimeVaryingInt numAddBlockOps = 
                           new MetricsTimeVaryingInt("AddBlockOps", registry);
 
-    public MetricsTimeVaryingRate transactions = new MetricsTimeVaryingRate(
-      "Transactions", registry, "Journal Transaction");
+    public MetricsTimeVaryingRate transactions =
+                    new MetricsTimeVaryingRate("Transactions", registry, "Journal Transaction");
     public MetricsTimeVaryingRate syncs =
                     new MetricsTimeVaryingRate("Syncs", registry, "Journal Sync");
-    public MetricsTimeVaryingInt transactionsBatchedInSync = new MetricsTimeVaryingInt(
-      "JournalTransactionsBatchedInSync", registry,
-      "Journal Transactions Batched In Sync");
+    public MetricsTimeVaryingInt transactionsBatchedInSync = 
+                    new MetricsTimeVaryingInt("JournalTransactionsBatchedInSync", registry, "Journal Transactions Batched In Sync");
     public MetricsTimeVaryingRate blockReport =
                     new MetricsTimeVaryingRate("blockReport", registry, "Block Report");
     public MetricsIntValue safeModeTime =

+ 3 - 0
src/mapred/org/apache/hadoop/mapred/Counters.java

@@ -47,7 +47,9 @@ import org.apache.hadoop.util.StringUtils;
  * 
  * <p><code>Counters</code> are bunched into {@link Group}s, each comprising of
  * counters from a particular <code>Enum</code> class. 
+ * @deprecated Use {@link org.apache.hadoop.mapreduce.Counters} instead.
  */
+@Deprecated
 public class Counters implements Writable, Iterable<Counters.Group> {
   private static final Log LOG = LogFactory.getLog(Counters.class);
   private static final char GROUP_OPEN = '{';
@@ -108,6 +110,7 @@ public class Counters implements Writable, Iterable<Counters.Group> {
     }
     
     // Checks for (content) equality of two (basic) counters
+    @Deprecated
     synchronized boolean contentEquals(Counter c) {
       return this.equals(c);
     }

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/DisallowedTaskTrackerException.java

@@ -29,8 +29,6 @@ import java.io.IOException;
  */
 class DisallowedTaskTrackerException extends IOException {
 
-  private static final long serialVersionUID = 1L;
-
   public DisallowedTaskTrackerException(TaskTrackerStatus tracker) {
     super("Tasktracker denied communication with jobtracker: " + tracker.getTrackerName());
   }

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/FileAlreadyExistsException.java

@@ -27,8 +27,6 @@ import java.io.IOException;
 public class FileAlreadyExistsException
     extends IOException {
 
-  private static final long serialVersionUID = 1L;
-
   public FileAlreadyExistsException() {
     super();
   }

+ 3 - 0
src/mapred/org/apache/hadoop/mapred/FileInputFormat.java

@@ -51,7 +51,10 @@ import org.apache.hadoop.util.StringUtils;
  * Subclasses of <code>FileInputFormat</code> can also override the 
  * {@link #isSplitable(FileSystem, Path)} method to ensure input-files are
  * not split-up and are processed as a whole by {@link Mapper}s.
+ * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat}
+ *  instead.
  */
+@Deprecated
 public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
 
   public static final Log LOG =

+ 3 - 0
src/mapred/org/apache/hadoop/mapred/FileSplit.java

@@ -28,7 +28,10 @@ import org.apache.hadoop.fs.Path;
 /** A section of an input file.  Returned by {@link
  * InputFormat#getSplits(JobConf, int)} and passed to
  * {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}. 
+ * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.input.FileSplit}
+ *  instead.
  */
+@Deprecated
 public class FileSplit extends org.apache.hadoop.mapreduce.InputSplit 
                        implements InputSplit {
   private Path file;

+ 1 - 0
src/mapred/org/apache/hadoop/mapred/ID.java

@@ -27,6 +27,7 @@ package org.apache.hadoop.mapred;
  * @see TaskID
  * @see TaskAttemptID
  */
+@Deprecated
 public abstract class ID extends org.apache.hadoop.mapreduce.ID {
 
   /** constructs an ID object from the given int */

+ 2 - 0
src/mapred/org/apache/hadoop/mapred/InputFormat.java

@@ -61,7 +61,9 @@ import org.apache.hadoop.fs.FileSystem;
  * @see RecordReader
  * @see JobClient
  * @see FileInputFormat
+ * @deprecated Use {@link org.apache.hadoop.mapreduce.InputFormat} instead.
  */
+@Deprecated
 public interface InputFormat<K, V> {
 
   /** 

+ 2 - 0
src/mapred/org/apache/hadoop/mapred/InputSplit.java

@@ -31,7 +31,9 @@ import org.apache.hadoop.io.Writable;
  * 
  * @see InputFormat
  * @see RecordReader
+ * @deprecated Use {@link org.apache.hadoop.mapreduce.InputSplit} instead.
  */
+@Deprecated
 public interface InputSplit extends Writable {
 
   /**

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/InvalidFileTypeException.java

@@ -27,8 +27,6 @@ import java.io.IOException;
 public class InvalidFileTypeException
     extends IOException {
 
-  private static final long serialVersionUID = 1L;
-
   public InvalidFileTypeException() {
     super();
   }

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/InvalidInputException.java

@@ -27,8 +27,6 @@ import java.util.Iterator;
  * by one.
  */
 public class InvalidInputException extends IOException {
- 
-  private static final long serialVersionUID = 1L;
   private List<IOException> problems;
   
   /**

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/InvalidJobConfException.java

@@ -27,8 +27,6 @@ import java.io.IOException;
 public class InvalidJobConfException
     extends IOException {
 
-  private static final long serialVersionUID = 1L;
-
   public InvalidJobConfException() {
     super();
   }

+ 45 - 75
src/mapred/org/apache/hadoop/mapred/JobConf.java

@@ -99,7 +99,9 @@ import org.apache.hadoop.util.Tool;
  * @see ClusterStatus
  * @see Tool
  * @see DistributedCache
+ * @deprecated Use {@link Configuration} instead
  */
+@Deprecated
 public class JobConf extends Configuration {
   
   private static final Log LOG = LogFactory.getLog(JobConf.class);
@@ -110,8 +112,7 @@ public class JobConf extends Configuration {
   }
 
   /**
-   * @deprecated Use {@link #MAPRED_JOB_MAP_MEMORY_MB_PROPERTY} and
-   * {@link #MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY}
+   * @deprecated
    */
   @Deprecated
   public static final String MAPRED_TASK_MAXVMEM_PROPERTY =
@@ -1426,68 +1427,32 @@ public class JobConf extends Configuration {
     return get("job.local.dir");
   }
 
-  /**
-   * Get memory required to run a map task of the job, in MB.
-   * 
-   * If a value is specified in the configuration, it is returned.
-   * Else, it returns {@link #DISABLED_MEMORY_LIMIT}.
-   * <p/>
-   * For backward compatibility, if the job configuration sets the
-   * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
-   * from {@link #DISABLED_MEMORY_LIMIT}, that value will be used
-   * after converting it from bytes to MB.
-   * @return memory required to run a map task of the job, in MB,
-   *          or {@link #DISABLED_MEMORY_LIMIT} if unset.
-   */
   public long getMemoryForMapTask() {
-    long value = getDeprecatedMemoryValue();
-    if (value == DISABLED_MEMORY_LIMIT) {
-      value = normalizeMemoryConfigValue(
-                getLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY,
-                          DISABLED_MEMORY_LIMIT));
+    if (get(MAPRED_TASK_MAXVMEM_PROPERTY) != null) {
+      long val = getLong(
+        MAPRED_TASK_MAXVMEM_PROPERTY, DISABLED_MEMORY_LIMIT);
+      return (val == DISABLED_MEMORY_LIMIT) ? val :
+        ((val < 0) ? DISABLED_MEMORY_LIMIT : val / (1024 * 1024));
     }
-    return value;
+    return getLong(
+      JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY,
+      DISABLED_MEMORY_LIMIT);
   }
 
   public void setMemoryForMapTask(long mem) {
     setLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY, mem);
   }
 
-  /**
-   * Get memory required to run a reduce task of the job, in MB.
-   * 
-   * If a value is specified in the configuration, it is returned.
-   * Else, it returns {@link #DISABLED_MEMORY_LIMIT}.
-   * <p/>
-   * For backward compatibility, if the job configuration sets the
-   * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
-   * from {@link #DISABLED_MEMORY_LIMIT}, that value will be used
-   * after converting it from bytes to MB.
-   * @return memory required to run a reduce task of the job, in MB,
-   *          or {@link #DISABLED_MEMORY_LIMIT} if unset.
-   */
   public long getMemoryForReduceTask() {
-    long value = getDeprecatedMemoryValue();
-    if (value == DISABLED_MEMORY_LIMIT) {
-      value = normalizeMemoryConfigValue(
-                getLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY,
-                        DISABLED_MEMORY_LIMIT));
-    }
-    return value;
-  }
-  
-  // Return the value set to the key MAPRED_TASK_MAXVMEM_PROPERTY,
-  // converted into MBs.
-  // Returns DISABLED_MEMORY_LIMIT if unset, or set to a negative
-  // value.
-  private long getDeprecatedMemoryValue() {
-    long oldValue = getLong(MAPRED_TASK_MAXVMEM_PROPERTY, 
-        DISABLED_MEMORY_LIMIT);
-    oldValue = normalizeMemoryConfigValue(oldValue);
-    if (oldValue != DISABLED_MEMORY_LIMIT) {
-      oldValue /= (1024*1024);
+    if (get(MAPRED_TASK_MAXVMEM_PROPERTY) != null) {
+      long val = getLong(
+        MAPRED_TASK_MAXVMEM_PROPERTY, DISABLED_MEMORY_LIMIT);
+      return (val == DISABLED_MEMORY_LIMIT) ? val :
+        ((val < 0) ? DISABLED_MEMORY_LIMIT : val / (1024 * 1024));
     }
-    return oldValue;
+    return getLong(
+      JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY,
+      DISABLED_MEMORY_LIMIT);
   }
 
   public void setMemoryForReduceTask(long mem) {
@@ -1559,21 +1524,18 @@ public class JobConf extends Configuration {
 
 
   /**
-   * Get the memory required to run a task of this job, in bytes. See
+   * The maximum amount of memory any task of this job will use. See
    * {@link #MAPRED_TASK_MAXVMEM_PROPERTY}
    * <p/>
-   * This method is deprecated. Now, different memory limits can be
-   * set for map and reduce tasks of a job, in MB. 
-   * <p/>
-   * For backward compatibility, if the job configuration sets the
-   * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
-   * from {@link #DISABLED_MEMORY_LIMIT}, that value is returned. 
-   * Otherwise, this method will return the larger of the values returned by 
-   * {@link #getMemoryForMapTask()} and {@link #getMemoryForReduceTask()}
-   * after converting them into bytes.
-   * 
-   * @return Memory required to run a task of this job, in bytes,
-   *         or {@link #DISABLED_MEMORY_LIMIT}, if unset.
+   * mapred.task.maxvmem is split into
+   * mapred.job.map.memory.mb
+   * and mapred.job.map.memory.mb,mapred
+   * each of the new key are set
+   * as mapred.task.maxvmem / 1024
+   * as new values are in MB
+   *
+   * @return The maximum amount of memory any task of this job will use, in
+   *         bytes.
    * @see #setMaxVirtualMemoryForTask(long)
    * @deprecated Use {@link #getMemoryForMapTask()} and
    *             {@link #getMemoryForReduceTask()}
@@ -1584,16 +1546,24 @@ public class JobConf extends Configuration {
       "getMaxVirtualMemoryForTask() is deprecated. " +
       "Instead use getMemoryForMapTask() and getMemoryForReduceTask()");
 
-    long value = getLong(MAPRED_TASK_MAXVMEM_PROPERTY, DISABLED_MEMORY_LIMIT);
-    value = normalizeMemoryConfigValue(value);
-    if (value == DISABLED_MEMORY_LIMIT) {
-      value = Math.max(getMemoryForMapTask(), getMemoryForReduceTask());
-      value = normalizeMemoryConfigValue(value);
-      if (value != DISABLED_MEMORY_LIMIT) {
-        value *= 1024*1024;
+    if (get(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY) == null) {
+      if (get(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY) != null || get(
+        JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY) != null) {
+        long val = Math.max(getMemoryForMapTask(), getMemoryForReduceTask());
+        if (val == JobConf.DISABLED_MEMORY_LIMIT) {
+          return val;
+        } else {
+          if (val < 0) {
+            return JobConf.DISABLED_MEMORY_LIMIT;
+          }
+          return val * 1024 * 1024;
+          //Convert MB to byte as new value is in
+          // MB and old deprecated method returns bytes
+        }
       }
     }
-    return value;
+
+    return getLong(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY, DISABLED_MEMORY_LIMIT);
   }
 
   /**
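
For reference, a minimal sketch of the precedence and unit-conversion rule the JobConf hunks above restore: the deprecated byte-valued key wins if set (converted to MB), otherwise the new per-task MB key is read directly. Key names match the properties referenced in the hunk, but the helper class below and the use of java.util.Properties in place of Configuration are illustrative assumptions.

    // Sketch only: deprecated bytes key takes precedence over the new MB key.
    import java.util.Properties;

    class MemoryLimits {
      static final long DISABLED = -1L;                          // "no limit configured"
      static final String OLD_KEY = "mapred.task.maxvmem";       // bytes (deprecated)
      static final String MAP_KEY = "mapred.job.map.memory.mb";  // MB (new)

      /** Memory for a map task in MB, or DISABLED if neither key is usable. */
      static long memoryForMapTaskMb(Properties conf) {
        String old = conf.getProperty(OLD_KEY);
        if (old != null) {
          long bytes = Long.parseLong(old);
          return bytes < 0 ? DISABLED : bytes / (1024 * 1024);   // bytes -> MB
        }
        String mb = conf.getProperty(MAP_KEY);
        return mb == null ? DISABLED : Long.parseLong(mb);
      }
    }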

+ 1 - 0
src/mapred/org/apache/hadoop/mapred/JobConfigurable.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapred;
 
 /** That what may be configured. */
+@Deprecated
 public interface JobConfigurable {
   /** Initializes a new instance from a {@link JobConf}.
    *

+ 4 - 0
src/mapred/org/apache/hadoop/mapred/JobContext.java

@@ -19,6 +19,10 @@ package org.apache.hadoop.mapred;
 
 import org.apache.hadoop.util.Progressable;
 
+/**
+ * @deprecated Use {@link org.apache.hadoop.mapreduce.JobContext} instead.
+ */
+@Deprecated
 public class JobContext extends org.apache.hadoop.mapreduce.JobContext {
   private JobConf job;
   private Progressable progress;

+ 1 - 3
src/mapred/org/apache/hadoop/mapred/JobHistory.java

@@ -857,8 +857,7 @@ public class JobHistory {
      * @deprecated Use 
      *     {@link #logSubmitted(JobID, JobConf, String, long, boolean)} instead.
      */
-     @Deprecated
-     public static void logSubmitted(JobID jobId, JobConf jobConf, 
+    public static void logSubmitted(JobID jobId, JobConf jobConf, 
                                     String jobConfPath, long submitTime) 
     throws IOException {
       logSubmitted(jobId, jobConf, jobConfPath, submitTime, true);
@@ -1213,7 +1212,6 @@ public class JobHistory {
      * @param restartCount number of times the job got restarted
      * @deprecated Use {@link #logJobInfo(JobID, long, long)} instead.
      */
-    @Deprecated
     public static void logJobInfo(JobID jobid, long submitTime, long launchTime,
                                   int restartCount){
       logJobInfo(jobid, submitTime, launchTime);

+ 1 - 0
src/mapred/org/apache/hadoop/mapred/JobID.java

@@ -38,6 +38,7 @@ import java.io.IOException;
  * @see TaskID
  * @see TaskAttemptID
  */
+@Deprecated
 public class JobID extends org.apache.hadoop.mapreduce.JobID {
   /**
    * Constructs a JobID object 

+ 23 - 36
src/mapred/org/apache/hadoop/mapred/JobTracker.java

@@ -117,15 +117,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol,
   private DNSToSwitchMapping dnsToSwitchMapping;
   private NetworkTopology clusterMap = new NetworkTopology();
   private int numTaskCacheLevels; // the max level to which we cache tasks
-  /**
-   * {@link #nodesAtMaxLevel} is using the keySet from {@link ConcurrentHashMap}
-   * so that it can be safely written to and iterated on via 2 separate threads.
-   * Note: It can only be iterated from a single thread which is feasible since
-   *       the only iteration is done in {@link JobInProgress} under the 
-   *       {@link JobTracker} lock.
-   */
-  private Set<Node> nodesAtMaxLevel = 
-    Collections.newSetFromMap(new ConcurrentHashMap<Node, Boolean>());
+  private Set<Node> nodesAtMaxLevel = new HashSet<Node>();
   private final TaskScheduler taskScheduler;
   private final List<JobInProgressListener> jobInProgressListeners =
     new CopyOnWriteArrayList<JobInProgressListener>();
@@ -142,9 +134,6 @@ public class JobTracker implements MRConstants, InterTrackerProtocol,
    * A client tried to submit a job before the Job Tracker was ready.
    */
   public static class IllegalStateException extends IOException {
- 
-    private static final long serialVersionUID = 1L;
-
     public IllegalStateException(String msg) {
       super(msg);
     }
@@ -2396,27 +2385,25 @@ public class JobTracker implements MRConstants, InterTrackerProtocol,
   }
   
   private Node addHostToNodeMapping(String host, String networkLoc) {
-    Node node = null;
-    synchronized (nodesAtMaxLevel) {
-      if ((node = clusterMap.getNode(networkLoc+"/"+host)) == null) {
-        node = new NodeBase(host, networkLoc);
-        clusterMap.add(node);
-        if (node.getLevel() < getNumTaskCacheLevels()) {
-          LOG.fatal("Got a host whose level is: " + node.getLevel() + "." 
-              + " Should get at least a level of value: " 
-              + getNumTaskCacheLevels());
-          try {
-            stopTracker();
-          } catch (IOException ie) {
-            LOG.warn("Exception encountered during shutdown: " 
-                + StringUtils.stringifyException(ie));
-            System.exit(-1);
-          }
+    Node node;
+    if ((node = clusterMap.getNode(networkLoc+"/"+host)) == null) {
+      node = new NodeBase(host, networkLoc);
+      clusterMap.add(node);
+      if (node.getLevel() < getNumTaskCacheLevels()) {
+        LOG.fatal("Got a host whose level is: " + node.getLevel() + "." 
+                  + " Should get at least a level of value: " 
+                  + getNumTaskCacheLevels());
+        try {
+          stopTracker();
+        } catch (IOException ie) {
+          LOG.warn("Exception encountered during shutdown: " 
+                   + StringUtils.stringifyException(ie));
+          System.exit(-1);
         }
-        hostnameToNodeMap.put(host, node);
-        // Make an entry for the node at the max level in the cache
-        nodesAtMaxLevel.add(getParentNode(node, getNumTaskCacheLevels() - 1));
       }
+      hostnameToNodeMap.put(host, node);
+      // Make an entry for the node at the max level in the cache
+      nodesAtMaxLevel.add(getParentNode(node, getNumTaskCacheLevels() - 1));
     }
     return node;
   }
@@ -3312,13 +3299,13 @@ public class JobTracker implements MRConstants, InterTrackerProtocol,
       Vector<TaskInProgress> completeTasks = job.reportCleanupTIPs(true);
       for (Iterator<TaskInProgress> it = completeTasks.iterator();
            it.hasNext();) {
-        TaskInProgress tip = it.next();
+        TaskInProgress tip = (TaskInProgress) it.next();
         reports.add(tip.generateSingleReport());
       }
       Vector<TaskInProgress> incompleteTasks = job.reportCleanupTIPs(false);
       for (Iterator<TaskInProgress> it = incompleteTasks.iterator(); 
            it.hasNext();) {
-        TaskInProgress tip = it.next();
+        TaskInProgress tip = (TaskInProgress) it.next();
         reports.add(tip.generateSingleReport());
       }
       return reports.toArray(new TaskReport[reports.size()]);
@@ -3335,13 +3322,13 @@ public class JobTracker implements MRConstants, InterTrackerProtocol,
       Vector<TaskInProgress> completeTasks = job.reportSetupTIPs(true);
       for (Iterator<TaskInProgress> it = completeTasks.iterator();
            it.hasNext();) {
-        TaskInProgress tip =  it.next();
+        TaskInProgress tip = (TaskInProgress) it.next();
         reports.add(tip.generateSingleReport());
       }
       Vector<TaskInProgress> incompleteTasks = job.reportSetupTIPs(false);
       for (Iterator<TaskInProgress> it = incompleteTasks.iterator(); 
            it.hasNext();) {
-        TaskInProgress tip =  it.next();
+        TaskInProgress tip = (TaskInProgress) it.next();
         reports.add(tip.generateSingleReport());
       }
       return reports.toArray(new TaskReport[reports.size()]);
@@ -3753,7 +3740,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol,
         jobStatusList.add(status);
       }
     }
-    return  jobStatusList.toArray(
+    return (JobStatus[]) jobStatusList.toArray(
         new JobStatus[jobStatusList.size()]);
   }
 

Some files were not shown because too many files changed in this diff