
commit 8a29dde7fa440980e025832f664c844d134c23ee
Author: Owen O'Malley <omalley@apache.org>
Date: Wed Mar 2 16:25:19 2011 -0800

clean up sloppy merges done as part of patch line flattening.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-security-patches@1077751 13f79535-47bb-0310-9956-ffa450edef68

Owen O'Malley, 14 years ago
commit 14dcfb2a96
21 files changed, 42 insertions and 167 deletions
   1. + 0 - 21   bin/hadoop
   2. + 16 - 18  build.xml
   3. + 0 - 57   src/c++/jsvc/build.xml
   4. BIN        src/c++/jsvc/commons-daemon-1.0.2-src.tar.gz
   5. + 0 - 1    src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java
   6. + 0 - 1    src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
   7. + 0 - 1    src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java
   8. + 1 - 0    src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java
   9. + 0 - 1    src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java
  10. + 0 - 5    src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java
  11. + 0 - 7    src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
  12. + 0 - 1    src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestUserResolve.java
  13. + 2 - 2    src/core/org/apache/hadoop/util/ServletUtil.java
  14. + 0 - 2    src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java
  15. + 0 - 2    src/mapred/org/apache/hadoop/mapred/JobInProgress.java
  16. + 0 - 27   src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java
  17. + 16 - 13  src/mapred/org/apache/hadoop/mapred/Task.java
  18. + 2 - 2    src/mapred/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java
  19. + 0 - 1    src/test/org/apache/hadoop/filecache/TestTrackerDistributedCacheManager.java
  20. + 1 - 1    src/test/org/apache/hadoop/mapred/MiniMRCluster.java
  21. + 4 - 4    src/test/org/apache/hadoop/mapred/TestQueueManagerForJobKillAndJobPriority.java

+ 0 - 21
bin/hadoop

@@ -330,27 +330,6 @@ if [ "$starting_secure_dn" = "true" ]; then
     HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
   else
    HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
-
-if [ "$COMMAND" = "datanode" ]; then
-  if [[ $EUID -eq 0 ]]; then
-    if [ "$HADOOP_SECURE_DN_USER" = "" ]; then
-      HADOOP_SECURE_DN_USER="hdfs"
-    fi
-
-    if [ "$HADOOP_PID_DIR" = "" ]; then
-      HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
-    else
-      HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
-    fi
-
-    exec "jsvc" -outfile "$HADOOP_LOG_DIR/jsvc.out" \
-                -errfile "$HADOOP_LOG_DIR/jsvc.err" \
-                -pidfile "$HADOOP_SECURE_DN_PID" \
-                -nodetach \
-                -user "$HADOOP_SECURE_DN_USER" \
-                -cp "$CLASSPATH" \
-                $JAVA_HEAP_MAX $HADOOP_OPTS \
-                org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter "$@"
   fi
 
   exec "$HADOOP_HOME/bin/jsvc" -Dproc_$COMMAND -outfile "$HADOOP_LOG_DIR/jsvc.out" \

+ 16 - 18
build.xml

@@ -26,7 +26,7 @@
   <property file="${user.home}/build.properties" />
   <property file="${basedir}/build.properties" />
  
-  <property name="Name" value="Yahoo! Distribution of Hadoop"/>
+  <property name="Name" value="Hadoop"/>
   <property name="name" value="hadoop"/>
   <property name="version" value="0.20.202.0-SNAPSHOT"/>
   <property name="final.name" value="${name}-${version}"/>
@@ -183,10 +183,6 @@
 
   <!-- end of task-controller properties -->
 
-  <!-- jsvc properties set here -->
-  <property name="c++.jsvc.src" 
-    value="${basedir}/src/c++/jsvc" />
-	
   <!-- IVY properteis set here -->
   <property name="ivy.dir" location="ivy" />
   <loadproperties srcfile="${ivy.dir}/libraries.properties"/>
@@ -661,7 +657,7 @@
         <section name="org/apache/hadoop">
           <attribute name="Implementation-Title" value="Hadoop"/>
           <attribute name="Implementation-Version" value="${version}"/>
-          <attribute name="Implementation-Vendor" value="Yahoo!"/>
+          <attribute name="Implementation-Vendor" value="Apache"/>
         </section>
       </manifest>
       <fileset dir="${conf.dir}" includes="${jar.properties.list}" />
@@ -826,7 +822,7 @@
           <section name="org/apache/hadoop">
             <attribute name="Implementation-Title" value="Hadoop"/>
             <attribute name="Implementation-Version" value="${version}"/>
-            <attribute name="Implementation-Vendor" value="Yahoo!"/>
+            <attribute name="Implementation-Vendor" value="Apache"/>
           </section>
          </manifest>
     </jar>
@@ -1235,7 +1231,7 @@
       use="true"
       windowtitle="${Name} ${version} API"
       doctitle="${Name} ${version} Developer API"
-      bottom="This release is based on the Yahoo! Distribution of Hadoop, powering the largest Hadoop clusters in the Universe!&lt;br>Copyright &amp;copy; ${year} The Apache Software Foundation."
+      bottom="Copyright &amp;copy; ${year} The Apache Software Foundation"
       maxmemory="${javadoc.maxmemory}"
       >
         <packageset dir="${core.src.dir}"/>
@@ -1278,7 +1274,7 @@
       use="true"
       windowtitle="${Name} ${version} API"
       doctitle="${Name} ${version} API"
-      bottom="This release is based on the Yahoo! Distribution of Hadoop, powering the largest Hadoop clusters in the Universe!&lt;br>Copyright &amp;copy; ${year} The Apache Software Foundation."
+      bottom="Copyright &amp;copy; ${year} The Apache Software Foundation"
       maxmemory="${javadoc.maxmemory}"
       >
         <packageset dir="${core.src.dir}"/>
@@ -2315,14 +2311,16 @@
 
   <!-- end of task-controller targets -->
 
-  <!-- jsvc targets -->
-  <target name="jsvc" if="compile.c++">
-    <subant target="jsvc">
-      <property name="c++.jsvc.src" value="${c++.jsvc.src}" />
-      <property name="build.c++.jsvc" value="${build.c++}/jsvc" />
-      <property name="jsvc.install.dir" value="${dist.dir}/bin" /> 
-      <fileset file="${c++.jsvc.src}/build.xml"/>
-    </subant>
-  </target>
+  <target name="jsvc" >
+    <mkdir dir="${jsvc.build.dir}" />
+    <get src="${jsvc.location}" dest="${jsvc.build.dir}/${jsvc.dest.name}" />
+
+    <untar compression="gzip" src="${jsvc.build.dir}/${jsvc.dest.name}" dest="${jsvc.build.dir}" />
+
+    <copy file="${jsvc.build.dir}/jsvc" todir="${jsvc.install.dir}" verbose="true" />
+    <chmod perm="ugo+x" type="file">
+      <fileset file="${jsvc.install.dir}/jsvc"/>
+    </chmod>
+ </target>
 
 </project>

+ 0 - 57
src/c++/jsvc/build.xml

@@ -1,57 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project name="hadoopjsvc" default="compile">
-
-  <property name="jsvc.version" value="1.0.2" />
-  <property name="jsvc.tar.ball" value="commons-daemon-${jsvc.version}-src.tar.gz" />
-  <property name="jsvc.src.code.dir" value="commons-daemon-${jsvc.version}-src/src/native/unix" />
-
-  <target name="jsvc">
-    <mkdir dir="${build.c++.jsvc}" />
-
-    <untar src="${c++.jsvc.src}/${jsvc.tar.ball}" compression="gzip" dest="${build.c++.jsvc}" />
-
-    <exec executable="sh" dir="${build.c++.jsvc}/${jsvc.src.code.dir}"
-        failonerror="yes">
-        <arg value="support/buildconf.sh" />
-    </exec>
-
-    <exec executable="sh" dir="${build.c++.jsvc}/${jsvc.src.code.dir}"
-        failonerror="yes">
-        <arg value="configure" />
-    </exec>
-
-    <exec executable="make" dir="${build.c++.jsvc}/${jsvc.src.code.dir}"
-        failonerror="yes">
-        <arg value="clean" />
-    </exec>
-
-    <exec executable="make" dir="${build.c++.jsvc}/${jsvc.src.code.dir}"
-        failonerror="yes">
-    </exec>
-
-    <copy file="${build.c++.jsvc}/${jsvc.src.code.dir}/jsvc" todir="${jsvc.install.dir}"
-        verbose="true" />
-    <chmod perm="ugo+x" type="file">
-      <fileset file="${jsvc.install.dir}/jsvc"/>
-    </chmod>
-  </target>
-
-</project>

BIN
src/c++/jsvc/commons-daemon-1.0.2-src.tar.gz


+ 0 - 1
src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java

@@ -50,5 +50,4 @@ public class EchoUserResolver implements UserResolver {
       UserGroupInformation ugi) {
     return ugi;
   }
-
 }

+ 0 - 1
src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java

@@ -22,7 +22,6 @@ import java.io.InputStream;
 import java.io.PrintStream;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
-import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;

+ 0 - 1
src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java

@@ -43,7 +43,6 @@ import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.tools.rumen.JobStory;
 
 import org.apache.commons.logging.Log;

+ 1 - 0
src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java

@@ -183,6 +183,7 @@ abstract class JobFactory<T> implements Gridmix.Component<Void>,StatListener<T>{
       };
    }
      
+
   /**
    * Obtain the error that caused the thread to exit unexpectedly.
    */

+ 0 - 1
src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java

@@ -83,7 +83,6 @@ class JobSubmitter implements Gridmix.Component<GridmixJob> {
       try {
         // pre-compute split information
         try {
-          UserGroupInformation.setCurrentUser(job.getUgi());
           job.buildSplits(inputDir);
         } catch (IOException e) {
           LOG.warn("Failed to submit " + job.getJob().getJobName() + " as " +

+ 0 - 5
src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java

@@ -33,11 +33,6 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UnixUserGroupInformation;
-
 public class RoundRobinUserResolver implements UserResolver {
   public static final Log LOG = LogFactory.getLog(RoundRobinUserResolver.class);
 

+ 0 - 7
src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java

@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.mapred.Counters;
 import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
@@ -67,12 +66,6 @@ public class TestGridmixSubmission {
         ).getLogger().setLevel(Level.DEBUG);
   }
 
-  private static final Path DEST = new Path("/gridmix");
-
-  private static FileSystem dfs = null;
-  private static MiniDFSCluster dfsCluster = null;
-  private static MiniMRCluster mrCluster = null;
-
   private static final int NJOBS = 3;
   private static final long GENDATA = 30; // in megabytes
   private static final int GENSLOP = 100 * 1024; // +/- 100k for logs

+ 0 - 1
src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestUserResolve.java

@@ -29,7 +29,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 public class TestUserResolve {

+ 2 - 2
src/core/org/apache/hadoop/util/ServletUtil.java

@@ -52,8 +52,8 @@ public class ServletUtil {
   }
 
   public static final String HTML_TAIL = "<hr />\n"
-    + "<a href='http://hadoop.apache.org/core'>Hadoop</a>, " 
-    + Calendar.getInstance().get(Calendar.YEAR) + ".\n"
+    + "This release is based on the <a href='http://developer.yahoo.com/hadoop/'>Yahoo! Distribution of Hadoop</a>, " 
+    + "powering the largest Hadoop clusters in the Universe!\n"
     + "</body></html>";
   
   /**

+ 0 - 2
src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java

@@ -31,8 +31,6 @@ import java.security.PrivilegedExceptionAction;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Random;
 import java.util.TimeZone;
 import java.util.concurrent.DelayQueue;
 import java.util.concurrent.Delayed;

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/JobInProgress.java

@@ -73,8 +73,6 @@ import org.apache.hadoop.util.StringUtils;
  * and its latest JobStatus, plus a set of tables for 
  * doing bookkeeping of its Tasks.
  * ***********************************************************
- * 
- * This is NOT a public interface!
  */
 public class JobInProgress {
   /**

+ 0 - 27
src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java

@@ -176,33 +176,6 @@ class LinuxTaskController extends TaskController {
       LOG.debug("initializeJob: " + Arrays.toString(commandArray));
     }
     try {
-      FileSystem rawFs = FileSystem.getLocal(getConf()).getRaw();
-      long logSize = 0; //TODO, Ref BUG:2854624
-      // get the JVM command line.
-      String cmdLine = 
-        TaskLog.buildCommandLine(setup, jvmArguments,
-            new File(stdout), new File(stderr), logSize, true);
-
-      // write the command to a file in the
-      // task specific cache directory
-      Path p = new Path(allocator.getLocalPathForWrite(
-          TaskTracker.getPrivateDirTaskScriptLocation(user, jobId, attemptId),
-          getConf()), COMMAND_FILE);
-      String commandFile = writeCommand(cmdLine, rawFs, p); 
-
-      String[] command = 
-        new String[]{taskControllerExe, 
-          user,
-          Integer.toString(Commands.LAUNCH_TASK_JVM.getValue()),
-          jobId,
-          attemptId,
-          currentWorkDirectory.toString(),
-          commandFile};
-      shExec = new ShellCommandExecutor(command);
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("launchTask: " + Arrays.toString(command));
-      }
       shExec.execute();
       if (LOG.isDebugEnabled()) {
         logOutput(shExec.getOutput());
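
For context: the block deleted here was task-launch logic (build the child JVM command line, write it to a per-attempt command file, then invoke the setuid task-controller binary with LAUNCH_TASK_JVM) that the patch-line flattening had spliced into initializeJob. With it gone, initializeJob executes only the command array it assembled above. Below is a generic, self-contained sketch of that write-command-then-exec pattern; the names (LaunchSketch, wrapperExe, launch) are illustrative stand-ins, not the LinuxTaskController API.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class LaunchSketch {
  // Persist the JVM command line to a script, then hand the script to a
  // privileged wrapper binary that re-executes it as the target user.
  static void launch(String wrapperExe, String user, String cmdLine)
      throws IOException, InterruptedException {
    Path script = Files.createTempFile("taskjvm", ".sh");
    Files.write(script, cmdLine.getBytes());              // the "command file"
    String[] command = { wrapperExe, user, script.toString() };
    Process p = new ProcessBuilder(command).inheritIO().start();
    if (p.waitFor() != 0) {
      throw new IOException("wrapper exited with " + p.exitValue());
    }
  }
}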

+ 16 - 13
src/mapred/org/apache/hadoop/mapred/Task.java

@@ -149,9 +149,9 @@ abstract public class Task implements Writable, Configurable {
   protected org.apache.hadoop.mapreduce.OutputFormat<?,?> outputFormat;
   protected org.apache.hadoop.mapreduce.OutputCommitter committer;
   protected final Counters.Counter spilledRecordsCounter;
+  private int numSlotsRequired;
   private String pidFile = "";
   protected TaskUmbilicalProtocol umbilical;
-  private int numSlotsRequired;
   protected SecretKey tokenSecret;
 
   ////////////////////////////////////////////
@@ -888,27 +888,30 @@ abstract public class Task implements Writable, Configurable {
           }
           reporter.setProgressFlag();
         }
-        // task can Commit now  
-        try {
-          LOG.info("Task " + taskId + " is allowed to commit now");
-          committer.commitTask(taskContext);
-          return;
-        } catch (IOException iee) {
-          LOG.warn("Failure committing: " + 
-                    StringUtils.stringifyException(iee));
-          discardOutput(taskContext);
-          throw iee;
-        }
+        break;
       } catch (IOException ie) {
         LOG.warn("Failure asking whether task can commit: " + 
             StringUtils.stringifyException(ie));
         if (--retries == 0) {
-          //if it couldn't commit a successfully then delete the output
+          //if it couldn't query successfully then delete the output
           discardOutput(taskContext);
           System.exit(68);
         }
       }
     }
+    
+    // task can Commit now  
+    try {
+      LOG.info("Task " + taskId + " is allowed to commit now");
+      committer.commitTask(taskContext);
+      return;
+    } catch (IOException iee) {
+      LOG.warn("Failure committing: " + 
+        StringUtils.stringifyException(iee));
+      //if it couldn't commit a successfully then delete the output
+      discardOutput(taskContext);
+      throw iee;
+    }
   }
 
   private 
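
The reshuffle above narrows the retry loop to the commit-permission query: the loop now breaks as soon as the TaskTracker grants permission, and commitTask runs exactly once after the loop, so a failing commit discards the output rather than being re-driven by the query retries. A minimal, self-contained sketch of that control flow follows; canCommit, doCommit, and discardOutput are illustrative stand-ins, not the Task internals.

import java.io.IOException;

public class CommitLoopSketch {
  static final int MAX_RETRIES = 3;

  void commit() throws IOException {
    int retries = MAX_RETRIES;
    while (true) {
      try {
        if (canCommit()) {
          break;                 // permission granted: leave the polling loop
        }
        Thread.sleep(1000);      // not yet allowed: poll again
      } catch (IOException ie) {
        if (--retries == 0) {    // the query itself kept failing: give up
          discardOutput();
          throw ie;
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
    }
    try {
      doCommit();                // attempted exactly once, outside the loop
    } catch (IOException ioe) {
      discardOutput();           // a failed commit is not retried
      throw ioe;
    }
  }

  boolean canCommit() throws IOException { return true; }  // stub
  void doCommit() throws IOException {}                    // stub
  void discardOutput() {}                                  // stub
}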

+ 2 - 2
src/mapred/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java

@@ -56,12 +56,12 @@ public interface TaskUmbilicalProtocol extends VersionedProtocol {
    * Version 13 changed the getTask method signature for HADOOP-249
    * Version 14 changed the getTask method signature for HADOOP-4232
    * Version 15 Adds FAILED_UNCLEAN and KILLED_UNCLEAN states for HADOOP-4759
-   * Version 16 Added fatalError for child to communicate fatal errors to TT
    * Version 16 Added numRequiredSlots to TaskStatus for MAPREDUCE-516
    * Version 17 Change in signature of getTask() for HADOOP-5488
+   * Version 18 Added fatalError for child to communicate fatal errors to TT
    * */
 
-  public static final long versionID = 17L;
+  public static final long versionID = 18L;
   
   /**
    * Called when a child task process starts, to get its task.
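
This hunk untangles a duplicated history entry: the flattening had left two changes both labelled "Version 16", so the fatalError addition is renumbered as Version 18 and versionID is bumped from 17L to 18L, which lets the RPC layer reject clients built against the stale numbering. A self-contained sketch of that version-gating idea (not Hadoop's RPC code):

import java.io.IOException;

public class VersionGateSketch {
  static final long SERVER_VERSION_ID = 18L;   // the value after this commit

  // Refuse a client compiled against a different protocol revision.
  static void handshake(long clientVersionID) throws IOException {
    if (clientVersionID != SERVER_VERSION_ID) {
      throw new IOException("Protocol version mismatch: client "
          + clientVersionID + ", server " + SERVER_VERSION_ID);
    }
  }

  public static void main(String[] args) throws IOException {
    handshake(18L);   // accepted
    handshake(17L);   // throws: a client from before the renumbering
  }
}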

+ 0 - 1
src/test/org/apache/hadoop/filecache/TestTrackerDistributedCacheManager.java

@@ -69,7 +69,6 @@ public class TestTrackerDistributedCacheManager extends TestCase {
   private static final int TEST_FILE_SIZE = 4 * 1024; // 4K
   private static final int LOCAL_CACHE_LIMIT = 5 * 1024; //5K
   private static final int LOCAL_CACHE_SUBDIR_LIMIT = 1;
-  private static final int LOCAL_CACHE_SUBDIR = 2;
   protected Configuration conf;
   protected Path firstCacheFile;
   protected Path firstCacheFilePublic;

+ 1 - 1
src/test/org/apache/hadoop/mapred/MiniMRCluster.java

@@ -706,7 +706,7 @@ public class MiniMRCluster {
   /**
    * Add a tasktracker to the Mini-MR cluster.
    */
-  void addTaskTracker(TaskTrackerRunner taskTracker) throws IOException {
+  void addTaskTracker(TaskTrackerRunner taskTracker) {
     Thread taskTrackerThread = new Thread(taskTracker);
     taskTrackerList.add(taskTracker);
     taskTrackerThreadList.add(taskTrackerThread);

+ 4 - 4
src/test/org/apache/hadoop/mapred/TestQueueManagerForJobKillAndJobPriority.java

@@ -196,7 +196,7 @@ public class TestQueueManagerForJobKillAndJobPriority extends TestQueueManager {
         //write out queue-acls.xml.
         UtilsForTests.setUpConfigFile(queueConfProps, queueConfigFile);
         //refresh configuration
-        queueManager.refreshAcls(conf);
+        queueManager.refreshQueues(conf);
         //Submission should succeed
         assertTrue("User Job Submission failed after refresh.",
                    queueManager.hasAccess("default", QueueACL.SUBMIT_JOB, ugi));
@@ -214,7 +214,7 @@ public class TestQueueManagerForJobKillAndJobPriority extends TestQueueManager {
         hadoopConfProps.put(QueueManager.toFullPropertyName
                             ("q1", submitAcl), ugi.getShortUserName());
         UtilsForTests.setUpConfigFile(hadoopConfProps, hadoopConfigFile);
-        queueManager.refreshAcls(conf);
+        queueManager.refreshQueues(conf);
         assertTrue("User Job Submission allowed after refresh and no queue acls file.",
                    queueManager.hasAccess("q1", QueueACL.SUBMIT_JOB, ugi));
       } finally{
@@ -235,7 +235,7 @@ public class TestQueueManagerForJobKillAndJobPriority extends TestQueueManager {
       String queueConfigPath =
         System.getProperty("test.build.extraconf", "build/test/extraconf");
       File queueConfigFile =
-        new File(queueConfigPath, QueueManager.QUEUE_ACLS_FILE_NAME);
+        new File(queueConfigPath, QueueManager.QUEUE_ACLS_FILE_NAME );
       File hadoopConfigFile = new File(queueConfigPath, "hadoop-site.xml");
       try {
         // queue properties with which the cluster is started.
@@ -274,7 +274,7 @@ public class TestQueueManagerForJobKillAndJobPriority extends TestQueueManager {
         try {
           //Exception to be thrown by queue manager because configuration passed
           //is invalid.
-          queueManager.refreshAcls(conf);
+          queueManager.refreshQueues(conf);
           fail("Refresh of ACLs should have failed with invalid conf file.");
         } catch (Exception e) {
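
These four hunks track an API rename: QueueManager.refreshAcls(Configuration) became refreshQueues(Configuration). A hedged usage sketch follows; only the refreshQueues call is confirmed by the diff itself, and the QueueManager(Configuration) constructor is an assumption about the test setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.QueueManager;

public class RefreshQueuesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    QueueManager queueManager = new QueueManager(conf);   // assumed constructor
    // ... rewrite the queue ACLs file (QueueManager.QUEUE_ACLS_FILE_NAME) on disk ...
    queueManager.refreshQueues(conf);   // renamed from refreshAcls(conf)
  }
}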
         }