
commit d5d5bf4089091f1531d4b10a71d389357bc497c4
Author: Mahadev Konar <mahadev@cdev6022.inktomisearch.com>
Date: Thu Jul 22 21:34:03 2010 +0000

MAPREDUCE:1960 from https://issues.apache.org/jira/secure/attachment/12450332/MAPREDUCE-1960-yahoo-hadoop-0.20S.patch


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-security-patches@1077594 13f79535-47bb-0310-9956-ffa450edef68

Owen O'Malley committed 14 years ago
Commit 727ddcab73

+ 7 - 0
src/mapred/mapred-default.xml

@@ -643,6 +643,13 @@
   </description>
 </property>
 
+<property>
+  <name>mapred.user.jobconf.limit</name>
+  <value>5242880</value>
+  <description>The maximum allowed size of the user jobconf. The 
+  default is set to 5 MB</description>
+</property>
+
 <property>
   <name>mapred.hosts</name>
   <value></value>

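The new property caps the byte size of the serialized job.xml a user may submit; the JobTracker reads it once at startup, as the JobTracker.java hunk below shows, so it is a cluster-side setting rather than a per-job one. A minimal sketch of raising the limit in the JobTracker's configuration, mirroring how the new test sets a 1 KB limit on its mini cluster (the 10 MB value here is illustrative, not from the patch):

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobTracker;

// Set before the JobTracker starts; the limit is read once on the
// JobTracker side, so per-job overrides have no effect.
JobConf jtConf = new JobConf();
jtConf.setLong(JobTracker.MAX_USER_JOBCONF_SIZE_KEY, 10 * 1024 * 1024L);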
+ 0 - 1
src/mapred/org/apache/hadoop/mapred/JobClient.java

@@ -855,7 +855,6 @@ public class JobClient extends Configured implements MRConstants, Tool  {
           } finally {
             out.close();
           }
-
           //
           // Now, actually submit the job (using the submit name)
           //

+ 1 - 1
src/mapred/org/apache/hadoop/mapred/JobConf.java

@@ -157,7 +157,7 @@ public class JobConf extends Configuration {
    * name is mentioned.
    */
   public static final String DEFAULT_QUEUE_NAME = "default";
-
+  
   static final String MAPRED_JOB_MAP_MEMORY_MB_PROPERTY =
       "mapred.job.map.memory.mb";
 

+ 8 - 0
src/mapred/org/apache/hadoop/mapred/JobInProgress.java

@@ -40,6 +40,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
@@ -391,6 +392,13 @@ public class JobInProgress {
         public FileSystem run() throws IOException {
           return jobSubmitDir.getFileSystem(default_conf);
         }});
+      /** check for the size of jobconf **/
+      Path submitJobFile = JobSubmissionFiles.getJobConfPath(jobSubmitDir);
+      FileStatus fstatus = fs.getFileStatus(submitJobFile);
+      if (fstatus.getLen() > jobtracker.MAX_JOBCONF_SIZE) {
+        throw new IOException("Exceeded max jobconf size: " 
+            + fstatus.getLen() + " limit: " + jobtracker.MAX_JOBCONF_SIZE);
+      }
       this.localJobFile = default_conf.getLocalPath(JobTracker.SUBDIR
           +"/"+jobId + ".xml");
       Path jobFilePath = JobSubmissionFiles.getJobConfPath(jobSubmitDir);
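This is the enforcement point: before localizing job.xml, JobInProgress stats the submitted file in the staging directory and fails the job if it exceeds the JobTracker's limit. A user who wants to know in advance whether a job will trip the check can measure the serialized configuration with the same writeXml call JobClient uses to produce job.xml; a minimal sketch (the class wrapper is illustrative):

import java.io.ByteArrayOutputStream;
import org.apache.hadoop.mapred.JobConf;

public class JobConfSizeCheck {
  public static void main(String[] args) throws Exception {
    // Serialize the jobconf the way JobClient writes job.xml,
    // then compare against the 5 MB default limit from this patch.
    JobConf jobConf = new JobConf();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    jobConf.writeXml(out);
    System.out.println("Serialized jobconf: " + out.size()
        + " bytes (default limit 5242880)");
  }
}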

+ 8 - 1
src/mapred/org/apache/hadoop/mapred/JobTracker.java

@@ -104,6 +104,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
 
 import org.apache.hadoop.mapreduce.ClusterMetrics;
+import org.apache.hadoop.mapreduce.JobSubmissionFiles;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.security.token.DelegationTokenRenewal;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
@@ -145,6 +146,11 @@ public class JobTracker implements MRConstants, InterTrackerProtocol,
   // The maximum number of blacklists for a tracker after which the 
   // tracker could be blacklisted across all jobs
   private int MAX_BLACKLISTS_PER_TRACKER = 4;
+  /** the maximum allowed size of the jobconf **/
+  long MAX_JOBCONF_SIZE = 5*1024*1024L;
+  /** the config key for max user jobconf size **/
+  public static final String MAX_USER_JOBCONF_SIZE_KEY = 
+      "mapred.user.jobconf.limit";
   
   //Delegation token related keys
   public static final String  DELEGATION_KEY_UPDATE_INTERVAL_KEY =  
@@ -2037,7 +2043,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol,
                                        DELEGATION_TOKEN_GC_INTERVAL);
     secretManager.startThreads();
        
-
+    MAX_JOBCONF_SIZE = conf.getLong(MAX_USER_JOBCONF_SIZE_KEY, MAX_JOBCONF_SIZE);
     //
     // Grab some static constants
     //
@@ -3686,6 +3692,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol,
       jobInfo = new JobInfo(jobId, new Text(ugi.getShortUserName()),
           new Path(jobSubmitDir));
     }
+    
     // Create the JobInProgress, do not lock the JobTracker since
     // we are about to copy job.xml from HDFS
     JobInProgress job = null;
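The limit is resolved once during JobTracker initialization: an explicit cluster setting overrides the hard-coded 5 MB default in MAX_JOBCONF_SIZE. A sketch of the same lookup against an arbitrary Configuration (the variable name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobTracker;

// Falls back to the patch's 5 MB default when the key is unset.
Configuration conf = new Configuration();
long maxJobconfSize = conf.getLong(JobTracker.MAX_USER_JOBCONF_SIZE_KEY,
    5 * 1024 * 1024L);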

+ 28 - 2
src/test/org/apache/hadoop/mapred/TestSubmitJob.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
 import junit.framework.TestCase;
@@ -71,6 +72,7 @@ public class TestSubmitJob extends TestCase {
     jt = null;
     fs = null;
   }
+  
   /**
    * Test to verify that jobs with invalid memory requirements are killed at the
    * JT.
@@ -118,11 +120,36 @@ public class TestSubmitJob extends TestCase {
     jobConf.setMemoryForReduceTask(5 * 1024L);
     runJobAndVerifyFailure(jobConf, 1 * 1024L, 5 * 1024L,
         "Exceeds the cluster's max-memory-limit.");
-    
     mrCluster.shutdown();
     mrCluster = null;
   }
+  
+  /** check for large jobconfs **/
+  public void testJobWithInvalidDiskReqs()
+      throws Exception {
+    JobConf jtConf = new JobConf();
+    jtConf
+        .setLong(JobTracker.MAX_USER_JOBCONF_SIZE_KEY, 1 * 1024L);
+ 
+    mrCluster = new MiniMRCluster(0, "file:///", 0, null, null, jtConf);
+
+    JobConf clusterConf = mrCluster.createJobConf();
+
+    // No map-memory configuration
+    JobConf jobConf = new JobConf(clusterConf);
+    String[] args = { "-m", "0", "-r", "0", "-mt", "0", "-rt", "0" };
+    String msg = null;
+    try {
+      ToolRunner.run(jobConf, new SleepJob(), args);
+      assertTrue(false);
+    } catch (RemoteException re) {
+      System.out.println("Exception " + StringUtils.stringifyException(re));
+    }
 
+    mrCluster.shutdown();
+    mrCluster = null;
+  }
+  
   private void runJobAndVerifyFailure(JobConf jobConf, long memForMapTasks,
       long memForReduceTasks, String expectedMsg)
       throws Exception,
@@ -165,7 +192,6 @@ public class TestSubmitJob extends TestCase {
            NetUtils.getSocketFactory(conf,
                org.apache.hadoop.hdfs.protocol.ClientProtocol.class));
   }
- 
    /**
     * Submit a job and check if the files are accessible to other users.
     */