Browse code

Merge -r 754644:754645 from trunk to branch 0.20 to fix HADOOP-4783.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/branch-0.20@754646 13f79535-47bb-0310-9956-ffa450edef68
Hemanth Yamijala 16 years ago
Parent
Current commit
d847fad20b

+ 4 - 0
CHANGES.txt

@@ -71,6 +71,10 @@ Release 0.20.0 - Unreleased
     WebUI if there are such blocks. '-report' and '-metaSave' have extra
     info to track such blocks. (Raghu Angadi)
 
+    HADOOP-4783. Change permissions on history files on the jobtracker
+    to be only group readable instead of world readable.
+    (Amareshwari Sriramadasu via yhemanth)
+
   NEW FEATURES
 
     HADOOP-4575. Add a proxy service for relaying HsftpFileSystem requests.

+ 16 - 3
src/mapred/org/apache/hadoop/mapred/JobHistory.java

@@ -98,6 +98,10 @@ public class JobHistory {
   private static final String SECONDARY_FILE_SUFFIX = ".recover";
   private static long jobHistoryBlockSize = 0;
   private static String jobtrackerHostname;
+  final static FsPermission HISTORY_DIR_PERMISSION =
+    FsPermission.createImmutable((short) 0750); // rwxr-x---
+  final static FsPermission HISTORY_FILE_PERMISSION =
+    FsPermission.createImmutable((short) 0740); // rwxr-----
   private static JobConf jtConf;
   /**
    * Record types are identifiers for each line of log in history files. 
@@ -154,7 +158,7 @@ public class JobHistory {
       Path logDir = new Path(LOG_DIR);
       FileSystem fs = logDir.getFileSystem(conf);
       if (!fs.exists(logDir)){
-        if (!fs.mkdirs(logDir)){
+        if (!fs.mkdirs(logDir, new FsPermission(HISTORY_DIR_PERMISSION))) {
           throw new IOException("Mkdirs failed to create " + logDir.toString());
         }
       }
@@ -828,7 +832,9 @@ public class JobHistory {
             
             int defaultBufferSize = 
               fs.getConf().getInt("io.file.buffer.size", 4096);
-            out = fs.create(logFile, FsPermission.getDefault(), true, 
+            out = fs.create(logFile, 
+                            new FsPermission(HISTORY_FILE_PERMISSION),
+                            true, 
                             defaultBufferSize, 
                             fs.getDefaultReplication(), 
                             jobHistoryBlockSize, null);
@@ -904,8 +910,15 @@ public class JobHistory {
       try {
         if (LOG_DIR != null) {
           fs = new Path(LOG_DIR).getFileSystem(jobConf);
+          int defaultBufferSize = 
+              fs.getConf().getInt("io.file.buffer.size", 4096);
           if (!fs.exists(jobFilePath)) {
-            jobFileOut = fs.create(jobFilePath);
+            jobFileOut = fs.create(jobFilePath, 
+                                   new FsPermission(HISTORY_FILE_PERMISSION),
+                                   true, 
+                                   defaultBufferSize, 
+                                   fs.getDefaultReplication(), 
+                                   fs.getDefaultBlockSize(), null);
             jobConf.writeXml(jobFileOut);
             jobFileOut.close();
           }

+ 6 - 1
src/test/org/apache/hadoop/mapred/TestJobHistory.java

@@ -33,6 +33,7 @@ import junit.framework.TestCase;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.mapred.JobHistory.*;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -933,7 +934,11 @@ public class TestJobHistory extends TestCase {
     // Check if the history file exists
     assertTrue("History file does not exist", fileSys.exists(logFile));
 
-
+    // check history file permission
+    assertTrue("History file permissions does not match", 
+    fileSys.getFileStatus(logFile).getPermission().equals(
+       new FsPermission(JobHistory.HISTORY_FILE_PERMISSION)));
+    
     // check if the history file is parsable
     String[] jobDetails = JobHistory.JobInfo.decodeJobHistoryFileName(
     		                                   logFileName).split("_");