Browse code

MAPREDUCE-6288. Changed permissions on JobHistory server's done directory so that user's client can load the conf files directly. Contributed by Robert Kanter.

(cherry picked from commit 5358b83167777a7108b32c9900fb0d01ca0fe961)
Vinod Kumar Vavilapalli 10 years ago
parent
commit
cc130a033a

+ 4 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -262,6 +262,10 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6285. ClientServiceDelegate should not retry upon
     AuthenticationException. (Jonathan Eagles via ozawa)
 
+    MAPREDUCE-6288. Changed permissions on JobHistory server's done directory
+    so that user's client can load the conf files directly. (Robert Kanter via
+    vinodkv)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java

@@ -72,7 +72,7 @@ public class JobHistoryUtils {
    * Permissions for the history done dir and derivatives.
    */
   public static final FsPermission HISTORY_DONE_DIR_PERMISSION =
-    FsPermission.createImmutable((short) 0770); 
+    FsPermission.createImmutable((short) 0771);
 
   public static final FsPermission HISTORY_DONE_FILE_PERMISSION =
     FsPermission.createImmutable((short) 0770); // rwx------
@@ -81,7 +81,7 @@ public class JobHistoryUtils {
    * Umask for the done dir and derivatives.
    */
   public static final FsPermission HISTORY_DONE_DIR_UMASK = FsPermission
-      .createImmutable((short) (0770 ^ 0777));
+      .createImmutable((short) (0771 ^ 0777));
 
   
   /**

+ 29 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java

@@ -571,8 +571,10 @@ public class HistoryFileManager extends AbstractService {
           new Path(doneDirPrefix));
       doneDirFc = FileContext.getFileContext(doneDirPrefixPath.toUri(), conf);
       doneDirFc.setUMask(JobHistoryUtils.HISTORY_DONE_DIR_UMASK);
-      mkdir(doneDirFc, doneDirPrefixPath, new FsPermission(
-          JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION));
+      FsPermission doneDirPerm = new FsPermission(
+          JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION);
+      mkdir(doneDirFc, doneDirPrefixPath, doneDirPerm);
+      checkHistoryDirsPermissions(doneDirFc, doneDirPrefixPath, doneDirPerm);
     } catch (ConnectException ex) {
       if (logWait) {
         LOG.info("Waiting for FileSystem at " +
@@ -659,6 +661,31 @@ public class HistoryFileManager extends AbstractService {
     }
   }
 
+  private void checkHistoryDirsPermissions(FileContext fc, Path donePath,
+      FsPermission fsp) throws IOException {
+    FileStatus fsStatus = fc.getFileStatus(donePath);
+    if (fsStatus.getPermission().toShort() != fsp.toShort()) {
+      fc.setPermission(donePath, fsp);
+    }
+    List<FileStatus> dirs = findTimestampedDirectories();
+    for (FileStatus dir : dirs) {
+      setPermission(fc, donePath, fsp, dir);
+    }
+  }
+
+  private void setPermission(FileContext fc, Path donePath, FsPermission fsp,
+       FileStatus dir) throws IOException {
+    FsPermission fsPerm = dir.getPermission();
+    if (fsPerm.toShort() != fsp.toShort()) {
+      fc.setPermission(dir.getPath(), fsp);
+      Path parentPath = dir.getPath().getParent();
+      if (!parentPath.equals(donePath)) {
+        FileStatus parentDir = fc.getFileStatus(parentPath);
+        setPermission(fc, donePath, fsp, parentDir);
+      }
+    }
+  }
+
   /**
    * Populates index data structures. Should only be called at initialization
    * times.

+ 73 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java

@@ -23,6 +23,9 @@ import java.io.File;
 import java.io.FileOutputStream;
 import java.util.UUID;
 
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -108,6 +111,76 @@ public class TestHistoryFileManager {
     testTryCreateHistoryDirs(dfsCluster.getConfiguration(0), true);
   }
 
+  @Test
+  public void testUpdateDirPermissions() throws Exception {
+    DistributedFileSystem fs = dfsCluster.getFileSystem();
+    fs.setSafeMode( HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
+    Configuration conf = dfsCluster.getConfiguration(0);
+    conf.set(JHAdminConfig.MR_HISTORY_DONE_DIR, getDoneDirNameForTest());
+    conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR, getIntermediateDoneDirNameForTest());
+    Path p1a = new Path(getDoneDirNameForTest(), "2013");
+    Path p1b = new Path(p1a, "02");
+    Path p1c = new Path(p1b, "15");
+    Path p1d = new Path(p1c, "000000");
+    Path p2a = new Path(getDoneDirNameForTest(), "2013");
+    Path p2b = new Path(p2a, "03");
+    Path p2c = new Path(p2b, "14");
+    Path p2d = new Path(p2c, "000001");
+    FsPermission oldPerms = new FsPermission((short) 0770);
+    fs.mkdirs(p1d);
+    fs.mkdirs(p2d);
+    fs.setPermission(p1a, oldPerms);
+    fs.setPermission(p1b, oldPerms);
+    fs.setPermission(p1c, oldPerms);
+    fs.setPermission(p1d, oldPerms);
+    fs.setPermission(p2a, oldPerms);
+    fs.setPermission(p2b, oldPerms);
+    fs.setPermission(p2c, oldPerms);
+    fs.setPermission(p2d, oldPerms);
+    Path p1File = new Path(p1d, "foo.jhist");
+    Assert.assertTrue(fs.createNewFile(p1File));
+    fs.setPermission(p1File, JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION);
+    Path p2File = new Path(p2d, "bar.jhist");
+    Assert.assertTrue(fs.createNewFile(p2File));
+    fs.setPermission(p2File, JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION);
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p1a).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p1b).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p1c).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p1d).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION,
+        fs.getFileStatus(p1File).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p2a).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p2b).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p2c).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p2d).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION,
+        fs.getFileStatus(p2File).getPermission());
+    HistoryFileManager hfm = new HistoryFileManager();
+    hfm.conf = conf;
+    Assert.assertEquals(true, hfm.tryCreatingHistoryDirs(false));
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p1a).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p1b).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p1c).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p1d).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION,
+        fs.getFileStatus(p2File).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p2a).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p2b).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p2c).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p2d).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION,
+        fs.getFileStatus(p2File).getPermission());
+  }
+
   @Test
   public void testCreateDirsWithAdditionalFileSystem() throws Exception {
     dfsCluster.getFileSystem().setSafeMode(