
Merge branch 'trunk' into HDFS-6581

arp, 10 years ago
Commit 9a53c3699b

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -512,6 +512,10 @@ Release 2.6.0 - UNRELEASED
     HDFS-6956. Allow dynamically changing the tracing level in Hadoop servers
     (cmccabe)
 
+    HDFS-7156. Update fsck documentation. (Masahiro Yamaguchi via shv)
+
+    HDFS-7093. Add config key to restrict setStoragePolicy. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -575,6 +575,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
   public static final String DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";
 
+  public static final String  DFS_STORAGE_POLICY_ENABLED_KEY = "dfs.storage.policy.enabled";
+  public static final boolean DFS_STORAGE_POLICY_ENABLED_DEFAULT = true;
+
   // HA related configuration
   public static final String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
   public static final String DFS_HA_NAMENODE_ID_KEY = "dfs.ha.namenode.id";
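
A minimal sketch (not part of the commit) of how a key/default pair like the one added above is typically consumed: any component holding the Configuration can read the flag the same way FSNamesystem does in the next hunk. The class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative reader for the new flag; mirrors the FSNamesystem usage below.
public class StoragePolicyFlagExample {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Resolves to true unless dfs.storage.policy.enabled is overridden
    // in hdfs-site.xml or set programmatically.
    boolean enabled = conf.getBoolean(
        DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY,
        DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT);
    System.out.println("setStoragePolicy allowed: " + enabled);
  }
}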

+ 26 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -86,6 +86,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROU
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
 import static org.apache.hadoop.util.Time.now;
 
@@ -423,6 +425,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   private final CacheManager cacheManager;
   private final DatanodeStatistics datanodeStatistics;
 
+  // whether setStoragePolicy is allowed.
+  private final boolean isStoragePolicyEnabled;
+
   private String nameserviceId;
 
   private RollingUpgradeInfo rollingUpgradeInfo = null;
@@ -794,6 +799,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
       this.blockIdGenerator = new SequentialBlockIdGenerator(this.blockManager);
 
+      this.isStoragePolicyEnabled =
+          conf.getBoolean(DFS_STORAGE_POLICY_ENABLED_KEY,
+                          DFS_STORAGE_POLICY_ENABLED_DEFAULT);
+
       this.fsOwner = UserGroupInformation.getCurrentUser();
       this.fsOwnerShortUserName = fsOwner.getShortUserName();
       this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, 
@@ -2305,8 +2314,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
 
   private void setStoragePolicyInt(String src, final String policyName)
-      throws IOException {
-    checkSuperuserPrivilege();
+      throws IOException, UnresolvedLinkException, AccessControlException {
+
+    if (!isStoragePolicyEnabled) {
+      throw new IOException("Failed to set storage policy since "
+          + DFS_STORAGE_POLICY_ENABLED_KEY + " is set to false.");
+    }
+    FSPermissionChecker pc = null;
+    if (isPermissionEnabled) {
+      pc = getPermissionChecker();
+    }
+
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     waitForLoadingFSImage();
@@ -2315,6 +2333,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set storage policy for " + src);
+
+      if (pc != null) {
+        checkPermission(pc, src, false, null, null, FsAction.WRITE, null,
+                        false, true);
+      }
+
       src = FSDirectory.resolvePath(src, pathComponents, dir);
 
       // get the corresponding policy and make sure the policy name is valid
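
From a client's perspective, the change above means setStoragePolicy can now fail in two new ways: an IOException when the feature is disabled, and an AccessControlException (a subclass of IOException) when the caller lacks WRITE permission on the path. A hedged client-side sketch, assuming an already-open DistributedFileSystem as in the new tests below; the path and policy name are hypothetical:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetPolicyClientSketch {
  // 'fs' is assumed to be an already-open DistributedFileSystem.
  static void trySetPolicy(DistributedFileSystem fs) {
    try {
      fs.setStoragePolicy(new Path("/data/warm"), "WARM"); // hypothetical path
    } catch (IOException e) {
      // With dfs.storage.policy.enabled=false the NameNode rejects the call:
      // "Failed to set storage policy since dfs.storage.policy.enabled is
      // set to false."
      System.err.println("setStoragePolicy rejected: " + e.getMessage());
    }
  }
}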

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java

@@ -77,7 +77,8 @@ public class DFSck extends Configured implements Tool {
   private static final String USAGE = "Usage: DFSck <path> "
       + "[-list-corruptfileblocks | "
       + "[-move | -delete | -openforwrite] "
-      + "[-files [-blocks [-locations | -racks]]]] [-showprogress]\n"
+      + "[-files [-blocks [-locations | -racks]]]] "
+      + "[-includeSnapshots] [-showprogress]\n"
       + "\t<path>\tstart checking from this path\n"
       + "\t-move\tmove corrupted files to /lost+found\n"
       + "\t-delete\tdelete corrupted files\n"

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -2142,4 +2142,12 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.storage.policy.enabled</name>
+  <value>true</value>
+  <description>
+    Allow users to change the storage policy on files and directories.
+  </description>
+</property>
+
 </configuration>
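
Operators would normally override this property in hdfs-site.xml before starting the NameNode; a programmatic equivalent, following the pattern the new tests below use, might look like this (the class and method names are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class StoragePolicyToggle {
  // Illustrative: builds a NameNode configuration with the feature disabled.
  public static Configuration withStoragePolicyDisabled() {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, false);
    return conf;
  }
}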

+ 32 - 24
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm

@@ -82,32 +82,40 @@ HDFS Commands Guide
    See {{{./HdfsUserGuide.html#fsck}fsck}} for more info.
 
    Usage: <<<hdfs fsck [GENERIC_OPTIONS] <path>
+          [-list-corruptfileblocks | 
           [-move | -delete | -openforwrite]
           [-files [-blocks [-locations | -racks]]]
-          [-showprogress]>>>
-
-*------------------+---------------------------------------------+
-||  COMMAND_OPTION || Description
-*------------------+---------------------------------------------+
-|   <path>         | Start checking from this path.
-*------------------+---------------------------------------------+
-|   -move          | Move corrupted files to /lost+found
-*------------------+---------------------------------------------+
-|   -delete        | Delete corrupted files.
-*------------------+---------------------------------------------+
-|   -openforwrite  | Print out files opened for write.
-*------------------+---------------------------------------------+
-|   -files         | Print out files being checked.
-*------------------+---------------------------------------------+
-|   -blocks        | Print out block report.
-*------------------+---------------------------------------------+
-|   -locations     | Print out locations for every block.
-*------------------+---------------------------------------------+
-|   -racks         | Print out network topology for data-node locations.
-*------------------+---------------------------------------------+
-|   -showprogress  | Print out dots for progress in output. Default is OFF
-|                  | (no progress).
-*------------------+---------------------------------------------+
+          [-includeSnapshots] [-showprogress]>>>
+
+*------------------------+---------------------------------------------+
+||  COMMAND_OPTION       || Description
+*------------------------+---------------------------------------------+
+|   <path>               | Start checking from this path.
+*------------------------+---------------------------------------------+
+| -move                  | Move corrupted files to /lost+found.
+*------------------------+---------------------------------------------+
+| -delete                | Delete corrupted files.
+*------------------------+---------------------------------------------+
+| -files                 | Print out files being checked.
+*------------------------+---------------------------------------------+
+| -openforwrite          | Print out files opened for write.
+*------------------------+---------------------------------------------+
+|                        | Include snapshot data if the given path 
+| -includeSnapshots      | indicates a snapshottable directory or 
+|                        | there are snapshottable directories under it.
+*------------------------+---------------------------------------------+
+| -list-corruptfileblocks| Print out list of missing blocks and 
+|                        | files they belong to.
+*------------------------+---------------------------------------------+
+| -blocks                | Print out block report.
+*------------------------+---------------------------------------------+
+| -locations             | Print out locations for every block.
+*------------------------+---------------------------------------------+
+| -racks                 | Print out network topology for data-node locations.
+*------------------------+---------------------------------------------+
+| -showprogress          | Print out dots for progress in output. Default is OFF
+|                        | (no progress).
+*------------------------+---------------------------------------------+
 
 * Administration Commands
 

+ 35 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java

@@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySu
 
 import java.io.File;
 import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.util.*;
 
 import com.google.common.collect.Lists;
@@ -69,6 +70,40 @@ public class TestBlockStoragePolicy {
   static final byte WARM = (byte) 8;
   static final byte HOT  = (byte) 12;
 
+
+  @Test (timeout=300000)
+  public void testConfigKeyEnabled() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, true);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build();
+    try {
+      cluster.waitActive();
+      cluster.getFileSystem().setStoragePolicy(new Path("/"), "COLD");
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Ensure that setStoragePolicy throws IOException when
+   * dfs.storage.policy.enabled is set to false.
+   * @throws IOException
+   */
+  @Test (timeout=300000, expected=IOException.class)
+  public void testConfigKeyDisabled() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, false);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build();
+    try {
+      cluster.waitActive();
+      cluster.getFileSystem().setStoragePolicy(new Path("/"), "COLD");
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   @Test
   public void testDefaultPolicies() {
     final Map<Byte, String> expectedPolicyStrings = new HashMap<Byte, String>();