
HADOOP-2716. Superuser privileges for the Balancer. Contributed by Tsz Wo (Nicholas), SZE.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@620577 13f79535-47bb-0310-9956-ffa450edef68
Konstantin Shvachko 17 years ago
parent
commit
a998c44e87

+ 3 - 0
CHANGES.txt

@@ -48,6 +48,9 @@ Release 0.16.1 - Unreleased
     HADOOP-2780. The default socket buffer size for DataNodes is 128K.
     (dhruba)
 
+    HADOOP-2716. Superuser privileges for the Balancer.
+    (Tsz Wo (Nicholas), SZE via shv)
+
 Release 0.16.0 - 2008-02-07
 
   INCOMPATIBLE CHANGES

+ 15 - 3
src/java/org/apache/hadoop/dfs/Balancer.java

@@ -60,7 +60,10 @@ import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -858,12 +861,21 @@ public class Balancer implements Tool {
         new HashMap<String, RetryPolicy>();
     methodNameToPolicyMap.put("getBlocks", methodPolicy);
 
+    UserGroupInformation ugi;
+    try {
+      ugi = UnixUserGroupInformation.login(conf);
+    } catch (javax.security.auth.login.LoginException e) {
+      throw new IOException(StringUtils.stringifyException(e));
+    }
+
     return (NamenodeProtocol) RetryProxy.create(
         NamenodeProtocol.class,
         RPC.getProxy(NamenodeProtocol.class,
-                         NamenodeProtocol.versionID,
-                         nameNodeAddr, 
-                         conf),
+            NamenodeProtocol.versionID,
+            nameNodeAddr,
+            ugi,
+            conf,
+            NetUtils.getDefaultSocketFactory(conf)),
         methodNameToPolicyMap);
   }
   

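For reference, here is the client-side pattern introduced by the hunk above, condensed into a single createNamenode-style helper. This is a sketch, not a verbatim copy of the Balancer method: the method name and parameter list are illustrative, while the identifiers it uses (UnixUserGroupInformation.login, RPC.getProxy, NetUtils.getDefaultSocketFactory, RetryProxy.create, methodNameToPolicyMap) are the ones visible in the diff.

// Sketch of the proxy-creation path after this change: log in the local
// Unix user, then hand the resulting UGI and the default socket factory to
// RPC.getProxy so every Balancer RPC carries the caller's identity.
private static NamenodeProtocol createNamenode(InetSocketAddress nameNodeAddr,
    Configuration conf, Map<String, RetryPolicy> methodNameToPolicyMap)
    throws IOException {
  UserGroupInformation ugi;
  try {
    ugi = UnixUserGroupInformation.login(conf);
  } catch (javax.security.auth.login.LoginException e) {
    // Surface login failures as IOException, as the hunk does.
    throw new IOException(StringUtils.stringifyException(e));
  }
  return (NamenodeProtocol) RetryProxy.create(
      NamenodeProtocol.class,
      RPC.getProxy(NamenodeProtocol.class, NamenodeProtocol.versionID,
          nameNodeAddr, ugi, conf, NetUtils.getDefaultSocketFactory(conf)),
      methodNameToPolicyMap);
}

The UGI passed here is what FSNamesystem.getBlocks() examines on the server side (next hunk); without it the Balancer could not be authorized as a superuser.
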
+ 2 - 0
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -639,6 +639,8 @@ class FSNamesystem implements FSConstants, FSNamesystemMBean {
    */
   synchronized BlocksWithLocations getBlocks(DatanodeID datanode, long size)
       throws IOException {
+    checkSuperuserPrivilege();
+
     DatanodeDescriptor node = getDatanode(datanode);
     if (node == null) {
       NameNode.stateChangeLog.warn("BLOCK* NameSystem.getBlocks: "

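The body of checkSuperuserPrivilege() is not shown in this diff. As an illustration only (the accessor and field names below are hypothetical, not the FSNamesystem implementation), a check of this kind compares the RPC caller's identity, carried in the UserGroupInformation the client now sends, against the namesystem owner and the configured supergroup, and rejects everyone else:

// Illustrative sketch only -- not the actual FSNamesystem code.
private void checkSuperuserPrivilege() throws AccessControlException {
  UserGroupInformation caller = getCallerUgi();         // hypothetical accessor
  if (caller.getUserName().equals(fsOwnerUserName)) {   // assumed owner field
    return;                                             // the owner is a superuser
  }
  for (String group : caller.getGroupNames()) {
    if (group.equals(superGroupName)) {                 // assumed supergroup field
      return;                                           // supergroup members qualify
    }
  }
  throw new AccessControlException("Superuser privilege is required");
}

With a guard of this kind in place, getBlocks() is restricted to superusers, which is the point of HADOOP-2716.
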
+ 5 - 2
src/test/org/apache/hadoop/dfs/TestGetBlocks.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UnixUserGroupInformation;
 
 import junit.framework.TestCase;
 /**
@@ -37,7 +38,7 @@ import junit.framework.TestCase;
  */
 public class TestGetBlocks extends TestCase {
   /** test getBlocks */
-  public void testGetBlocks() throws IOException {
+  public void testGetBlocks() throws Exception {
     final Configuration CONF = new Configuration();
 
     final short REPLICATION_FACTOR = (short)2;
@@ -93,7 +94,9 @@ public class TestGetBlocks extends TestCase {
       InetSocketAddress addr = new InetSocketAddress("localhost",
           cluster.getNameNodePort());
       NamenodeProtocol namenode = (NamenodeProtocol) RPC.getProxy(
-          NamenodeProtocol.class, NamenodeProtocol.versionID, addr, CONF);
+          NamenodeProtocol.class, NamenodeProtocol.versionID, addr,
+          UnixUserGroupInformation.login(CONF), CONF,
+          NetUtils.getDefaultSocketFactory(CONF));
 
       // get blocks of size fileLen from dataNodes[0]
       BlockWithLocations[] locs;