
HDFS-905. Use the new UserGroupInformation from HADOOP-6299.
(jghoman via omalley)


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@903562 13f79535-47bb-0310-9956-ffa450edef68

Owen O'Malley, 15 years ago
parent commit 904c2d2161

54 changed files with 561 additions and 800 deletions
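
The bulk of these changes replace the old UnixUserGroupInformation (a user name plus comma-separated groups, typically read from a "ugi" query parameter or a shell call) with the JAAS-backed UserGroupInformation API: callers now obtain a UGI via createRemoteUser()/getCurrentUser() and wrap privileged work in doAs(). The snippet below is only a sketch of that pattern for illustration, not code from this commit; the class name and the "webuser" name are invented.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiMigrationSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();

    // Old pattern (removed by this patch): build a UGI by hand and stash it in the conf:
    //   UnixUserGroupInformation ugi = new UnixUserGroupInformation("webuser", new String[]{"webgroup"});
    //   UnixUserGroupInformation.saveToConf(conf, UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);

    // New pattern: obtain a UGI (here a remote user known only by name, as the
    // proxy servlets now do) and run the filesystem work inside doAs().
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("webuser");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        FileSystem fs = FileSystem.get(conf); // created while acting as "webuser"
        fs.listStatus(new Path("/"));
        return null;
      }
    });
  }
}
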
  1. CHANGES.txt (+3, -0)
  2. src/contrib/hdfsproxy/build.xml (+1, -1)
  3. src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/LdapIpDirFilter.java (+2, -3)
  4. src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java (+5, -14)
  5. src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileForward.java (+4, -3)
  6. src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java (+4, -24)
  7. src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java (+3, -12)
  8. src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java (+18, -18)
  9. src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java (+0, -153)
  10. src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java (+2, -6)
  11. src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUgiManager.java (+0, -107)
  12. src/java/org/apache/hadoop/hdfs/DFSClient.java (+7, -16)
  13. src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (+4, -0)
  14. src/java/org/apache/hadoop/hdfs/DFSUtil.java (+20, -1)
  15. src/java/org/apache/hadoop/hdfs/HftpFileSystem.java (+5, -14)
  16. src/java/org/apache/hadoop/hdfs/security/AccessTokenHandler.java (+1, -1)
  17. src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (+1, -7)
  18. src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (+1, -10)
  19. src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (+16, -16)
  20. src/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryServlet.java (+38, -27)
  21. src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java (+28, -15)
  22. src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (+19, -24)
  23. src/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java (+43, -3)
  24. src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java (+13, -6)
  25. src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java (+20, -9)
  26. src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java (+25, -15)
  27. src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java (+12, -3)
  28. src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (+15, -15)
  29. src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java (+21, -7)
  30. src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (+5, -14)
  31. src/java/org/apache/hadoop/hdfs/tools/DFSck.java (+3, -7)
  32. src/test/hdfs-site.xml (+5, -1)
  33. src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml (+6, -6)
  34. src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java (+2, -2)
  35. src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsPermission.java (+2, -2)
  36. src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java (+3, -3)
  37. src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java (+17, -31)
  38. src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java (+8, -10)
  39. src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java (+14, -33)
  40. src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (+0, -12)
  41. src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java (+31, -31)
  42. src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java (+33, -25)
  43. src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java (+7, -8)
  44. src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (+1, -1)
  45. src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java (+2, -2)
  46. src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java (+2, -2)
  47. src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (+5, -6)
  48. src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java (+27, -13)
  49. src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java (+10, -9)
  50. src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (+0, -7)
  51. src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (+30, -13)
  52. src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java (+5, -17)
  53. src/test/hdfs/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java (+6, -3)
  54. src/test/hdfs/org/apache/hadoop/security/TestPermission.java (+6, -12)

+ 3 - 0
CHANGES.txt

@@ -4,6 +4,9 @@ Trunk (unreleased changes)
 
   INCOMPATIBLE CHANGES
 
+    HDFS-905. Use the new UserGroupInformation from HADOOP-6299. 
+    (jghoman via omalley)
+
  NEW FEATURES
 
    HDFS-654. Add support new atomic rename functionality in HDFS for 

+ 1 - 1
src/contrib/hdfsproxy/build.xml

@@ -238,7 +238,7 @@
 				<sysproperty key="javax.net.ssl.keyStorePassword" value="changeme"/>
 				<sysproperty key="javax.net.ssl.keyPassword" value="changeme"/>
 				<sysproperty key="javax.net.ssl.clientCert" value="${ssl.client.cert}"/>
-        <formatter type="xml" />
+        <formatter type="plain" />
         <batchtest todir="${test.build.dir}" unless="testcase">
            <fileset dir="${src.test}">
              <include name="**/TestHdfsProxy.java"/>

+ 2 - 3
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/LdapIpDirFilter.java

@@ -46,7 +46,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 
@@ -195,8 +195,7 @@ public class LdapIpDirFilter implements Filter {
           "User not authorized to access path");
       return;
     }
-    UnixUserGroupInformation ugi = new UnixUserGroupInformation(userId,
-        groupName.split(","));
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(userId);
     rqst.setAttribute("authorized.ugi", ugi);
     // since we cannot pass ugi object cross context as they are from different
     // classloaders in different war file, we have to use String attribute.

+ 5 - 14
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java

@@ -29,7 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.FileDataServlet;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 
@@ -49,28 +49,19 @@ public class ProxyFileDataServlet extends FileDataServlet {
 
   /** {@inheritDoc} */
   @Override
-  protected URI createUri(FileStatus i, UnixUserGroupInformation ugi,
+  protected URI createUri(FileStatus i, UserGroupInformation ugi,
       ClientProtocol nnproxy, HttpServletRequest request) throws IOException,
       URISyntaxException {
     return new URI(request.getScheme(), null, request.getServerName(), request
         .getServerPort(), "/streamFile", "filename=" + i.getPath() + "&ugi="
-        + ugi, null);
+        + ugi.getUserName(), null);
   }
 
   /** {@inheritDoc} */
   @Override
-  protected UnixUserGroupInformation getUGI(HttpServletRequest request) {
+  protected UserGroupInformation getUGI(HttpServletRequest request) {
     String userID = (String) request
         .getAttribute("org.apache.hadoop.hdfsproxy.authorized.userID");
-    String groupName = (String) request
-        .getAttribute("org.apache.hadoop.hdfsproxy.authorized.role");
-    UnixUserGroupInformation ugi;
-    if (groupName != null) {
-      // get group info from ldap
-      ugi = new UnixUserGroupInformation(userID, groupName.split(","));
-    } else {// stronger ugi management
-      ugi = ProxyUgiManager.getUgiForUser(userID);
-    }
-    return ugi;
+    return UserGroupInformation.createRemoteUser(userID);
   }
 }

+ 4 - 3
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileForward.java

@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfsproxy;
 
 import javax.servlet.http.HttpServletRequest;
 
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 
 public class ProxyFileForward extends ProxyForwardServlet {
@@ -31,9 +31,10 @@ public class ProxyFileForward extends ProxyForwardServlet {
   protected String buildForwardPath(HttpServletRequest request, String pathInfo) {
     String path = "/streamFile";
     path += "?filename=" + request.getPathInfo();
-    UnixUserGroupInformation ugi = (UnixUserGroupInformation)request.getAttribute("authorized.ugi");
+    UserGroupInformation ugi = 
+                   (UserGroupInformation)request.getAttribute("authorized.ugi");
     if (ugi != null) {
-      path += "&ugi=" + ugi.toString();
+      path += "&ugi=" + ugi.getUserName();
     }
     return path;
   }

+ 4 - 24
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java

@@ -47,7 +47,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.net.NetUtils;
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -58,9 +58,6 @@ public class ProxyFilter implements Filter {
   /** Pattern for triggering reload of user permissions */
   protected static final Pattern RELOAD_PATTERN = Pattern
       .compile("^(/reloadPermFiles)$");
-  /** Pattern for triggering clearing of ugi Cache */
-  protected static final Pattern CLEAR_PATTERN = Pattern
-      .compile("^(/clearUgiCache)$");
   /** Pattern for a filter to find out if a request is HFTP/HSFTP request */
   protected static final Pattern HFTP_PATTERN = Pattern
       .compile("^(/listPaths|/data|/streamFile|/file)$");
@@ -301,12 +298,6 @@ public class ProxyFilter implements Filter {
         LOG.info("User permissions and user certs files reloaded");
         rsp.setStatus(HttpServletResponse.SC_OK);
         return;
-      } else if (CLEAR_PATTERN.matcher(servletPath).matches()
-          && checkUser("Admin", certs[0])) {
-        ProxyUgiManager.clearCache();
-        LOG.info("Ugi cache cleared");
-        rsp.setStatus(HttpServletResponse.SC_OK);
-        return;
       } 
 
       if (!isAuthorized) {
@@ -315,25 +306,14 @@ public class ProxyFilter implements Filter {
       }
       
       // request is authorized, set ugi for servlets
-      UnixUserGroupInformation ugi = ProxyUgiManager
-          .getUgiForUser(userID);
-      if (ugi == null) {
-        LOG.info("Can't retrieve ugi for user " + userID);
-        rsp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
-            "Can't retrieve ugi for user " + userID);
-        return;
-      }
+      UserGroupInformation ugi = UserGroupInformation.createRemoteUser(userID);
       rqst.setAttribute("authorized.ugi", ugi);
       rqst.setAttribute("org.apache.hadoop.hdfsproxy.authorized.userID", userID);
     } else if(rqst.getScheme().equalsIgnoreCase("http")) { // http request, set ugi for servlets, only for testing purposes
       String ugi = rqst.getParameter("ugi");
       if (ugi != null) {
-        rqst.setAttribute("authorized.ugi", new UnixUserGroupInformation(ugi
-          .split(",")));
-        String[] ugiStr = ugi.split(",");
-        if(ugiStr.length > 0) {
-          rqst.setAttribute("org.apache.hadoop.hdfsproxy.authorized.userID", ugiStr[0]);
-        }
+        rqst.setAttribute("authorized.ugi", UserGroupInformation.createRemoteUser(ugi));
+        rqst.setAttribute("org.apache.hadoop.hdfsproxy.authorized.userID", ugi);
       } 
     }
     chain.doFilter(request, response);

+ 3 - 12
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java

@@ -24,7 +24,7 @@ import javax.servlet.http.HttpServletRequest;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.ListPathsServlet;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /** {@inheritDoc} */
 public class ProxyListPathsServlet extends ListPathsServlet {
@@ -42,18 +42,9 @@ public class ProxyListPathsServlet extends ListPathsServlet {
 
   /** {@inheritDoc} */
   @Override
-  protected UnixUserGroupInformation getUGI(HttpServletRequest request) {
+  protected UserGroupInformation getUGI(HttpServletRequest request) {
     String userID = (String) request
         .getAttribute("org.apache.hadoop.hdfsproxy.authorized.userID");
-    String groupName = (String) request
-        .getAttribute("org.apache.hadoop.hdfsproxy.authorized.role");
-    UnixUserGroupInformation ugi;
-    if (groupName != null) {
-      // group info stored in ldap
-      ugi = new UnixUserGroupInformation(userID, groupName.split(","));
-    } else {// stronger ugi management
-      ugi = ProxyUgiManager.getUgiForUser(userID);
-    }
-    return ugi;
+    return UserGroupInformation.createRemoteUser(userID);
   }
 }

+ 18 - 18
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfsproxy;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
 
 import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
@@ -28,7 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.StreamFile;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /** {@inheritDoc} */
 public class ProxyStreamFile extends StreamFile {
@@ -47,32 +48,31 @@ public class ProxyStreamFile extends StreamFile {
   /** {@inheritDoc} */
   @Override
   protected DFSClient getDFSClient(HttpServletRequest request)
-      throws IOException {
+      throws IOException, InterruptedException {
     ServletContext context = getServletContext();
-    Configuration conf = new HdfsConfiguration((Configuration) context
+    final Configuration conf = new HdfsConfiguration((Configuration) context
         .getAttribute("name.conf"));
-    UnixUserGroupInformation.saveToConf(conf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, getUGI(request));
-    InetSocketAddress nameNodeAddr = (InetSocketAddress) context
+    final InetSocketAddress nameNodeAddr = (InetSocketAddress) context
         .getAttribute("name.node.address");
-    return new DFSClient(nameNodeAddr, conf);
+    
+    DFSClient client = 
+              getUGI(request).doAs(new PrivilegedExceptionAction<DFSClient>() {
+      @Override
+      public DFSClient run() throws IOException {
+        return new DFSClient(nameNodeAddr, conf);
+      }
+    });
+    
+    return client;
   }
 
   /** {@inheritDoc} */
   @Override
-  protected UnixUserGroupInformation getUGI(HttpServletRequest request) {
+  protected UserGroupInformation getUGI(HttpServletRequest request) {
     String userID = (String) request
         .getAttribute("org.apache.hadoop.hdfsproxy.authorized.userID");
-    String groupName = (String) request
-        .getAttribute("org.apache.hadoop.hdfsproxy.authorized.role");
-    UnixUserGroupInformation ugi;
-    if (groupName != null) {
-      // get group info from ldap
-      ugi = new UnixUserGroupInformation(userID, groupName.split(","));
-    } else {// stronger ugi management
-      ugi = ProxyUgiManager.getUgiForUser(userID);
-    }
-    return ugi;
+
+    return UserGroupInformation.createRemoteUser(userID);
   }
 
 }

+ 0 - 153
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java

@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.security.UnixUserGroupInformation;
-import org.apache.hadoop.util.Shell;
-
-/** An ugi manager that maintains a temporary ugi cache */
-public class ProxyUgiManager {
-  private static final Map<String, CachedUgi> ugiCache = new HashMap<String, CachedUgi>();
-  private static long ugiLifetime;
-  /** username can only comprise of 0-9a-zA-Z and underscore, i.e. \w */
-  private static final Pattern USERNAME_PATTERN = Pattern.compile("^\\w+$");
-  static final int CLEANUP_THRESHOLD = 1000;
-
-  static {
-    Configuration conf = new HdfsConfiguration(false);
-    conf.addResource("hdfsproxy-default.xml");
-    ugiLifetime = conf.getLong("hdfsproxy.ugi.cache.ugi.lifetime", 15) * 60 * 1000L;
-  }
-
-  /**
-   * retrieve an ugi for a user. try the cache first, if not found, get it by
-   * running a shell command
-   */
-  public static synchronized UnixUserGroupInformation getUgiForUser(
-      String userName) {
-    long now = System.currentTimeMillis();
-    long cutoffTime = now - ugiLifetime;
-    CachedUgi cachedUgi = ugiCache.get(userName);
-    if (cachedUgi != null && cachedUgi.getInitTime() > cutoffTime)
-      return cachedUgi.getUgi();
-    UnixUserGroupInformation ugi = null;
-    try {
-      ugi = getUgi(userName);
-    } catch (IOException e) {
-      return null;
-    }
-    if (ugiCache.size() > CLEANUP_THRESHOLD) { // remove expired ugi's first
-      for (Iterator<Map.Entry<String, CachedUgi>> it = ugiCache.entrySet()
-          .iterator(); it.hasNext();) {
-        Map.Entry<String, CachedUgi> e = it.next();
-        if (e.getValue().getInitTime() < cutoffTime) {
-          it.remove();
-        }
-      }
-    }
-    ugiCache.put(ugi.getUserName(), new CachedUgi(ugi, now));
-    return ugi;
-  }
-
-  /** clear the ugi cache */
-  public static synchronized void clearCache() {
-    ugiCache.clear();
-  }
-
-  /** set ugi lifetime, only for junit testing purposes */
-  static synchronized void setUgiLifetime(long lifetime) {
-    ugiLifetime = lifetime;
-  }
-
-  /** save an ugi to cache, only for junit testing purposes */
-  static synchronized void saveToCache(UnixUserGroupInformation ugi) {
-    ugiCache.put(ugi.getUserName(), new CachedUgi(ugi, System
-        .currentTimeMillis()));
-  }
-
-  /** get cache size, only for junit testing purposes */
-  static synchronized int getCacheSize() {
-    return ugiCache.size();
-  }
-
-  /**
-   * Get the ugi for a user by running shell command "id -Gn"
-   * 
-   * @param userName name of the user
-   * @return ugi of the user
-   * @throws IOException if encounter any error while running the command
-   */
-  private static UnixUserGroupInformation getUgi(String userName)
-      throws IOException {
-    if (userName == null || !USERNAME_PATTERN.matcher(userName).matches())
-      throw new IOException("Invalid username=" + userName);
-    String[] cmd = new String[] { "bash", "-c", "id -Gn '" + userName + "'"};
-    String[] groups = Shell.execCommand(cmd).split("\\s+");
-    return new UnixUserGroupInformation(userName, groups);
-  }
-
-  /** cached ugi object with its associated init time */
-  private static class CachedUgi {
-    final UnixUserGroupInformation ugi;
-    final long initTime;
-
-    CachedUgi(UnixUserGroupInformation ugi, long initTime) {
-      this.ugi = ugi;
-      this.initTime = initTime;
-    }
-
-    UnixUserGroupInformation getUgi() {
-      return ugi;
-    }
-
-    long getInitTime() {
-      return initTime;
-    }
-
-    /** {@inheritDoc} */
-    public int hashCode() {
-      return ugi.hashCode();
-    }
-
-    static boolean isEqual(Object a, Object b) {
-      return a == b || (a != null && a.equals(b));
-    }
-
-    /** {@inheritDoc} */
-    public boolean equals(Object obj) {
-      if (obj == this) {
-        return true;
-      }
-      if (obj != null && obj instanceof CachedUgi) {
-        CachedUgi that = (CachedUgi) obj;
-        return isEqual(this.ugi, that.ugi) && this.initTime == that.initTime;
-      }
-      return false;
-    }
-
-  }
-}

+ 2 - 6
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java

@@ -64,7 +64,7 @@ public class ProxyUtil {
   // warning
 
   private static enum UtilityOption {
-    RELOAD("-reloadPermFiles"), CLEAR("-clearUgiCache"), GET("-get"), CHECKCERTS(
+    RELOAD("-reloadPermFiles"), GET("-get"), CHECKCERTS(
         "-checkcerts");
 
     private String name = null;
@@ -303,13 +303,12 @@ public class ProxyUtil {
   public static void main(String[] args) throws Exception {
     if (args.length < 1
         || (!UtilityOption.RELOAD.getName().equalsIgnoreCase(args[0])
-            && !UtilityOption.CLEAR.getName().equalsIgnoreCase(args[0])
             && !UtilityOption.GET.getName().equalsIgnoreCase(args[0]) && !UtilityOption.CHECKCERTS
             .getName().equalsIgnoreCase(args[0]))
         || (UtilityOption.GET.getName().equalsIgnoreCase(args[0]) && args.length != 4)
         || (UtilityOption.CHECKCERTS.getName().equalsIgnoreCase(args[0]) && args.length != 3)) {
       System.err.println("Usage: ProxyUtil [" + UtilityOption.RELOAD.getName()
-          + "] | [" + UtilityOption.CLEAR.getName() + "] | ["
+          + "] | ["
           + UtilityOption.GET.getName() + " <hostname> <#port> <path> ] | ["
           + UtilityOption.CHECKCERTS.getName() + " <hostname> <#port> ]");
       System.exit(0);
@@ -321,9 +320,6 @@ public class ProxyUtil {
     if (UtilityOption.RELOAD.getName().equalsIgnoreCase(args[0])) {
       // reload user-certs.xml and user-permissions.xml files
       sendCommand(conf, "/reloadPermFiles");
-    } else if (UtilityOption.CLEAR.getName().equalsIgnoreCase(args[0])) {
-      // clear UGI caches
-      sendCommand(conf, "/clearUgiCache");
     } else if (UtilityOption.CHECKCERTS.getName().equalsIgnoreCase(args[0])) {
       checkServerCertsExpirationDays(conf, args[1], Integer.parseInt(args[2]));
     } else {

+ 0 - 107
src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUgiManager.java

@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import org.apache.hadoop.security.UnixUserGroupInformation;
-
-import junit.framework.TestCase;
-
-/** Unit tests for ProxyUgiManager */
-public class TestProxyUgiManager extends TestCase {
-
-  private static final UnixUserGroupInformation root1Ugi = new UnixUserGroupInformation(
-      "root", new String[] { "group1" });
-  private static final UnixUserGroupInformation root2Ugi = new UnixUserGroupInformation(
-      "root", new String[] { "group2" });
-  private static final long ugiLifetime = 1000L; // milliseconds
-
-  /** Test caching functionality */
-  public void testCache() throws Exception {
-    ProxyUgiManager.saveToCache(root1Ugi);
-    UnixUserGroupInformation ugi = ProxyUgiManager.getUgiForUser(root1Ugi
-        .getUserName());
-    assertEquals(root1Ugi, ugi);
-    ProxyUgiManager.saveToCache(root2Ugi);
-    ugi = ProxyUgiManager.getUgiForUser(root2Ugi.getUserName());
-    assertEquals(root2Ugi, ugi);
-  }
-
-  /** Test clearCache method */
-  public void testClearCache() throws Exception {
-    UnixUserGroupInformation ugi = ProxyUgiManager.getUgiForUser(root1Ugi
-        .getUserName());
-    if (root1Ugi.equals(ugi)) {
-      ProxyUgiManager.saveToCache(root2Ugi);
-      ugi = ProxyUgiManager.getUgiForUser(root2Ugi.getUserName());
-      assertEquals(root2Ugi, ugi);
-      ProxyUgiManager.clearCache();
-      ugi = ProxyUgiManager.getUgiForUser(root2Ugi.getUserName());
-      assertFalse(root2Ugi.equals(ugi));
-    } else {
-      ProxyUgiManager.saveToCache(root1Ugi);
-      ugi = ProxyUgiManager.getUgiForUser(root1Ugi.getUserName());
-      assertEquals(root1Ugi, ugi);
-      ProxyUgiManager.clearCache();
-      ugi = ProxyUgiManager.getUgiForUser(root1Ugi.getUserName());
-      assertFalse(root1Ugi.equals(ugi));
-    }
-  }
-
-  /** Test cache timeout */
-  public void testTimeOut() throws Exception {
-    String[] users = new String[] { "root", "nobody", "SYSTEM",
-        "Administrator", "Administrators", "Guest" };
-    String realUser = null;
-    UnixUserGroupInformation ugi = null;
-    ProxyUgiManager.clearCache();
-    for (String user : users) {
-      ugi = ProxyUgiManager.getUgiForUser(user);
-      if (ugi != null) {
-        realUser = user;
-        break;
-      }
-    }
-    if (realUser != null) {
-      ProxyUgiManager.setUgiLifetime(ugiLifetime);
-      ProxyUgiManager.clearCache();
-      UnixUserGroupInformation[] fakedUgis = generateUgi(ProxyUgiManager.CLEANUP_THRESHOLD);
-      for (int i = 0; i < ProxyUgiManager.CLEANUP_THRESHOLD; i++) {
-        ProxyUgiManager.saveToCache(fakedUgis[i]);
-      }
-      assertTrue(ProxyUgiManager.getCacheSize() == ProxyUgiManager.CLEANUP_THRESHOLD);
-      Thread.sleep(ugiLifetime + 1000L);
-      UnixUserGroupInformation newugi = ProxyUgiManager.getUgiForUser(realUser);
-      assertTrue(ProxyUgiManager.getCacheSize() == ProxyUgiManager.CLEANUP_THRESHOLD + 1);
-      assertEquals(newugi, ugi);
-      Thread.sleep(ugiLifetime + 1000L);
-      newugi = ProxyUgiManager.getUgiForUser(realUser);
-      assertTrue(ProxyUgiManager.getCacheSize() == 1);
-      assertEquals(newugi, ugi);
-    }
-  }
-
-  private static UnixUserGroupInformation[] generateUgi(int size) {
-    UnixUserGroupInformation[] ugis = new UnixUserGroupInformation[size];
-    for (int i = 0; i < size; i++) {
-      ugis[i] = new UnixUserGroupInformation("user" + i,
-          new String[] { "group" });
-    }
-    return ugis;
-  }
-}

+ 7 - 16
src/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -110,7 +110,6 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -138,7 +137,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
   private static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
   private final ClientProtocol namenode;
   private final ClientProtocol rpcNamenode;
-  final UnixUserGroupInformation ugi;
+  final UserGroupInformation ugi;
   volatile boolean clientRunning = true;
   private volatile FsServerDefaults serverDefaults;
   private volatile long serverDefaultsLastUpdate;
@@ -166,16 +165,13 @@ public class DFSClient implements FSConstants, java.io.Closeable {
 
   public static ClientProtocol createNamenode( InetSocketAddress nameNodeAddr,
       Configuration conf) throws IOException {
-    try {
-      return createNamenode(createRPCNamenode(nameNodeAddr, conf,
-        UnixUserGroupInformation.login(conf, true)));
-    } catch (LoginException e) {
-      throw (IOException)(new IOException().initCause(e));
-    }
+    return createNamenode(createRPCNamenode(nameNodeAddr, conf,
+        UserGroupInformation.getCurrentUser()));
+    
   }
 
   private static ClientProtocol createRPCNamenode(InetSocketAddress nameNodeAddr,
-      Configuration conf, UnixUserGroupInformation ugi) 
+      Configuration conf, UserGroupInformation ugi) 
     throws IOException {
     return (ClientProtocol)RPC.getProxy(ClientProtocol.class,
         ClientProtocol.versionID, nameNodeAddr, ugi, conf,
@@ -269,12 +265,8 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     // The hdfsTimeout is currently the same as the ipc timeout 
     this.hdfsTimeout = Client.getTimeout(conf);
 
-    try {
-      this.ugi = UnixUserGroupInformation.login(conf, true);
-    } catch (LoginException e) {
-      throw (IOException)(new IOException().initCause(e));
-    }
-
+    this.ugi = UserGroupInformation.getCurrentUser();
+    
     String taskId = conf.get("mapred.task.id");
     if (taskId != null) {
       this.clientName = "DFSClient_" + taskId; 
@@ -1146,7 +1138,6 @@ public class DFSClient implements FSConstants, java.io.Closeable {
                                          diskspaceQuota);
                                          
     }
-    
     try {
       namenode.setQuota(src, namespaceQuota, diskspaceQuota);
     } catch(RemoteException re) {

+ 4 - 0
src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -190,4 +190,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_PLUGINS_KEY = "dfs.namenode.plugins";
   public static final String  DFS_WEB_UGI_KEY = "dfs.web.ugi";
   public static final String  DFS_NAMENODE_STARTUP_KEY = "dfs.namenode.startup";
+  public static final String  DFS_DATANODE_KEYTAB_FILE_KEY = "dfs.datanode.keytab.file";
+  public static final String  DFS_DATANODE_USER_NAME_KEY = "dfs.datanode.user.name.key";
+  public static final String  DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";
+  public static final String  DFS_NAMENODE_USER_NAME_KEY = "dfs.namenode.user.name.key";
 }

+ 20 - 1
src/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -18,8 +18,12 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.io.IOException;
 import java.util.StringTokenizer;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
 
 public class DFSUtil {
   /**
@@ -76,6 +80,21 @@ public class DFSUtil {
       simulation[index] = false;
     }
   }
-
+  
+  /**
+   * If a keytab has been provided, login as that user.
+   */
+  public static void login(final Configuration conf,
+                           final String keytabFileKey,
+                           final String userNameKey)
+                           throws IOException {
+    String keytabFilename = conf.get(keytabFileKey);
+    
+    if(keytabFilename == null)
+      return;
+    
+    String user = conf.get(userNameKey, System.getProperty("user.name"));
+    UserGroupInformation.loginUserFromKeytab(user, keytabFilename);
+  }
 }
 }
 
+ 5 - 14
src/java/org/apache/hadoop/hdfs/HftpFileSystem.java

@@ -33,8 +33,6 @@ import java.util.EnumSet;
 import java.util.Random;
 import java.util.TimeZone;
 
-import javax.security.auth.login.LoginException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -48,10 +46,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.StringUtils;
 import org.xml.sax.Attributes;
 import org.xml.sax.InputSource;
 import org.xml.sax.SAXException;
@@ -96,12 +92,7 @@ public class HftpFileSystem extends FileSystem {
   public void initialize(URI name, Configuration conf) throws IOException {
     super.initialize(name, conf);
     setConf(conf);
-    try {
-      this.ugi = UnixUserGroupInformation.login(conf, true);
-    } catch (LoginException le) {
-      throw new IOException(StringUtils.stringifyException(le));
-    }
-
+    this.ugi = UserGroupInformation.getCurrentUser(); 
     nnAddr = NetUtils.createSocketAddr(name.toString());
   }
   
@@ -121,7 +112,7 @@ public class HftpFileSystem extends FileSystem {
     Construct URL pointing to file on namenode
   */
   URL getNamenodeFileURL(Path f) throws IOException {
-    return getNamenodeURL("/data" + f.toUri().getPath(), "ugi=" + ugi);
+    return getNamenodeURL("/data" + f.toUri().getPath(), "ugi=" + ugi.getUserName());
   }
 
   /* 
@@ -156,7 +147,7 @@ public class HftpFileSystem extends FileSystem {
 
   @Override
   public FSDataInputStream open(Path f, int buffersize) throws IOException {
-    URL u = getNamenodeURL("/data" + f.toUri().getPath(), "ugi=" + ugi);
+    URL u = getNamenodeURL("/data" + f.toUri().getPath(), "ugi=" + ugi.getUserName());
     return new FSDataInputStream(new ByteRangeInputStream(u));
   }
 
@@ -206,7 +197,7 @@ public class HftpFileSystem extends FileSystem {
         XMLReader xr = XMLReaderFactory.createXMLReader();
         xr.setContentHandler(this);
         HttpURLConnection connection = openConnection("/listPaths" + path,
-            "ugi=" + ugi + (recur? "&recursive=yes" : ""));
+            "ugi=" + ugi.getUserName() + (recur? "&recursive=yes" : ""));
 
         InputStream resp = connection.getInputStream();
         xr.parse(new InputSource(resp));
@@ -270,7 +261,7 @@
 
     private FileChecksum getFileChecksum(String f) throws IOException {
       final HttpURLConnection connection = openConnection(
-          "/fileChecksum" + f, "ugi=" + ugi);
+          "/fileChecksum" + f, "ugi=" + ugi.getUserName());
       try {
         final XMLReader xr = XMLReaderFactory.createXMLReader();
         xr.setContentHandler(this);

+ 1 - 1
src/java/org/apache/hadoop/hdfs/security/AccessTokenHandler.java

@@ -220,7 +220,7 @@ public class AccessTokenHandler {
   /** Generate an access token for current user */
   public BlockAccessToken generateToken(long blockID, EnumSet<AccessMode> modes)
       throws IOException {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     String userID = (ugi == null ? null : ugi.getUserName());
     return generateToken(userID, blockID, modes);
   }

+ 1 - 7
src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -25,7 +25,6 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.lang.Class;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.Socket;
@@ -81,7 +80,6 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
@@ -923,11 +921,7 @@ public class Balancer implements Tool {
     methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
 
     UserGroupInformation ugi;
-    try {
-      ugi = UnixUserGroupInformation.login(conf);
-    } catch (javax.security.auth.login.LoginException e) {
-      throw new IOException(StringUtils.stringifyException(e));
-    }
+    ugi = UserGroupInformation.getCurrentUser();
 
     return (NamenodeProtocol) RetryProxy.create(
         NamenodeProtocol.class,

+ 1 - 10
src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java

@@ -42,26 +42,17 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.BlockAccessToken;
 import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.util.VersionInfo;
 
 public class JspHelper {
   final static public String WEB_UGI_PROPERTY_NAME = "dfs.web.ugi";
 
   public static final Configuration conf = new HdfsConfiguration();
-  public static final UnixUserGroupInformation webUGI
-  = UnixUserGroupInformation.createImmutable(
-      conf.getStrings(WEB_UGI_PROPERTY_NAME));
-
+  
   private static final int defaultChunkSizeToView = 
     conf.getInt("dfs.default.chunk.view.size", 32 * 1024);
   static final Random rand = new Random();
 
-  static {
-    UnixUserGroupInformation.saveToConf(conf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, webUGI);
-  }
-
   /** Private constructor for preventing creating JspHelper object. */
   /** Private constructor for preventing creating JspHelper object. */
   private JspHelper() {} 
 
+ 16 - 16
src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -85,6 +85,7 @@ import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.authorize.ConfiguredPolicy;
-import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DiskChecker;
    * Create the DataNode given a configuration and an array of dataDirs.
    * Create the DataNode given a configuration and an array of dataDirs.
    * 'dataDirs' is where the blocks are stored.
    */
-           AbstractList<File> dataDirs) throws IOException {
+  DataNode(final Configuration conf, 
+           final AbstractList<File> dataDirs) throws IOException {
     super(conf);
     super(conf);
+    UserGroupInformation.setConfiguration(conf);
+    DFSUtil.login(conf, 
+        DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY,
+        DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY);
+    
     DataNode.setDataNode(this);
+    
     try {
       startDataNode(conf, dataDirs);
     } catch (IOException ie) {
       shutdown();
-      throw ie;
-    }
+     throw ie;
+   }
   }
-    
-  
+
   /**
    * This method starts the data node with the specified conf.
    * 
@@ -392,13 +397,8 @@ public class DataNode extends Configured
     // set service-level authorization security policy
     if (conf.getBoolean(
           ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
-      PolicyProvider policyProvider = 
-        (PolicyProvider)(ReflectionUtils.newInstance(
-            conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
-                HDFSPolicyProvider.class, PolicyProvider.class), 
-            conf));
-      SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
-    }
+      ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
+       }
 
     //init ipc server
     InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
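
The DataNode constructor above now logs in from a keytab before doing anything else, using the DFSUtil.login helper and the new DFSConfigKeys entries added earlier in this patch; DFSUtil.login is a no-op when no keytab is configured, so non-secure clusters keep their current behaviour. Below is a minimal sketch of that startup sequence, with an invented class name and placeholder keytab path and principal:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class KeytabLoginSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder values; in a real deployment these come from hdfs-site.xml.
    conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, "/etc/hadoop/dn.keytab");
    conf.set(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, "dn/host.example.com@EXAMPLE.COM");

    // Mirrors what the patched DataNode constructor does before starting up:
    // a no-op if no keytab is configured, otherwise a Kerberos keytab login.
    UserGroupInformation.setConfiguration(conf);
    DFSUtil.login(conf,
        DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY,
        DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY);

    System.out.println("Running as: " + UserGroupInformation.getCurrentUser());
  }
}
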

+ 38 - 27
src/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryServlet.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.security.PrivilegedExceptionAction;
 
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
@@ -27,7 +28,7 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.znerd.xmlenc.XMLOutputter;
 
 /** Servlets for file checksum */
@@ -36,34 +37,44 @@ public class ContentSummaryServlet extends DfsServlet {
   private static final long serialVersionUID = 1L;
   
   /** {@inheritDoc} */
-  public void doGet(HttpServletRequest request, HttpServletResponse response
-      ) throws ServletException, IOException {
-    final UnixUserGroupInformation ugi = getUGI(request);
-    final String path = request.getPathInfo();
-
-    final PrintWriter out = response.getWriter();
-    final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
-    xml.declaration();
+  public void doGet(final HttpServletRequest request,
+      final HttpServletResponse response) throws ServletException, IOException {
+    final UserGroupInformation ugi = getUGI(request);
     try {
-      //get content summary
-      final ClientProtocol nnproxy = createNameNodeProxy(ugi);
-      final ContentSummary cs = nnproxy.getContentSummary(path);
+      ugi.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final String path = request.getPathInfo();
+
+          final PrintWriter out = response.getWriter();
+          final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
+          xml.declaration();
+          try {
+            //get content summary
+            final ClientProtocol nnproxy = createNameNodeProxy();
+            final ContentSummary cs = nnproxy.getContentSummary(path);
 
-      //write xml
-      xml.startTag(ContentSummary.class.getName());
-      if (cs != null) {
-        xml.attribute("length"        , "" + cs.getLength());
-        xml.attribute("fileCount"     , "" + cs.getFileCount());
-        xml.attribute("directoryCount", "" + cs.getDirectoryCount());
-        xml.attribute("quota"         , "" + cs.getQuota());
-        xml.attribute("spaceConsumed" , "" + cs.getSpaceConsumed());
-        xml.attribute("spaceQuota"    , "" + cs.getSpaceQuota());
-      }
-      xml.endTag();
-    } catch(IOException ioe) {
-      new RemoteException(ioe.getClass().getName(), ioe.getMessage()
-          ).writeXml(path, xml);
+            //write xml
+            xml.startTag(ContentSummary.class.getName());
+            if (cs != null) {
+              xml.attribute("length"        , "" + cs.getLength());
+              xml.attribute("fileCount"     , "" + cs.getFileCount());
+              xml.attribute("directoryCount", "" + cs.getDirectoryCount());
+              xml.attribute("quota"         , "" + cs.getQuota());
+              xml.attribute("spaceConsumed" , "" + cs.getSpaceConsumed());
+              xml.attribute("spaceQuota"    , "" + cs.getSpaceQuota());
+            }
+            xml.endTag();
+          } catch(IOException ioe) {
+            new RemoteException(ioe.getClass().getName(), ioe.getMessage()
+                ).writeXml(path, xml);
+          }
+          xml.endDocument();
+          return null;
+        }
+      });
+    } catch (InterruptedException e) {
+      throw new IOException(e);
     }
-    xml.endDocument();
   }
 }

+ 28 - 15
src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java

@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;

 /**
@@ -48,29 +47,43 @@ abstract class DfsServlet extends HttpServlet {

   static final Log LOG = LogFactory.getLog(DfsServlet.class.getCanonicalName());

-  /** Get {@link UserGroupInformation} from request */
-  protected UnixUserGroupInformation getUGI(HttpServletRequest request) {
-    String ugi = request.getParameter("ugi");
-    try {
-      return new UnixUserGroupInformation(ugi.split(","));
+  /** Get {@link UserGroupInformation} from request 
+   * @throws IOException */
+  protected UserGroupInformation getUGI(HttpServletRequest request) 
+        throws IOException {
+    UserGroupInformation u = null;
+    if(UserGroupInformation.isSecurityEnabled()) {
+      String user = request.getRemoteUser();
+      if(user == null)
+        throw new IOException("Security enabled but user not " +
+            "authenticated by filter");
+      
+      u = UserGroupInformation.createRemoteUser(user);
+    } else { // Security's not on, pull from url
+      String ugi = request.getParameter("ugi");
+      
+      if(ugi == null) // not specified in request
+        ugi = new Configuration().get(JspHelper.WEB_UGI_PROPERTY_NAME);
+      
+      if(ugi == null) // not specified in conf either
+        throw new IOException("Cannot determine UGI from request or conf");
+      
+      u = UserGroupInformation.createRemoteUser(ugi);
     }
-    catch(Exception e) {
-      LOG.warn("Invalid ugi (= " + ugi + ")");
-    }
-    return JspHelper.webUGI;
+    
+    if(LOG.isDebugEnabled())
+      LOG.debug("getUGI is returning: " + u.getUserName());
+    return u;
   }

   /**
    * Create a {@link NameNode} proxy from the current {@link ServletContext}. 
    */
-  protected ClientProtocol createNameNodeProxy(UnixUserGroupInformation ugi
-      ) throws IOException {
+  protected ClientProtocol createNameNodeProxy() throws IOException {
     ServletContext context = getServletContext();
     InetSocketAddress nnAddr = (InetSocketAddress)context.getAttribute("name.node.address");
     Configuration conf = new HdfsConfiguration(
         (Configuration)context.getAttribute("name.conf"));
-    UnixUserGroupInformation.saveToConf(conf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
     return DFSClient.createNamenode(nnAddr, conf);
   }

@@ -85,7 +98,7 @@ abstract class DfsServlet extends HttpServlet {
         : host.getInfoPort();
     final String filename = request.getPathInfo();
     return new URI(scheme, null, hostname, port, servletpath,
-        "filename=" + filename + "&ugi=" + ugi, null);
+        "filename=" + filename + "&ugi=" + ugi.getUserName(), null);
   }

   /** Get filename from the request */
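
In the insecure branch of the new getUGI, the user name comes from the ugi query parameter, falling back to the web UGI configured under JspHelper.WEB_UGI_PROPERTY_NAME. A small sketch of that fallback, with the parameter value passed in directly instead of an HttpServletRequest, follows; WebUserSketch and resolveWebUser are hypothetical names, not code from this commit.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical helper mirroring the insecure branch of DfsServlet.getUGI:
// take the name from the "ugi" request parameter, else from configuration.
public class WebUserSketch {
  public static UserGroupInformation resolveWebUser(String ugiParam,
      Configuration conf) throws IOException {
    String name = ugiParam;
    if (name == null) {                        // not specified in the request
      name = conf.get(JspHelper.WEB_UGI_PROPERTY_NAME);
    }
    if (name == null) {                        // not specified in conf either
      throw new IOException("Cannot determine UGI from request or conf");
    }
    return UserGroupInformation.createRemoteUser(name);
  }
}
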

+ 19 - 24
src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -33,8 +33,6 @@ import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.PermissionChecker;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -393,11 +391,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
    */
   private void setConfigurationParameters(Configuration conf) 
                                           throws IOException {
-    try {
-      fsOwner = UnixUserGroupInformation.login(conf);
-    } catch (LoginException e) {
-      throw new IOException(StringUtils.stringifyException(e));
-    }
+    fsOwner = UserGroupInformation.getCurrentUser();
+    
     LOG.info("fsOwner=" + fsOwner);

     this.supergroup = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY, 
@@ -646,7 +641,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     getEditLog().logSync();
     if (auditLog.isInfoEnabled()) {
       final FileStatus stat = dir.getFileInfo(src);
-      logAuditEvent(UserGroupInformation.getCurrentUGI(),
+      logAuditEvent(UserGroupInformation.getCurrentUser(),
                    Server.getRemoteIp(),
                    "setPermission", src, null, stat);
     }
@@ -674,7 +669,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     getEditLog().logSync();
     if (auditLog.isInfoEnabled()) {
       final FileStatus stat = dir.getFileInfo(src);
-      logAuditEvent(UserGroupInformation.getCurrentUGI(),
+      logAuditEvent(UserGroupInformation.getCurrentUser(),
                    Server.getRemoteIp(),
                    "setOwner", src, null, stat);
     }
@@ -718,7 +713,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     final LocatedBlocks ret = getBlockLocationsInternal(src,
         offset, length, doAccessTime);  
     if (auditLog.isInfoEnabled()) {
-      logAuditEvent(UserGroupInformation.getCurrentUGI(),
+      logAuditEvent(UserGroupInformation.getCurrentUser(),
                    Server.getRemoteIp(),
                    "open", src, null, null);
     }
@@ -911,7 +906,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     
     if (auditLog.isInfoEnabled()) {
       final FileStatus stat = dir.getFileInfo(target);
-      logAuditEvent(UserGroupInformation.getCurrentUGI(),
+      logAuditEvent(UserGroupInformation.getLoginUser(),
                    Server.getRemoteIp(),
                    "concat", Arrays.toString(srcs), target, stat);
     }
@@ -938,7 +933,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
      dir.setTimes(src, inode, mtime, atime, true);
      if (auditLog.isInfoEnabled()) {
        final FileStatus stat = dir.getFileInfo(src);
-        logAuditEvent(UserGroupInformation.getCurrentUGI(),
+        logAuditEvent(UserGroupInformation.getCurrentUser(),
                      Server.getRemoteIp(),
                      "setTimes", src, null, stat);
      }
@@ -965,7 +960,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     boolean status = setReplicationInternal(src, replication);
     getEditLog().logSync();
     if (status && auditLog.isInfoEnabled()) {
-      logAuditEvent(UserGroupInformation.getCurrentUGI(),
+      logAuditEvent(UserGroupInformation.getCurrentUser(),
                    Server.getRemoteIp(),
                    "setReplication", src, null, null);
     }
@@ -1051,7 +1046,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     getEditLog().logSync();
     if (auditLog.isInfoEnabled()) {
       final FileStatus stat = dir.getFileInfo(src);
-      logAuditEvent(UserGroupInformation.getCurrentUGI(),
+      logAuditEvent(UserGroupInformation.getCurrentUser(),
                    Server.getRemoteIp(),
                    "create", src, null, stat);
     }
@@ -1298,7 +1293,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     }

     if (auditLog.isInfoEnabled()) {
-      logAuditEvent(UserGroupInformation.getCurrentUGI(),
+      logAuditEvent(UserGroupInformation.getCurrentUser(),
                    Server.getRemoteIp(),
                    "append", src, null, null);
     }
@@ -1606,7 +1601,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     getEditLog().logSync();
     if (status && auditLog.isInfoEnabled()) {
       final FileStatus stat = dir.getFileInfo(dst);
-      logAuditEvent(UserGroupInformation.getCurrentUGI(),
+      logAuditEvent(UserGroupInformation.getCurrentUser(),
                    Server.getRemoteIp(),
                    "rename", src, dst, stat);
     }
@@ -1653,7 +1648,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
        cmd.append(option.value()).append(" ");
      }
      final FileStatus stat = dir.getFileInfo(dst);
-      logAuditEvent(UserGroupInformation.getCurrentUGI(), Server.getRemoteIp(),
+      logAuditEvent(UserGroupInformation.getCurrentUser(), Server.getRemoteIp(),
                    cmd.toString(), src, dst, stat);
     }
   }
@@ -1693,7 +1688,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
      }
      boolean status = deleteInternal(src, true);
      if (status && auditLog.isInfoEnabled()) {
-        logAuditEvent(UserGroupInformation.getCurrentUGI(),
+        logAuditEvent(UserGroupInformation.getCurrentUser(),
                      Server.getRemoteIp(),
                      "delete", src, null, null);
      }
@@ -1793,7 +1788,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     getEditLog().logSync();
     if (status && auditLog.isInfoEnabled()) {
       final FileStatus stat = dir.getFileInfo(src);
-      logAuditEvent(UserGroupInformation.getCurrentUGI(),
+      logAuditEvent(UserGroupInformation.getCurrentUser(),
                    Server.getRemoteIp(),
                    "mkdirs", src, null, stat);
     }
@@ -2153,7 +2148,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
      }
     }
     if (auditLog.isInfoEnabled()) {
-      logAuditEvent(UserGroupInformation.getCurrentUGI(),
+      logAuditEvent(UserGroupInformation.getCurrentUser(),
                    Server.getRemoteIp(),
                    "listStatus", src, null, null);
     }
@@ -3892,7 +3887,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt

   private void checkSuperuserPrivilege() throws AccessControlException {
     if (isPermissionEnabled) {
-      PermissionChecker.checkSuperuserPrivilege(fsOwner, supergroup);
+      FSPermissionChecker.checkSuperuserPrivilege(fsOwner, supergroup);
     }
   }

@@ -4346,7 +4341,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt

   public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
       throws IOException {
-    String user = UserGroupInformation.getCurrentUGI().getUserName();
+    String user = UserGroupInformation.getCurrentUser().getUserName();
     Text owner = new Text(user);
     DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, renewer);
     return new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
@@ -4354,13 +4349,13 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt

   public Boolean renewDelegationToken(Token<DelegationTokenIdentifier> token)
       throws InvalidToken, IOException {
-    String renewer = UserGroupInformation.getCurrentUGI().getUserName();
+    String renewer = UserGroupInformation.getCurrentUser().getUserName();
     return dtSecretManager.renewToken(token, renewer);
   }

   public Boolean cancelDelegationToken(Token<DelegationTokenIdentifier> token)
       throws IOException {
-    String canceller = UserGroupInformation.getCurrentUGI().getUserName();
+    String canceller = UserGroupInformation.getCurrentUser().getUserName();
     return dtSecretManager.cancelToken(token, canceller);
   }
 }

+ 43 - 3
src/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java

@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
 import java.util.Stack;

 import org.apache.commons.logging.Log;
@@ -24,18 +28,54 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.PermissionChecker;
 import org.apache.hadoop.security.UserGroupInformation;

 /** Perform permission checking in {@link FSNamesystem}. */
-class FSPermissionChecker extends PermissionChecker {
+class FSPermissionChecker {
   static final Log LOG = LogFactory.getLog(UserGroupInformation.class);

+  private final UserGroupInformation ugi;
+  public final String user;
+  private final Set<String> groups = new HashSet<String>();
+  public final boolean isSuper;
+  
   FSPermissionChecker(String fsOwner, String supergroup
       ) throws AccessControlException{
-    super(fsOwner, supergroup);
+    try {
+      ugi = UserGroupInformation.getCurrentUser();
+    } catch (IOException e) {
+      throw new AccessControlException(e); 
+    } 
+    
+    groups.addAll(Arrays.asList(ugi.getGroupNames()));
+    user = ugi.getUserName();
+    
+    isSuper = user.equals(fsOwner) || groups.contains(supergroup);
   }

+  /**
+   * Check if the callers group contains the required values.
+   * @param group group to check
+   */
+  public boolean containsGroup(String group) {return groups.contains(group);}
+
+  /**
+   * Verify if the caller has the required permission. This will result into 
+   * an exception if the caller is not allowed to access the resource.
+   * @param owner owner of the system
+   * @param supergroup supergroup of the system
+   */
+  public static void checkSuperuserPrivilege(UserGroupInformation owner, 
+                                             String supergroup) 
+                     throws AccessControlException {
+    FSPermissionChecker checker = 
+      new FSPermissionChecker(owner.getUserName(), supergroup);
+    if (!checker.isSuper) {
+      throw new AccessControlException("Access denied for user " 
+          + checker.user + ". Superuser privilege is required");
+    }
+  }
+  
   /**
    * Check whether current user have permissions to access the path.
    * Traverse is always checked.
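
The superuser decision in the rewritten FSPermissionChecker reduces to: the caller is the filesystem owner, or one of the caller's groups is the configured supergroup. The sketch below restates that check against a UGI created for testing; the class name and the sample user and group names are illustrative only, not part of this commit.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.security.UserGroupInformation;

// Illustrative restatement of the isSuper computation in FSPermissionChecker.
public class SuperuserCheckSketch {
  static boolean isSuperuser(UserGroupInformation caller, String fsOwner,
      String supergroup) {
    Set<String> groups = new HashSet<String>(
        Arrays.asList(caller.getGroupNames()));
    return caller.getUserName().equals(fsOwner) || groups.contains(supergroup);
  }

  public static void main(String[] args) {
    // Test-only UGI, as used elsewhere in this patch (see TestStickyBit).
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        "alice", new String[] { "supergroup" });
    System.out.println(isSuperuser(ugi, "hdfs", "supergroup")); // prints true
  }
}
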

+ 13 - 6
src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.io.PrintWriter;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;

 import javax.net.SocketFactory;
 import javax.servlet.ServletContext;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.znerd.xmlenc.XMLOutputter;

@@ -77,7 +77,6 @@ public class FileChecksumServlets {
     /** {@inheritDoc} */
     public void doGet(HttpServletRequest request, HttpServletResponse response
         ) throws ServletException, IOException {
-      final UnixUserGroupInformation ugi = getUGI(request);
       final PrintWriter out = response.getWriter();
       final String filename = getFilename(request, response);
       final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
@@ -86,17 +85,25 @@ public class FileChecksumServlets {
       final Configuration conf = new HdfsConfiguration(DataNode.getDataNode().getConf());
       final int socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsConstants.READ_TIMEOUT);
       final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
-      UnixUserGroupInformation.saveToConf(conf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
-      final ClientProtocol nnproxy = DFSClient.createNamenode(conf);
-
+      
       try {
+        ClientProtocol nnproxy = getUGI(request).doAs(new PrivilegedExceptionAction<ClientProtocol>() {
+          @Override
+          public ClientProtocol run() throws IOException {
+            return DFSClient.createNamenode(conf);
+          }
+        });
+        
         final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
             filename, nnproxy, socketFactory, socketTimeout);
         MD5MD5CRC32FileChecksum.write(xml, checksum);
       } catch(IOException ioe) {
         new RemoteException(ioe.getClass().getName(), ioe.getMessage()
             ).writeXml(filename, xml);
+      } catch (InterruptedException e) {
+        new RemoteException(e.getClass().getName(), e.getMessage()
+        ).writeXml(filename, xml);
+        
       }
       xml.endDocument();
     }

+ 20 - 9
src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
+
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;

@@ -29,7 +31,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;

 /** Redirect queries about the hosted filesystem to an appropriate datanode.
  * @see org.apache.hadoop.hdfs.HftpFileSystem
@@ -39,7 +41,7 @@ public class FileDataServlet extends DfsServlet {
   private static final long serialVersionUID = 1L;

   /** Create a redirection URI */
-  protected URI createUri(FileStatus i, UnixUserGroupInformation ugi,
+  protected URI createUri(FileStatus i, UserGroupInformation ugi,
       ClientProtocol nnproxy, HttpServletRequest request)
       throws IOException, URISyntaxException {
     String scheme = request.getScheme();
@@ -54,7 +56,8 @@ public class FileDataServlet extends DfsServlet {
         "https".equals(scheme)
         "https".equals(scheme)
           ? (Integer)getServletContext().getAttribute("datanode.https.port")
           : host.getInfoPort(),
-        "/streamFile", "filename=" + i.getPath() + "&ugi=" + ugi, null);
+            "/streamFile", "filename=" + i.getPath() + 
+            "&ugi=" + ugi.getUserName(), null);
   }

   /** Select a datanode to service this request.
@@ -81,13 +84,20 @@ public class FileDataServlet extends DfsServlet {
    * }
    */
   public void doGet(HttpServletRequest request, HttpServletResponse response)
-    throws IOException {
-    final UnixUserGroupInformation ugi = getUGI(request);
-    final ClientProtocol nnproxy = createNameNodeProxy(ugi);
+      throws IOException {
+    final UserGroupInformation ugi = getUGI(request);

     try {
-      final String path = request.getPathInfo() != null
-        ? request.getPathInfo() : "/";
+      final ClientProtocol nnproxy = ugi
+          .doAs(new PrivilegedExceptionAction<ClientProtocol>() {
+            @Override
+            public ClientProtocol run() throws IOException {
+              return createNameNodeProxy();
+            }
+          });
+
+      final String path = request.getPathInfo() != null ? 
+                                                    request.getPathInfo() : "/";
       FileStatus info = nnproxy.getFileInfo(path);
       if ((info != null) && !info.isDir()) {
         response.sendRedirect(createUri(info, ugi, nnproxy,
@@ -101,8 +111,9 @@ public class FileDataServlet extends DfsServlet {
       response.getWriter().println(e.toString());
     } catch (IOException e) {
       response.sendError(400, e.getMessage());
+    } catch (InterruptedException e) {
+      response.sendError(400, e.getMessage());
     }
   }

 }
-

+ 25 - 15
src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.security.PrivilegedExceptionAction;
 import java.util.Map;

 import javax.servlet.ServletContext;
@@ -28,7 +29,6 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;

 /**
@@ -45,20 +45,30 @@ public class FsckServlet extends DfsServlet {
     final Map<String,String[]> pmap = request.getParameterMap();
     final PrintWriter out = response.getWriter();

-    final UnixUserGroupInformation ugi = getUGI(request);
-    UserGroupInformation.setCurrentUser(ugi);
+    final UserGroupInformation ugi = getUGI(request);
+    try {
+      ugi.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final ServletContext context = getServletContext();
+          final Configuration conf = 
+            new HdfsConfiguration((Configuration)context.getAttribute("name.conf"));
+          
+          NameNode nn = (NameNode) context.getAttribute("name.node");
+          
+          final FSNamesystem namesystem = nn.getNamesystem();
+          final int totalDatanodes = 
+            namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); 
+          final short minReplication = namesystem.getMinReplication();

-    final ServletContext context = getServletContext();
-    final Configuration conf = new HdfsConfiguration((Configuration) context.getAttribute("name.conf"));
-    UnixUserGroupInformation.saveToConf(conf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
-
-    final NameNode nn = (NameNode) context.getAttribute("name.node");
-    final FSNamesystem namesystem = nn.getNamesystem();
-    final int totalDatanodes = namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); 
-    final short minReplication = namesystem.getMinReplication();
-
-    new NamenodeFsck(conf, nn, nn.getNetworkTopology(), pmap, out,
-        totalDatanodes, minReplication).fsck();
+          new NamenodeFsck(conf, nn, nn.getNetworkTopology(), pmap, out,
+              totalDatanodes, minReplication).fsck();
+          
+          return null;
+        }
+      });
+    } catch (InterruptedException e) {
+      response.sendError(400, e.getMessage());
+    }
   }
 }

+ 12 - 3
src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java

@@ -21,13 +21,13 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.util.VersionInfo;

 import org.znerd.xmlenc.*;

 import java.io.IOException;
 import java.io.PrintWriter;
+import java.security.PrivilegedExceptionAction;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.HashMap;
@@ -125,7 +125,6 @@ public class ListPathsServlet extends DfsServlet {
    */
   public void doGet(HttpServletRequest request, HttpServletResponse response)
     throws ServletException, IOException {
-    final UnixUserGroupInformation ugi = getUGI(request);
     final PrintWriter out = response.getWriter();
     final XMLOutputter doc = new XMLOutputter(out, "UTF-8");
     try {
@@ -134,7 +133,14 @@ public class ListPathsServlet extends DfsServlet {
       final boolean recur = "yes".equals(root.get("recursive"));
       final Pattern filter = Pattern.compile(root.get("filter"));
       final Pattern exclude = Pattern.compile(root.get("exclude"));
-      ClientProtocol nnproxy = createNameNodeProxy(ugi);
+      
+      ClientProtocol nnproxy = 
+        getUGI(request).doAs(new PrivilegedExceptionAction<ClientProtocol>() {
+        @Override
+        public ClientProtocol run() throws IOException {
+          return createNameNodeProxy();
+        }
+      });

       doc.declaration();
       doc.startTag("listing");
@@ -173,6 +179,9 @@ public class ListPathsServlet extends DfsServlet {
       if (doc != null) {
         doc.endDocument();
       }
+    } catch (InterruptedException e) {
+      LOG.warn("ListPathServlet encountered InterruptedException", e);
+      response.sendError(400, e.getMessage());
     } finally {
       if (out != null) {
         out.close();

+ 15 - 15
src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
-import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
@@ -68,6 +67,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -78,11 +78,10 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.Groups;
+import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.ConfiguredPolicy;
-import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.security.token.Token;
@@ -293,12 +292,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
     if (serviceAuthEnabled = 
           conf.getBoolean(
             ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
-      PolicyProvider policyProvider = 
-        (PolicyProvider)(ReflectionUtils.newInstance(
-            conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
-                HDFSPolicyProvider.class, PolicyProvider.class), 
-            conf));
-      SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
+      ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
     }

     // create rpc server 
@@ -417,6 +411,11 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
   }

   protected NameNode(Configuration conf, NamenodeRole role) throws IOException {
+    UserGroupInformation.setConfiguration(conf);
+    DFSUtil.login(conf, 
+        DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
+        DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY);
+
     this.role = role;
     try {
       initialize(conf);
@@ -608,7 +607,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
     namesystem.startFile(src,
-        new PermissionStatus(UserGroupInformation.getCurrentUGI().getUserName(),
+        new PermissionStatus(UserGroupInformation.getCurrentUser().getUserName(),
             null, masked),
         clientName, clientMachine, flag.get(), createParent, replication, blockSize);
     myMetrics.numFilesCreated.inc();
@@ -815,7 +814,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
     return namesystem.mkdirs(src,
-        new PermissionStatus(UserGroupInformation.getCurrentUGI().getUserName(),
+        new PermissionStatus(UserGroupInformation.getCurrentUser().getUserName(),
             null, masked), createParent);
   }

@@ -1176,14 +1175,15 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
       throw new AuthorizationException("Service Level Authorization not enabled!");
     }

-    SecurityUtil.getPolicy().refresh();
+    ServiceAuthorizationManager.refresh(
+        new Configuration(), new HDFSPolicyProvider());
   }

   @Override
   public void refreshUserToGroupsMappings(Configuration conf) throws IOException {
     LOG.info("Refreshing all user-to-groups mappings. Requested by user: " + 
-             UserGroupInformation.getCurrentUGI().getUserName());
-    SecurityUtil.getUserToGroupsMappingService(conf).refresh();
+             UserGroupInformation.getCurrentUser().getUserName());
+    Groups.getUserToGroupsMappingService(conf).refresh();
   }

   private static void printUsage() {
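
DFSUtil.login, added by this change but not shown in this hunk, is handed the configuration keys for the NameNode keytab file and principal name. A plausible sketch of such a login step, assuming it ultimately delegates to UserGroupInformation.loginUserFromKeytab, is given below; it is an illustration, not the actual DFSUtil code from this commit.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

// Sketch only: reads a keytab path and principal from the given keys and
// logs the daemon in before any RPC or HTTP server is started.
public class KeytabLoginSketch {
  public static void login(Configuration conf, String keytabFileKey,
      String userNameKey) throws IOException {
    String keytab = conf.get(keytabFileKey);
    String principal = conf.get(userNameKey);
    if (keytab == null || principal == null) {
      return; // nothing configured; stay with the default (simple) login
    }
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab(principal, keytab);
  }
}
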

+ 21 - 7
src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintWriter;
 import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
 import java.util.Enumeration;
 import java.util.List;
 import javax.servlet.ServletException;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.mortbay.jetty.InclusiveByteRange;

 public class StreamFile extends DfsServlet {
@@ -50,11 +50,18 @@ public class StreamFile extends DfsServlet {
   
   
   /** getting a client for connecting to dfs */
   protected DFSClient getDFSClient(HttpServletRequest request)
-      throws IOException {
-    Configuration conf = new HdfsConfiguration(masterConf);
-    UnixUserGroupInformation.saveToConf(conf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, getUGI(request));
-    return new DFSClient(nameNodeAddr, conf);
+      throws IOException, InterruptedException {
+    final Configuration conf = new HdfsConfiguration(masterConf);
+    
+    DFSClient client = 
+      getUGI(request).doAs(new PrivilegedExceptionAction<DFSClient>() {
+      @Override
+      public DFSClient run() throws IOException {
+        return new DFSClient(nameNodeAddr, conf);
+      }
+    });
+    
+    return client;
   }
   
   public void doGet(HttpServletRequest request, HttpServletResponse response)
@@ -72,7 +79,14 @@ public class StreamFile extends DfsServlet {
     if (reqRanges != null && !reqRanges.hasMoreElements())
       reqRanges = null;

-    DFSClient dfs = getDFSClient(request);  
+    DFSClient dfs;
+    try {
+      dfs = getDFSClient(request);
+    } catch (InterruptedException e) {
+      response.sendError(400, e.getMessage());
+      return;
+    }
+    
     long fileLen = dfs.getFileInfo(filename).getLen();
     FSInputStream in = dfs.open(filename);
     OutputStream os = response.getOutputStream();

+ 5 - 14
src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -24,8 +24,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.TreeSet;

-import javax.security.auth.login.LoginException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
@@ -45,7 +43,7 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
@@ -716,16 +714,9 @@ public class DFSAdmin extends FsShell {
     return 0;
   }
   
-  private static UnixUserGroupInformation getUGI(Configuration conf) 
+  private static UserGroupInformation getUGI() 
   throws IOException {
-    UnixUserGroupInformation ugi = null;
-    try {
-      ugi = UnixUserGroupInformation.login(conf, true);
-    } catch (LoginException e) {
-      throw (IOException)(new IOException(
-          "Failed to get the current user's information.").initCause(e));
-    }
-    return ugi;
+    return UserGroupInformation.getCurrentUser();
   }

   /**
@@ -742,7 +733,7 @@ public class DFSAdmin extends FsShell {
       (RefreshAuthorizationPolicyProtocol) 
       RPC.getProxy(RefreshAuthorizationPolicyProtocol.class, 
                    RefreshAuthorizationPolicyProtocol.versionID, 
-                   NameNode.getAddress(conf), getUGI(conf), conf,
+                   NameNode.getAddress(conf), getUGI(), conf,
                    NetUtils.getSocketFactory(conf, 
                                              RefreshAuthorizationPolicyProtocol.class));
     
@@ -766,7 +757,7 @@ public class DFSAdmin extends FsShell {
       (RefreshUserToGroupMappingsProtocol) 
       RPC.getProxy(RefreshUserToGroupMappingsProtocol.class, 
                    RefreshUserToGroupMappingsProtocol.versionID, 
-                   NameNode.getAddress(conf), getUGI(conf), conf,
+                   NameNode.getAddress(conf), getUGI(), conf,
                    NetUtils.getSocketFactory(conf, 
                                              RefreshUserToGroupMappingsProtocol.class));
     

+ 3 - 7
src/java/org/apache/hadoop/hdfs/tools/DFSck.java

@@ -25,14 +25,11 @@ import java.net.URL;
 import java.net.URLConnection;
 import java.net.URLEncoder;

-import javax.security.auth.login.LoginException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -72,11 +69,10 @@ public class DFSck extends Configured implements Tool {
   /**
    * Filesystem checker.
    * @param conf current Configuration
-   * @throws LoginException if login failed 
    */
-  public DFSck(Configuration conf) throws LoginException {
+  public DFSck(Configuration conf) throws IOException {
     super(conf);
-    this.ugi = UnixUserGroupInformation.login(conf, true);
+    this.ugi = UserGroupInformation.getCurrentUser();
   }

   /**
@@ -110,7 +106,7 @@ public class DFSck extends Configured implements Tool {
     final StringBuffer url = new StringBuffer("http://");
     url.append(getConf().get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, 
                              DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
-    url.append("/fsck?ugi=").append(ugi).append("&path=");
+    url.append("/fsck?ugi=").append(ugi.getUserName()).append("&path=");

     String dir = "/";
     // find top-level dir first
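
DFSck still drives fsck through the NameNode's HTTP servlet; only the ugi query parameter changes from the full comma-separated UGI to the bare user name. The sketch below shows the shape of the resulting URL; the NameNode address is a placeholder, not a value taken from this patch.

import java.io.IOException;
import java.net.URLEncoder;

import org.apache.hadoop.security.UserGroupInformation;

// Sketch of the fsck URL built by DFSck after this change.
// "namenode.example.com:50070" is a placeholder address.
public class FsckUrlSketch {
  public static void main(String[] args) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    StringBuffer url = new StringBuffer("http://");
    url.append("namenode.example.com:50070");
    url.append("/fsck?ugi=").append(ugi.getUserName()).append("&path=");
    url.append(URLEncoder.encode("/", "UTF-8"));
    System.out.println(url);
  }
}
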

+ 5 - 1
src/test/hdfs-site.xml

@@ -4,6 +4,10 @@
 <!-- Put site-specific property overrides in this file. -->

 <configuration>
-
+  <!-- Turn security off for tests by default -->
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>simple</value>
+  </property>

 </configuration>
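
This test-only override pins hadoop.security.authentication to simple so the new UserGroupInformation code does not look for Kerberos credentials during unit tests. The same effect can be achieved programmatically in a test JVM, for example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

// Example of forcing simple (non-Kerberos) authentication in a test JVM.
public class SimpleAuthForTests {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("hadoop.security.authentication", "simple");
    UserGroupInformation.setConfiguration(conf);
    System.out.println("security enabled? "
        + UserGroupInformation.isSecurityEnabled()); // prints false
  }
}
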

+ 6 - 6
src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml

@@ -16329,17 +16329,17 @@
           <expected-output></expected-output>
         </comparator>
       </comparators>
-    </test>
+    </test><!--
     
-    <test> <!--Tested -->
+    <test> Tested 
       <description>refreshServiceAcl: verifying error message while refreshing security authorization policy for namenode</description>
       <test-commands>
-        <!-- hadoop-policy.xml for tests has 
-             security.refresh.policy.protocol.acl = ${user.name} -->
+         hadoop-policy.xml for tests has 
+             security.refresh.policy.protocol.acl = ${user.name} 
        <dfs-admin-command>-fs NAMENODE -Dhadoop.job.ugi=blah,blah -refreshServiceAcl </dfs-admin-command>
       </test-commands>
       <cleanup-commands>
-        <!-- No cleanup -->
+         No cleanup 
       </cleanup-commands>
       <comparators>
         <comparator>
@@ -16349,7 +16349,7 @@
       </comparators>
     </test>
    
-    <!-- Test for safemode -->
+    --><!-- Test for safemode -->
     <test> <!-- TESTED -->
       <description>safemode: Test for enter - Namenode is not in safemode</description>
       <test-commands>

+ 2 - 2
src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java

@@ -26,7 +26,7 @@ import javax.security.auth.login.LoginException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -45,7 +45,7 @@ public class TestFcHdfsCreateMkdir extends
     cluster = new MiniDFSCluster(conf, 2, true, null);
     fc = FileContext.getFileContext(cluster.getURI(), conf);
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
-        UnixUserGroupInformation.login().getUserName()));
+        UserGroupInformation.getCurrentUser().getUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }


+ 2 - 2
src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsPermission.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -45,7 +45,7 @@ public class TestFcHdfsPermission extends FileContextPermissionBase {
     cluster = new MiniDFSCluster(conf, 2, true, null);
     fc = FileContext.getFileContext(cluster.getURI(), conf);
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
-        UnixUserGroupInformation.login().getUserName()));
+        UserGroupInformation.getCurrentUser().getUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }


+ 3 - 3
src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java

@@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -52,7 +52,7 @@ public class TestHDFSFileContextMainOperations extends
     cluster.waitClusterUp();
     fc = FileContext.getFileContext(cluster.getURI(), CONF);
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
-        UnixUserGroupInformation.login().getUserName()));
+        UserGroupInformation.getCurrentUser().getUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }

@@ -65,7 +65,7 @@ public class TestHDFSFileContextMainOperations extends
     cluster.waitClusterUp();
     cluster.waitClusterUp();
     fc = FileContext.getFileContext(cluster.getURI(), CONF);
     fc = FileContext.getFileContext(cluster.getURI(), CONF);
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
-        UnixUserGroupInformation.login().getUserName()));
+        UserGroupInformation.getCurrentUser().getUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }
   }
       
       

+ 17 - 31
src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java

@@ -19,8 +19,6 @@ package org.apache.hadoop.fs.permission;
 
 
 import java.io.IOException;
 import java.io.IOException;
 
 
-import javax.security.auth.login.LoginException;
-
 import junit.framework.TestCase;
 import junit.framework.TestCase;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
@@ -28,25 +26,26 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 
 public class TestStickyBit extends TestCase {
 public class TestStickyBit extends TestCase {
 
 
-  static UnixUserGroupInformation user1 = new UnixUserGroupInformation(
-      "theDoctor", new String[] { "tardis" });
-  static UnixUserGroupInformation user2 = new UnixUserGroupInformation("rose",
-      new String[] { "powellestates" });
-
+  static UserGroupInformation user1 = 
+    UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
+  static UserGroupInformation user2 = 
+    UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"});
+  
   /**
   /**
    * Ensure that even if a file is in a directory with the sticky bit on,
    * Ensure that even if a file is in a directory with the sticky bit on,
    * another user can write to that file (assuming correct permissions).
    * another user can write to that file (assuming correct permissions).
    */
    */
   private void confirmCanAppend(Configuration conf, FileSystem hdfs,
   private void confirmCanAppend(Configuration conf, FileSystem hdfs,
-      Path baseDir) throws IOException {
+      Path baseDir) throws IOException, InterruptedException {
     // Create a tmp directory with wide-open permissions and sticky bit
     // Create a tmp directory with wide-open permissions and sticky bit
     Path p = new Path(baseDir, "tmp");
     Path p = new Path(baseDir, "tmp");
 
 
@@ -54,13 +53,13 @@ public class TestStickyBit extends TestCase {
     hdfs.setPermission(p, new FsPermission((short) 01777));
     hdfs.setPermission(p, new FsPermission((short) 01777));
 
 
     // Write a file to the new tmp directory as a regular user
     // Write a file to the new tmp directory as a regular user
-    hdfs = logonAs(user1, conf, hdfs);
+    hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
     Path file = new Path(p, "foo");
     Path file = new Path(p, "foo");
     writeFile(hdfs, file);
     writeFile(hdfs, file);
     hdfs.setPermission(file, new FsPermission((short) 0777));
     hdfs.setPermission(file, new FsPermission((short) 0777));
 
 
     // Log onto cluster as another user and attempt to append to file
     // Log onto cluster as another user and attempt to append to file
-    hdfs = logonAs(user2, conf, hdfs);
+    hdfs = DFSTestUtil.getFileSystemAs(user2, conf);
     Path file2 = new Path(p, "foo");
     Path file2 = new Path(p, "foo");
     FSDataOutputStream h = hdfs.append(file2);
     FSDataOutputStream h = hdfs.append(file2);
     h.write("Some more data".getBytes());
     h.write("Some more data".getBytes());
@@ -72,13 +71,13 @@ public class TestStickyBit extends TestCase {
    * set.
    * set.
    */
    */
   private void confirmDeletingFiles(Configuration conf, FileSystem hdfs,
   private void confirmDeletingFiles(Configuration conf, FileSystem hdfs,
-      Path baseDir) throws IOException {
+      Path baseDir) throws IOException, InterruptedException {
     Path p = new Path(baseDir, "contemporary");
     Path p = new Path(baseDir, "contemporary");
     hdfs.mkdirs(p);
     hdfs.mkdirs(p);
     hdfs.setPermission(p, new FsPermission((short) 01777));
     hdfs.setPermission(p, new FsPermission((short) 01777));
 
 
     // Write a file to the new temp directory as a regular user
     // Write a file to the new temp directory as a regular user
-    hdfs = logonAs(user1, conf, hdfs);
+    hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
     Path file = new Path(p, "foo");
     Path file = new Path(p, "foo");
     writeFile(hdfs, file);
     writeFile(hdfs, file);
 
 
@@ -86,7 +85,7 @@ public class TestStickyBit extends TestCase {
     assertEquals(user1.getUserName(), hdfs.getFileStatus(file).getOwner());
     assertEquals(user1.getUserName(), hdfs.getFileStatus(file).getOwner());
 
 
     // Log onto cluster as another user and attempt to delete the file
     // Log onto cluster as another user and attempt to delete the file
-    FileSystem hdfs2 = logonAs(user2, conf, hdfs);
+    FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user2, conf);
 
 
     try {
     try {
       hdfs2.delete(file, false);
       hdfs2.delete(file, false);
@@ -159,7 +158,7 @@ public class TestStickyBit extends TestCase {
     assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());
     assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());
   }
   }
 
 
-  public void testGeneralSBBehavior() throws IOException {
+  public void testGeneralSBBehavior() throws IOException, InterruptedException {
     MiniDFSCluster cluster = null;
     MiniDFSCluster cluster = null;
     try {
     try {
       Configuration conf = new HdfsConfiguration();
       Configuration conf = new HdfsConfiguration();
@@ -197,7 +196,7 @@ public class TestStickyBit extends TestCase {
    * Test that one user can't rename/move another user's file when the sticky
    * Test that one user can't rename/move another user's file when the sticky
    * bit is set.
    * bit is set.
    */
    */
-  public void testMovingFiles() throws IOException, LoginException {
+  public void testMovingFiles() throws IOException, InterruptedException {
     MiniDFSCluster cluster = null;
     MiniDFSCluster cluster = null;
 
 
     try {
     try {
@@ -220,12 +219,12 @@ public class TestStickyBit extends TestCase {
       // Write a file to the new tmp directory as a regular user
       // Write a file to the new tmp directory as a regular user
       Path file = new Path(tmpPath, "foo");
       Path file = new Path(tmpPath, "foo");
 
 
-      FileSystem hdfs2 = logonAs(user1, conf, hdfs);
+      FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user1, conf);
 
 
       writeFile(hdfs2, file);
       writeFile(hdfs2, file);
 
 
       // Log onto cluster as another user and attempt to move the file
       // Log onto cluster as another user and attempt to move the file
-      FileSystem hdfs3 = logonAs(user2, conf, hdfs);
+      FileSystem hdfs3 = DFSTestUtil.getFileSystemAs(user2, conf);
 
 
       try {
       try {
         hdfs3.rename(file, new Path(tmpPath2, "renamed"));
         hdfs3.rename(file, new Path(tmpPath2, "renamed"));
@@ -289,19 +288,6 @@ public class TestStickyBit extends TestCase {
     }
     }
   }
   }
 
 
-  /***
-   * Create a new configuration for the specified user and return a filesystem
-   * accessed by that user
-   */
-  static private FileSystem logonAs(UnixUserGroupInformation user,
-      Configuration conf, FileSystem hdfs) throws IOException {
-    Configuration conf2 = new HdfsConfiguration(conf);
-    UnixUserGroupInformation.saveToConf(conf2,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, user);
-
-    return FileSystem.get(conf2);
-  }
-
   /***
   /***
    * Write a quick file to the specified file system at specified path
    * Write a quick file to the specified file system at specified path
    */
    */

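TestStickyBit now builds its two principals with createUserForTesting instead of constructing UnixUserGroupInformation objects. A small sketch of what such an in-memory user looks like; the name "alice" and group "testgroup" are illustrative only:

import java.util.Arrays;
import org.apache.hadoop.security.UserGroupInformation;

public class FakeUserExample {
  public static void main(String[] args) {
    // Purely in-memory test principals: no OS accounts or Kerberos tickets
    // are involved, and the group list is served from the UGI itself.
    UserGroupInformation alice =
        UserGroupInformation.createUserForTesting("alice", new String[] {"testgroup"});
    System.out.println(alice.getUserName());                    // alice
    System.out.println(Arrays.toString(alice.getGroupNames())); // [testgroup]
  }
}
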
+ 8 - 10
src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java

@@ -29,9 +29,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.*;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 
 /** Utilities for append-related tests */ 
 /** Utilities for append-related tests */ 
 public class AppendTestUtil {
 public class AppendTestUtil {
@@ -92,15 +90,15 @@ public class AppendTestUtil {
    * @param conf current Configuration
    * @param conf current Configuration
    * @return FileSystem instance
    * @return FileSystem instance
    * @throws IOException
    * @throws IOException
+   * @throws InterruptedException 
    */
    */
-  public static FileSystem createHdfsWithDifferentUsername(Configuration conf
-      ) throws IOException {
-    Configuration conf2 = new HdfsConfiguration(conf);
-    String username = UserGroupInformation.getCurrentUGI().getUserName()+"_XXX";
-    UnixUserGroupInformation.saveToConf(conf2,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME,
-        new UnixUserGroupInformation(username, new String[]{"supergroup"}));
-    return FileSystem.get(conf2);
+  public static FileSystem createHdfsWithDifferentUsername(final Configuration conf
+      ) throws IOException, InterruptedException {
+    String username = UserGroupInformation.getCurrentUser().getUserName()+"_XXX";
+    UserGroupInformation ugi = 
+      UserGroupInformation.createUserForTesting(username, new String[]{"supergroup"});
+    
+    return DFSTestUtil.getFileSystemAs(ugi, conf);
   }
   }
 
 
   static void write(OutputStream out, int offset, int length) throws IOException {
   static void write(OutputStream out, int offset, int length) throws IOException {

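The rewritten createHdfsWithDifferentUsername helper derives a fresh "<current user>_XXX" identity in "supergroup" and hands back a FileSystem owned by it. A hedged usage sketch, assuming a running mini-cluster reachable through the supplied Configuration; the wrapper class and method names below are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AppendTestUtil;

public class DifferentUserAppend {
  // Illustrative only: assumes conf points at a live (mini) HDFS cluster.
  static void appendAsOtherUser(Configuration conf, Path file) throws Exception {
    // Hands back a FileSystem bound to "<current user>_XXX" in "supergroup",
    // obtained through the new doAs-based helper rather than a
    // per-Configuration username override.
    FileSystem otherFs = AppendTestUtil.createHdfsWithDifferentUsername(conf);
    try {
      otherFs.append(file).close();
    } finally {
      otherFs.close();
    }
  }
}
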
+ 14 - 33
src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -25,6 +25,7 @@ import java.io.FileReader;
 import java.io.IOException;
 import java.io.IOException;
 import java.net.URL;
 import java.net.URL;
 import java.net.URLConnection;
 import java.net.URLConnection;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.List;
 import java.util.List;
@@ -44,7 +45,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.BlockAccessToken;
 import org.apache.hadoop.hdfs.security.BlockAccessToken;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 
 /** Utilities for HDFS tests */
 /** Utilities for HDFS tests */
@@ -286,38 +286,6 @@ public class DFSTestUtil {
     IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
     IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
     return out.toString();
     return out.toString();
   }
   }
-
-  static public Configuration getConfigurationWithDifferentUsername(Configuration conf
-      ) throws IOException {
-    final Configuration c = new HdfsConfiguration(conf);
-    final UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
-    final String username = ugi.getUserName()+"_XXX";
-    final String[] groups = {ugi.getGroupNames()[0] + "_XXX"};
-    UnixUserGroupInformation.saveToConf(c,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME,
-        new UnixUserGroupInformation(username, groups));
-    return c;
-  }
-  
-  
-  /**
-   * modify conf to contain fake users with fake group
-   * @param conf to modify
-   * @throws IOException
-   */
-  static public void updateConfigurationWithFakeUsername(Configuration conf) {
-    // fake users
-    String username="fakeUser1";
-    String[] groups = {"fakeGroup1"};
-    // mapping to groups
-    Map<String, String[]> u2g_map = new HashMap<String, String[]>(1);
-    u2g_map.put(username, groups);
-    updateConfWithFakeGroupMapping(conf, u2g_map);
-    
-    UnixUserGroupInformation.saveToConf(conf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME,
-        new UnixUserGroupInformation(username, groups));
-  }
   
   
   /**
   /**
    * mock class to get group mapping for fake users
    * mock class to get group mapping for fake users
@@ -378,4 +346,17 @@ public class DFSTestUtil {
     
     
   }
   }
   
   
+  /**
+   * Get a FileSystem instance as specified user in a doAs block.
+   */
+  static public FileSystem getFileSystemAs(UserGroupInformation ugi, 
+                                   final Configuration conf) throws IOException, 
+                                                        InterruptedException {
+    return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+  }
 }
 }

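getFileSystemAs is the workhorse added here: it wraps FileSystem.get in a doAs block so the returned handle acts as the supplied user. A sketch of the same pattern in generic form (class and method names are illustrative); doAs declares IOException and InterruptedException, which is why many test signatures in this patch gain "throws InterruptedException":

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsPattern {
  // Run an action with the supplied UGI as the caller and return its result.
  static FileSystem openAs(UserGroupInformation ugi, final Configuration conf)
      throws IOException, InterruptedException {
    return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(conf);   // resolved with ugi as the current user
      }
    });
  }
}
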
+ 0 - 12
src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -28,8 +28,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collection;
 import java.util.Random;
 import java.util.Random;
 
 
-import javax.security.auth.login.LoginException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FileUtil;
@@ -46,12 +44,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.net.StaticMapping;
-import org.apache.hadoop.security.UnixUserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.ToolRunner;
 
 
@@ -245,13 +240,6 @@ public class MiniDFSCluster {
                         String[] racks, String hosts[],
                         String[] racks, String hosts[],
                         long[] simulatedCapacities) throws IOException {
                         long[] simulatedCapacities) throws IOException {
     this.conf = conf;
     this.conf = conf;
-    try {
-      UserGroupInformation.setCurrentUser(UnixUserGroupInformation.login(conf));
-    } catch (LoginException e) {
-      IOException ioe = new IOException();
-      ioe.initCause(e);
-      throw ioe;
-    }
     base_dir = new File(getBaseDirectory());
     base_dir = new File(getBaseDirectory());
     data_dir = new File(base_dir, "data");
     data_dir = new File(base_dir, "data");
     
     

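MiniDFSCluster no longer forces a login into global state; the process identity comes from the ambient login user, and any test that needs another identity scopes it with doAs. A hedged sketch, with hypothetical user names, of two identities used side by side without touching the Configuration:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class TwoUsersSideBySide {
  static void listAsBoth(final Configuration conf) throws Exception {
    UserGroupInformation u1 = UserGroupInformation.createUserForTesting("u1", new String[] {"g1"});
    UserGroupInformation u2 = UserGroupInformation.createUserForTesting("u2", new String[] {"g2"});
    for (final UserGroupInformation ugi : new UserGroupInformation[] {u1, u2}) {
      ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          // Each block sees its own caller identity; no shared static state
          // (and no Configuration keys) are mutated between the two users.
          FileSystem fs = FileSystem.get(conf);
          fs.listStatus(new Path("/"));
          return null;
        }
      });
    }
  }
}
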
+ 31 - 31
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java

@@ -40,7 +40,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 
 /** Unit tests for permission */
 /** Unit tests for permission */
 public class TestDFSPermission extends TestCase {
 public class TestDFSPermission extends TestCase {
@@ -55,11 +55,11 @@ public class TestDFSPermission extends TestCase {
   final private static String USER2_NAME = "user2";
   final private static String USER2_NAME = "user2";
   final private static String USER3_NAME = "user3";
   final private static String USER3_NAME = "user3";
 
 
-  private static UnixUserGroupInformation SUPERUSER;
-  private static UnixUserGroupInformation USER1;
-  private static UnixUserGroupInformation USER2;
-  private static UnixUserGroupInformation USER3;
-  
+  private static UserGroupInformation SUPERUSER;
+  private static UserGroupInformation USER1;
+  private static UserGroupInformation USER2;
+  private static UserGroupInformation USER3;
+
   final private static short MAX_PERMISSION = 511;
   final private static short MAX_PERMISSION = 511;
   final private static short DEFAULT_UMASK = 022;
   final private static short DEFAULT_UMASK = 022;
   final private static short FILE_MASK = 0666;
   final private static short FILE_MASK = 0666;
@@ -96,14 +96,14 @@ public class TestDFSPermission extends TestCase {
       DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
       DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
       
       
       // Initiate all four users
       // Initiate all four users
-      SUPERUSER = UnixUserGroupInformation.login(conf);
-      USER1 = new UnixUserGroupInformation(USER1_NAME, new String[] {
-          GROUP1_NAME, GROUP2_NAME });
-      USER2 = new UnixUserGroupInformation(USER2_NAME, new String[] {
-          GROUP2_NAME, GROUP3_NAME });
-      USER3 = new UnixUserGroupInformation(USER3_NAME, new String[] {
-          GROUP3_NAME, GROUP4_NAME });
-    } catch (LoginException e) {
+      SUPERUSER = UserGroupInformation.getCurrentUser();
+      USER1 = UserGroupInformation.createUserForTesting(USER1_NAME,
+          new String[] { GROUP1_NAME, GROUP2_NAME });
+      USER2 = UserGroupInformation.createUserForTesting(USER2_NAME,
+          new String[] { GROUP2_NAME, GROUP3_NAME });
+      USER3 = UserGroupInformation.createUserForTesting(USER3_NAME,
+          new String[] { GROUP3_NAME, GROUP4_NAME });
+    } catch (IOException e) {
       throw new RuntimeException(e);
       throw new RuntimeException(e);
     }
     }
   }
   }
@@ -390,7 +390,7 @@ public class TestDFSPermission extends TestCase {
    * for the given user for operations mkdir, open, setReplication, 
    * for the given user for operations mkdir, open, setReplication, 
    * getFileInfo, isDirectory, exists, getContentLength, list, rename,
    * getFileInfo, isDirectory, exists, getContentLength, list, rename,
    * and delete */
    * and delete */
-  private void testPermissionCheckingPerUser(UnixUserGroupInformation ugi,
+  private void testPermissionCheckingPerUser(UserGroupInformation ugi,
       short[] ancestorPermission, short[] parentPermission,
       short[] ancestorPermission, short[] parentPermission,
       short[] filePermission, Path[] parentDirs, Path[] files, Path[] dirs)
       short[] filePermission, Path[] parentDirs, Path[] files, Path[] dirs)
       throws Exception {
       throws Exception {
@@ -477,7 +477,7 @@ public class TestDFSPermission extends TestCase {
     final static protected short opAncestorPermission = SEARCH_MASK;
     final static protected short opAncestorPermission = SEARCH_MASK;
     protected short opParentPermission;
     protected short opParentPermission;
     protected short opPermission;
     protected short opPermission;
-    protected UnixUserGroupInformation ugi;
+    protected UserGroupInformation ugi;
 
 
     /* initialize */
     /* initialize */
     protected void set(Path path, short ancestorPermission,
     protected void set(Path path, short ancestorPermission,
@@ -491,7 +491,7 @@ public class TestDFSPermission extends TestCase {
     }
     }
 
 
     /* Perform an operation and verify if the permission checking is correct */
     /* Perform an operation and verify if the permission checking is correct */
-    void verifyPermission(UnixUserGroupInformation ugi) throws LoginException,
+    void verifyPermission(UserGroupInformation ugi) throws LoginException,
         IOException {
         IOException {
       if (this.ugi != ugi) {
       if (this.ugi != ugi) {
         setRequiredPermissions(ugi);
         setRequiredPermissions(ugi);
@@ -535,7 +535,7 @@ public class TestDFSPermission extends TestCase {
     }
     }
 
 
     /* Set the permissions required to pass the permission checking */
     /* Set the permissions required to pass the permission checking */
-    protected void setRequiredPermissions(UnixUserGroupInformation ugi)
+    protected void setRequiredPermissions(UserGroupInformation ugi)
         throws IOException {
         throws IOException {
       if (SUPERUSER.equals(ugi)) {
       if (SUPERUSER.equals(ugi)) {
         requiredAncestorPermission = SUPER_MASK;
         requiredAncestorPermission = SUPER_MASK;
@@ -612,7 +612,7 @@ public class TestDFSPermission extends TestCase {
   private CreatePermissionVerifier createVerifier =
   private CreatePermissionVerifier createVerifier =
     new CreatePermissionVerifier();
     new CreatePermissionVerifier();
   /* test if the permission checking of create/mkdir is correct */
   /* test if the permission checking of create/mkdir is correct */
-  private void testCreateMkdirs(UnixUserGroupInformation ugi, Path path,
+  private void testCreateMkdirs(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission) throws Exception {
       short ancestorPermission, short parentPermission) throws Exception {
     createVerifier.set(path, OpType.MKDIRS, ancestorPermission,
     createVerifier.set(path, OpType.MKDIRS, ancestorPermission,
         parentPermission);
         parentPermission);
@@ -641,7 +641,7 @@ public class TestDFSPermission extends TestCase {
 
 
   private OpenPermissionVerifier openVerifier = new OpenPermissionVerifier();
   private OpenPermissionVerifier openVerifier = new OpenPermissionVerifier();
   /* test if the permission checking of open is correct */
   /* test if the permission checking of open is correct */
-  private void testOpen(UnixUserGroupInformation ugi, Path path,
+  private void testOpen(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission, short filePermission)
       short ancestorPermission, short parentPermission, short filePermission)
       throws Exception {
       throws Exception {
     openVerifier
     openVerifier
@@ -667,7 +667,7 @@ public class TestDFSPermission extends TestCase {
   private SetReplicationPermissionVerifier replicatorVerifier =
   private SetReplicationPermissionVerifier replicatorVerifier =
     new SetReplicationPermissionVerifier();
     new SetReplicationPermissionVerifier();
   /* test if the permission checking of setReplication is correct */
   /* test if the permission checking of setReplication is correct */
-  private void testSetReplication(UnixUserGroupInformation ugi, Path path,
+  private void testSetReplication(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission, short filePermission)
       short ancestorPermission, short parentPermission, short filePermission)
       throws Exception {
       throws Exception {
     replicatorVerifier.set(path, ancestorPermission, parentPermission,
     replicatorVerifier.set(path, ancestorPermission, parentPermission,
@@ -695,7 +695,7 @@ public class TestDFSPermission extends TestCase {
   private SetTimesPermissionVerifier timesVerifier =
   private SetTimesPermissionVerifier timesVerifier =
     new SetTimesPermissionVerifier();
     new SetTimesPermissionVerifier();
   /* test if the permission checking of setReplication is correct */
   /* test if the permission checking of setReplication is correct */
-  private void testSetTimes(UnixUserGroupInformation ugi, Path path,
+  private void testSetTimes(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission, short filePermission)
       short ancestorPermission, short parentPermission, short filePermission)
       throws Exception {
       throws Exception {
     timesVerifier.set(path, ancestorPermission, parentPermission,
     timesVerifier.set(path, ancestorPermission, parentPermission,
@@ -750,7 +750,7 @@ public class TestDFSPermission extends TestCase {
   private StatsPermissionVerifier statsVerifier = new StatsPermissionVerifier();
   private StatsPermissionVerifier statsVerifier = new StatsPermissionVerifier();
   /* test if the permission checking of isDirectory, exist,
   /* test if the permission checking of isDirectory, exist,
    * getFileInfo, getContentSummary is correct */
    * getFileInfo, getContentSummary is correct */
-  private void testStats(UnixUserGroupInformation ugi, Path path,
+  private void testStats(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission) throws Exception {
       short ancestorPermission, short parentPermission) throws Exception {
     statsVerifier.set(path, OpType.GET_FILEINFO, ancestorPermission,
     statsVerifier.set(path, OpType.GET_FILEINFO, ancestorPermission,
         parentPermission);
         parentPermission);
@@ -809,7 +809,7 @@ public class TestDFSPermission extends TestCase {
 
 
   ListPermissionVerifier listVerifier = new ListPermissionVerifier();
   ListPermissionVerifier listVerifier = new ListPermissionVerifier();
   /* test if the permission checking of list is correct */
   /* test if the permission checking of list is correct */
-  private void testList(UnixUserGroupInformation ugi, Path file, Path dir,
+  private void testList(UserGroupInformation ugi, Path file, Path dir,
       short ancestorPermission, short parentPermission, short filePermission)
       short ancestorPermission, short parentPermission, short filePermission)
       throws Exception {
       throws Exception {
     listVerifier.set(file, InodeType.FILE, ancestorPermission,
     listVerifier.set(file, InodeType.FILE, ancestorPermission,
@@ -864,7 +864,7 @@ public class TestDFSPermission extends TestCase {
 
 
   RenamePermissionVerifier renameVerifier = new RenamePermissionVerifier();
   RenamePermissionVerifier renameVerifier = new RenamePermissionVerifier();
   /* test if the permission checking of rename is correct */
   /* test if the permission checking of rename is correct */
-  private void testRename(UnixUserGroupInformation ugi, Path src, Path dst,
+  private void testRename(UserGroupInformation ugi, Path src, Path dst,
       short srcAncestorPermission, short srcParentPermission,
       short srcAncestorPermission, short srcParentPermission,
       short dstAncestorPermission, short dstParentPermission) throws Exception {
       short dstAncestorPermission, short dstParentPermission) throws Exception {
     renameVerifier.set(src, srcAncestorPermission, srcParentPermission, dst,
     renameVerifier.set(src, srcAncestorPermission, srcParentPermission, dst,
@@ -928,7 +928,7 @@ public class TestDFSPermission extends TestCase {
     new DeletePermissionVerifier();
     new DeletePermissionVerifier();
 
 
   /* test if the permission checking of file deletion is correct */
   /* test if the permission checking of file deletion is correct */
-  private void testDeleteFile(UnixUserGroupInformation ugi, Path file,
+  private void testDeleteFile(UserGroupInformation ugi, Path file,
       short ancestorPermission, short parentPermission) throws Exception {
       short ancestorPermission, short parentPermission) throws Exception {
     fileDeletionVerifier.set(file, ancestorPermission, parentPermission);
     fileDeletionVerifier.set(file, ancestorPermission, parentPermission);
     fileDeletionVerifier.verifyPermission(ugi);
     fileDeletionVerifier.verifyPermission(ugi);
@@ -938,7 +938,7 @@ public class TestDFSPermission extends TestCase {
     new DeleteDirPermissionVerifier();
     new DeleteDirPermissionVerifier();
 
 
   /* test if the permission checking of directory deletion is correct */
   /* test if the permission checking of directory deletion is correct */
-  private void testDeleteDir(UnixUserGroupInformation ugi, Path path,
+  private void testDeleteDir(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission, short permission,
       short ancestorPermission, short parentPermission, short permission,
       short[] childPermissions) throws Exception {
       short[] childPermissions) throws Exception {
     dirDeletionVerifier.set(path, ancestorPermission, parentPermission,
     dirDeletionVerifier.set(path, ancestorPermission, parentPermission,
@@ -948,13 +948,13 @@ public class TestDFSPermission extends TestCase {
   }
   }
 
 
   /* log into dfs as the given user */
   /* log into dfs as the given user */
-  private void login(UnixUserGroupInformation ugi) throws IOException {
+  private void login(UserGroupInformation ugi) throws IOException,
+      InterruptedException {
     if (fs != null) {
     if (fs != null) {
       fs.close();
       fs.close();
     }
     }
-    UnixUserGroupInformation.saveToConf(conf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
-    fs = FileSystem.get(conf); // login as ugi
+
+    fs = DFSTestUtil.getFileSystemAs(ugi, conf);
   }
   }
 
 
   /* test non-existent file */
   /* test non-existent file */

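TestDFSPermission seeds its users in a static block; getCurrentUser replaces the old login call, so the catch clause changes from LoginException to IOException. A reduced sketch of that initializer, with illustrative user and group names:

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

public class StaticUserSetup {
  private static UserGroupInformation SUPERUSER;
  private static UserGroupInformation USER1;

  static {
    try {
      // getCurrentUser() declares IOException, so the old LoginException
      // catch block becomes an IOException catch, as in the hunk above.
      SUPERUSER = UserGroupInformation.getCurrentUser();
      USER1 = UserGroupInformation.createUserForTesting("user1",
          new String[] {"group1", "group2"});
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
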
+ 33 - 25
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -25,6 +25,7 @@ import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.io.PrintWriter;
 import java.security.Permission;
 import java.security.Permission;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Arrays;
 import java.util.List;
 import java.util.List;
@@ -47,7 +48,6 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.ToolRunner;
@@ -1121,33 +1121,38 @@ public class TestDFSShell extends TestCase {
   }
   }
 
 
   public void testRemoteException() throws Exception {
   public void testRemoteException() throws Exception {
-    UnixUserGroupInformation tmpUGI = new UnixUserGroupInformation("tmpname",
-        new String[] {
-        "mygroup"});
+    UserGroupInformation tmpUGI = 
+      UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"});
     MiniDFSCluster dfs = null;
     MiniDFSCluster dfs = null;
     PrintStream bak = null;
     PrintStream bak = null;
     try {
     try {
-      Configuration conf = new HdfsConfiguration();
+      final Configuration conf = new HdfsConfiguration();
       dfs = new MiniDFSCluster(conf, 2, true, null);
       dfs = new MiniDFSCluster(conf, 2, true, null);
       FileSystem fs = dfs.getFileSystem();
       FileSystem fs = dfs.getFileSystem();
       Path p = new Path("/foo");
       Path p = new Path("/foo");
       fs.mkdirs(p);
       fs.mkdirs(p);
       fs.setPermission(p, new FsPermission((short)0700));
       fs.setPermission(p, new FsPermission((short)0700));
-      UnixUserGroupInformation.saveToConf(conf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, tmpUGI);
-      FsShell fshell = new FsShell(conf);
       bak = System.err;
       bak = System.err;
-      ByteArrayOutputStream out = new ByteArrayOutputStream();
-      PrintStream tmp = new PrintStream(out);
-      System.setErr(tmp);
-      String[] args = new String[2];
-      args[0] = "-ls";
-      args[1] = "/foo";
-      int ret = ToolRunner.run(fshell, args);
-      assertTrue("returned should be -1", (ret == -1));
-      String str = out.toString();
-      assertTrue("permission denied printed", str.indexOf("Permission denied") != -1);
-      out.reset();
+      
+      tmpUGI.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          FsShell fshell = new FsShell(conf);
+          ByteArrayOutputStream out = new ByteArrayOutputStream();
+          PrintStream tmp = new PrintStream(out);
+          System.setErr(tmp);
+          String[] args = new String[2];
+          args[0] = "-ls";
+          args[1] = "/foo";
+          int ret = ToolRunner.run(fshell, args);
+          assertEquals("returned should be -1", -1, ret);
+          String str = out.toString();
+          assertTrue("permission denied printed", 
+                     str.indexOf("Permission denied") != -1);
+          out.reset();           
+          return null;
+        }
+      });
     } finally {
     } finally {
       if (bak != null) {
       if (bak != null) {
         System.setErr(bak);
         System.setErr(bak);
@@ -1218,7 +1223,7 @@ public class TestDFSShell extends TestCase {
   }
   }
 
 
   public void testLsr() throws Exception {
   public void testLsr() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    final Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
 
 
@@ -1231,13 +1236,16 @@ public class TestDFSShell extends TestCase {
       final Path sub = new Path(root, "sub");
       final Path sub = new Path(root, "sub");
       dfs.setPermission(sub, new FsPermission((short)0));
       dfs.setPermission(sub, new FsPermission((short)0));
 
 
-      final UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+      final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
       final String tmpusername = ugi.getUserName() + "1";
       final String tmpusername = ugi.getUserName() + "1";
-      UnixUserGroupInformation tmpUGI = new UnixUserGroupInformation(
+      UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting(
           tmpusername, new String[] {tmpusername});
           tmpusername, new String[] {tmpusername});
-      UnixUserGroupInformation.saveToConf(conf,
-            UnixUserGroupInformation.UGI_PROPERTY_NAME, tmpUGI);
-      String results = runLsr(new FsShell(conf), root, -1);
+      String results = tmpUGI.doAs(new PrivilegedExceptionAction<String>() {
+        @Override
+        public String run() throws Exception {
+          return runLsr(new FsShell(conf), root, -1);
+        }
+      });
       assertTrue(results.contains("zzz"));
       assertTrue(results.contains("zzz"));
     } finally {
     } finally {
       cluster.shutdown();
       cluster.shutdown();

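TestDFSShell now runs its shell commands inside doAs and returns the result from the block (the lsr case uses a PrivilegedExceptionAction<String>). A sketch of the same idea that returns ToolRunner's exit code; the helper name is illustrative:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ToolRunner;

public class ShellAsUser {
  // Run "-ls <dir>" as the given user and hand back the shell's exit code.
  static int lsAs(UserGroupInformation ugi, final Configuration conf, final String dir)
      throws Exception {
    return ugi.doAs(new PrivilegedExceptionAction<Integer>() {
      @Override
      public Integer run() throws Exception {
        FsShell shell = new FsShell(conf);
        return ToolRunner.run(shell, new String[] {"-ls", dir});
      }
    });
  }
}
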
+ 7 - 8
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -80,7 +79,7 @@ public class TestFileAppend2 extends TestCase {
    * @throws IOException an exception might be thrown
    * @throws IOException an exception might be thrown
    */ 
    */ 
   public void testSimpleAppend() throws IOException {
   public void testSimpleAppend() throws IOException {
-    Configuration conf = new HdfsConfiguration();
+    final Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
     }
@@ -153,16 +152,16 @@ public class TestFileAppend2 extends TestCase {
         fs.close();
         fs.close();
 
 
         // login as a different user
         // login as a different user
-        final UserGroupInformation superuser = UserGroupInformation.getCurrentUGI();
+        final UserGroupInformation superuser = 
+          UserGroupInformation.getCurrentUser();
         String username = "testappenduser";
         String username = "testappenduser";
         String group = "testappendgroup";
         String group = "testappendgroup";
         assertFalse(superuser.getUserName().equals(username));
         assertFalse(superuser.getUserName().equals(username));
         assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
         assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
-        UnixUserGroupInformation appenduser = UnixUserGroupInformation.createImmutable(
-            new String[]{username, group});
-        UnixUserGroupInformation.saveToConf(conf,
-            UnixUserGroupInformation.UGI_PROPERTY_NAME, appenduser);
-        fs = FileSystem.get(conf);
+        UserGroupInformation appenduser = 
+          UserGroupInformation.createUserForTesting(username, new String[]{group});
+        
+        fs = DFSTestUtil.getFileSystemAs(appenduser, conf);
 
 
         // create a file
         // create a file
         Path dir = new Path(root, getClass().getSimpleName());
         Path dir = new Path(root, getClass().getSimpleName());

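TestFileAppend2 first checks that the fake append user really differs from the process owner, so the later permission failures are meaningful. A small sketch of that guard, reusing the user and group strings from the hunk above (the wrapper class is illustrative):

import java.util.Arrays;
import org.apache.hadoop.security.UserGroupInformation;

public class DistinctUserGuard {
  static UserGroupInformation makeAppendUser() throws Exception {
    UserGroupInformation superuser = UserGroupInformation.getCurrentUser();
    UserGroupInformation appenduser =
        UserGroupInformation.createUserForTesting("testappenduser",
                                                  new String[] {"testappendgroup"});
    // Guard the test's assumption: the fake identity must not collide with the
    // process owner, or the later permission checks would prove nothing.
    if (superuser.getUserName().equals(appenduser.getUserName())
        || Arrays.asList(superuser.getGroupNames()).contains("testappendgroup")) {
      throw new IllegalStateException("fake user collides with the real one");
    }
    return appenduser;
  }
}
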
+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -569,7 +569,7 @@ public class TestFileCreation extends junit.framework.TestCase {
   /**
   /**
    * Test that all open files are closed when client dies abnormally.
    * Test that all open files are closed when client dies abnormally.
    */
    */
-  public void testDFSClientDeath() throws IOException {
+  public void testDFSClientDeath() throws IOException, InterruptedException {
     Configuration conf = new HdfsConfiguration();
     Configuration conf = new HdfsConfiguration();
     System.out.println("Testing adbornal client death.");
     System.out.println("Testing adbornal client death.");
     if (simulatedStorage) {
     if (simulatedStorage) {

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java

@@ -35,7 +35,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 import junit.framework.TestCase;
 /**
@@ -99,7 +99,7 @@ public class TestGetBlocks extends TestCase {
           cluster.getNameNodePort());
       NamenodeProtocol namenode = (NamenodeProtocol) RPC.getProxy(
           NamenodeProtocol.class, NamenodeProtocol.versionID, addr,
-          UnixUserGroupInformation.login(CONF), CONF,
+          UserGroupInformation.getCurrentUser(), CONF,
           NetUtils.getDefaultSocketFactory(CONF));
 
       // get blocks of size fileLen from dataNodes[0]

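TestGetBlocks passes the caller's UGI straight to RPC.getProxy as the connection ticket. A hedged sketch of that call in isolation, assuming the same six-argument getProxy overload used in the hunk above; the wrapper class and method names are illustrative:

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyWithTicket {
  static NamenodeProtocol connect(InetSocketAddress addr, Configuration conf)
      throws Exception {
    // The ticket argument is now the live UGI of the caller rather than a
    // UnixUserGroupInformation looked up from the Configuration.
    return (NamenodeProtocol) RPC.getProxy(
        NamenodeProtocol.class, NamenodeProtocol.versionID, addr,
        UserGroupInformation.getCurrentUser(), conf,
        NetUtils.getDefaultSocketFactory(conf));
  }
}
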
+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
   
@@ -33,7 +33,7 @@ public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
     cluster = new MiniDFSCluster(conf, 2, true, null);
     fs = cluster.getFileSystem();
     defaultWorkingDirectory = "/user/" + 
-           UnixUserGroupInformation.login().getUserName();
+           UserGroupInformation.getCurrentUser().getUserName();
   }
   
   @Override

+ 5 - 6
src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java

@@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 import org.apache.log4j.Level;
 
 
@@ -103,11 +102,11 @@ public class TestLeaseRecovery2 extends junit.framework.TestCase {
       // try to re-open the file before closing the previous handle. This
       // try to re-open the file before closing the previous handle. This
       // should fail but will trigger lease recovery.
       // should fail but will trigger lease recovery.
       {
       {
-        Configuration conf2 = new HdfsConfiguration(conf);
-        UnixUserGroupInformation.saveToConf(conf2,
-            UnixUserGroupInformation.UGI_PROPERTY_NAME,
-            new UnixUserGroupInformation(fakeUsername, new String[]{fakeGroup}));
-        FileSystem dfs2 = FileSystem.get(conf2);
+        UserGroupInformation ugi = 
+          UserGroupInformation.createUserForTesting(fakeUsername, 
+                                                    new String [] { fakeGroup});
+        
+        FileSystem dfs2 = DFSTestUtil.getFileSystemAs(ugi, conf);
   
   
         boolean done = false;
         boolean done = false;
         for(int i = 0; i < 10 && !done; i++) {
         for(int i = 0; i < 10 && !done; i++) {

+ 27 - 13
src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
 import java.io.OutputStream;
 import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary;
@@ -28,7 +29,7 @@ import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 
 
@@ -242,18 +243,31 @@ public class TestQuota extends TestCase {
                  (Long.MAX_VALUE/1024/1024 + 1024) + "m", args[2]);
                  (Long.MAX_VALUE/1024/1024 + 1024) + "m", args[2]);
       
       
       // 17:  setQuota by a non-administrator
       // 17:  setQuota by a non-administrator
-      UnixUserGroupInformation.saveToConf(conf, 
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, 
-          new UnixUserGroupInformation(new String[]{"userxx\n", "groupyy\n"}));
-      DFSAdmin userAdmin = new DFSAdmin(conf);
-      args[1] = "100";
-      runCommand(userAdmin, args, true);
-      runCommand(userAdmin, true, "-setSpaceQuota", "1g", args[2]);
-      
-      // 18: clrQuota by a non-administrator
-      args = new String[] {"-clrQuota", parent.toString()};
-      runCommand(userAdmin, args, true);
-      runCommand(userAdmin, true, "-clrSpaceQuota",  args[1]);      
+      final String username = "userxx";
+      UserGroupInformation ugi = 
+        UserGroupInformation.createUserForTesting(username, 
+                                                  new String[]{"groupyy"});
+      
+      final String[] args2 = args.clone(); // need final ref for doAs block
+      ugi.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          assertEquals("Not running as new user", username, 
+              UserGroupInformation.getCurrentUser().getUserName());
+          DFSAdmin userAdmin = new DFSAdmin(conf);
+          
+          args2[1] = "100";
+          runCommand(userAdmin, args2, true);
+          runCommand(userAdmin, true, "-setSpaceQuota", "1g", args2[2]);
+          
+          // 18: clrQuota by a non-administrator
+          String[] args3 = new String[] {"-clrQuota", parent.toString()};
+          runCommand(userAdmin, args3, true);
+          runCommand(userAdmin, true, "-clrSpaceQuota",  args3[1]); 
+          
+          return null;
+        }
+      });
     } finally {
     } finally {
       cluster.shutdown();
       cluster.shutdown();
     }
     }

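The TestQuota change also shows a small Java detail: the anonymous doAs class can only capture final locals, hence the cloned args2 array in the hunk above. A sketch of the same pattern with illustrative names:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ToolRunner;

public class QuotaAsNonAdmin {
  static int setQuotaAs(UserGroupInformation ugi, final Configuration conf,
                        String[] args) throws Exception {
    // Anonymous classes can only see final locals, hence the cloned reference
    // (the same reason the patch introduces "args2 = args.clone()").
    final String[] quotaArgs = args.clone();
    return ugi.doAs(new PrivilegedExceptionAction<Integer>() {
      @Override
      public Integer run() throws Exception {
        return ToolRunner.run(new DFSAdmin(conf), quotaArgs);
      }
    });
  }
}
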
+ 10 - 9
src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
 
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
@@ -29,7 +30,6 @@ import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Assert;
@@ -123,16 +123,17 @@ public class TestReadWhileWriting {
 
 
   static private int userCount = 0;
   static private int userCount = 0;
   //check the file
   //check the file
-  static void checkFile(Path p, int expectedsize, Configuration conf
-      ) throws IOException {
+  static void checkFile(Path p, int expectedsize, final Configuration conf
+      ) throws IOException, InterruptedException {
     //open the file with another user account
     //open the file with another user account
-    final Configuration conf2 = new HdfsConfiguration(conf);
-    final String username = UserGroupInformation.getCurrentUGI().getUserName()
+    final String username = UserGroupInformation.getCurrentUser().getUserName()
         + "_" + ++userCount;
         + "_" + ++userCount;
-    UnixUserGroupInformation.saveToConf(conf2,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME,
-        new UnixUserGroupInformation(username, new String[]{"supergroup"}));
-    final FileSystem fs = FileSystem.get(conf2);
+
+    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, 
+                                 new String[] {"supergroup"});
+    
+    final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
+    
     final DFSDataInputStream in = (DFSDataInputStream)fs.open(p);
     final DFSDataInputStream in = (DFSDataInputStream)fs.open(p);
 
 
     //Check visible length
     //Check visible length

+ 0 - 7
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java

@@ -49,7 +49,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Level;
@@ -86,13 +85,8 @@ public class NNThroughputBenchmark {
   static Configuration config;
   static Configuration config;
   static NameNode nameNode;
   static NameNode nameNode;
 
 
-  private final UserGroupInformation ugi;
-
   NNThroughputBenchmark(Configuration conf) throws IOException, LoginException {
   NNThroughputBenchmark(Configuration conf) throws IOException, LoginException {
     config = conf;
     config = conf;
-    ugi = UnixUserGroupInformation.login(config);
-    UserGroupInformation.setCurrentUser(ugi);
-
     // We do not need many handlers, since each thread simulates a handler
     // We do not need many handlers, since each thread simulates a handler
     // by calling name-node methods directly
     // by calling name-node methods directly
     config.setInt("dfs.namenode.handler.count", 1);
     config.setInt("dfs.namenode.handler.count", 1);
@@ -341,7 +335,6 @@ public class NNThroughputBenchmark {
     }
     }
 
 
     public void run() {
     public void run() {
-      UserGroupInformation.setCurrentUser(ugi);
       localNumOpsExecuted = 0;
       localNumOpsExecuted = 0;
       localCumulativeTime = 0;
       localCumulativeTime = 0;
       arg1 = statsOp.getExecutionArgument(daemonId);
       arg1 = statsOp.getExecutionArgument(daemonId);

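NNThroughputBenchmark simply drops the per-thread setCurrentUser call; worker threads inherit the process identity. If a benchmark ever needed a specific identity per thread, the hypothetical sketch below shows how it could be scoped with doAs around the thread body rather than through global state:

import java.security.PrivilegedAction;
import org.apache.hadoop.security.UserGroupInformation;

public class WorkerIdentity {
  // Hypothetical sketch: give one worker thread its own caller identity by
  // scoping the body in doAs(), instead of calling a global setCurrentUser().
  static Thread workerAs(final UserGroupInformation ugi, final Runnable body) {
    return new Thread(new Runnable() {
      @Override
      public void run() {
        ugi.doAs(new PrivilegedAction<Void>() {
          @Override
          public Void run() {
            body.run();
            return null;
          }
        });
      }
    });
  }
}
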
+ 30 - 13
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -25,6 +25,7 @@ import java.io.PrintStream;
 import java.io.RandomAccessFile;
 import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 import java.nio.channels.FileChannel;
 import java.nio.channels.FileChannel;
+import java.security.PrivilegedExceptionAction;
 import java.util.Random;
 import java.util.Random;
 
 
 import junit.framework.TestCase;
 import junit.framework.TestCase;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Level;
 import org.apache.log4j.Level;
 
 
@@ -131,24 +133,39 @@ public class TestFsck extends TestCase {
 
 
     MiniDFSCluster cluster = null;
     MiniDFSCluster cluster = null;
     try {
     try {
+      // Create a cluster with the current user, write some files
       cluster = new MiniDFSCluster(conf, 4, true, null);
       cluster = new MiniDFSCluster(conf, 4, true, null);
-
-      final FileSystem fs = cluster.getFileSystem();
+      final MiniDFSCluster c2 = cluster;
       final String dir = "/dfsck";
       final String dir = "/dfsck";
       final Path dirpath = new Path(dir);
       final Path dirpath = new Path(dir);
-      util.createFiles(fs, dir);
-      util.waitReplication(fs, dir, (short)3);
-      fs.setPermission(dirpath, new FsPermission((short)0700));
+      final FileSystem fs = c2.getFileSystem();
 
 
-      //run DFSck as another user
-      final Configuration c2 = DFSTestUtil.getConfigurationWithDifferentUsername(conf);
-      System.out.println(runFsck(c2, -1, true, dir));
+      util.createFiles(fs, dir);
+      util.waitReplication(fs, dir, (short) 3);
+      fs.setPermission(dirpath, new FsPermission((short) 0700));
 
 
-      //set permission and try DFSck again
-      fs.setPermission(dirpath, new FsPermission((short)0777));
-      final String outStr = runFsck(c2, 0, true, dir);
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+      // run DFSck as another user, should fail with permission issue
+      UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
+          "ProbablyNotARealUserName", new String[] { "ShangriLa" });
+      fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          System.out.println(runFsck(conf, -1, true, dir));
+          return null;
+        }
+      });
+      
+      // set permission and try DFSck again as the fake user, should succeed
+      fs.setPermission(dirpath, new FsPermission((short) 0777));
+      fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final String outStr = runFsck(conf, 0, true, dir);
+          System.out.println(outStr);
+          assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+          return null;
+        }
+      });
 
 
       util.cleanup(fs, dir);
       util.cleanup(fs, dir);
     } finally {
     } finally {

+ 5 - 17
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java

@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -102,7 +102,7 @@ public class TestHDFSConcat {
    * @throws IOException
    */
   @Test
-  public void testConcat() throws IOException {
+  public void testConcat() throws IOException, InterruptedException {
     final int numFiles = 10;
     long fileLen = blockSize*3;
     FileStatus fStatus;
@@ -142,9 +142,10 @@ public class TestHDFSConcat {
     }
     
     // check permissions -try the operation with the "wrong" user
-    final UnixUserGroupInformation user1 = new UnixUserGroupInformation(
+    final UserGroupInformation user1 = UserGroupInformation.createUserForTesting(
         "theDoctor", new String[] { "tardis" });
         "theDoctor", new String[] { "tardis" });
-    DistributedFileSystem hdfs = (DistributedFileSystem)logonAs(user1, conf, dfs);
+    DistributedFileSystem hdfs = 
+      (DistributedFileSystem)DFSTestUtil.getFileSystemAs(user1, conf);
     try {
       hdfs.concat(trgPath, files);
       fail("Permission exception expected");
@@ -239,19 +240,6 @@ public class TestHDFSConcat {
     assertFalse("File content of concatenated file is different", mismatch);
     assertFalse("File content of concatenated file is different", mismatch);
   }
   }
 
 
-  /***
-   * Create a new configuration for the specified user and return a filesystem
-   * accessed by that user
-   */
-  static private FileSystem logonAs(UnixUserGroupInformation user,
-      Configuration conf, FileSystem hdfs) throws IOException {
-    Configuration conf2 = new Configuration(conf);
-    UnixUserGroupInformation.saveToConf(conf2,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, user);
-
-    return FileSystem.get(conf2);
-  }
-
   // test case when final block is not of a full length
   @Test
   public void testConcatNotCompleteBlock() throws IOException {
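
Note: the removed logonAs(...) helper is superseded by the shared DFSTestUtil.getFileSystemAs(...) helper this patch adds; its exact signature is not visible in this hunk, so the sketch below only mirrors the call shape used in testConcat() and assumes it throws IOException/InterruptedException.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.security.UserGroupInformation;

class ConcatAsWrongUserSketch {
  // Obtains a DistributedFileSystem bound to a synthetic "wrong" user; concat()
  // issued through it should then fail the permission check, as the test expects.
  static DistributedFileSystem fsForTestUser(Configuration conf)
      throws IOException, InterruptedException {
    UserGroupInformation user = UserGroupInformation.createUserForTesting(
        "theDoctor", new String[] { "tardis" });
    return (DistributedFileSystem) DFSTestUtil.getFileSystemAs(user, conf);
  }
}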

+ 6 - 3
src/test/hdfs/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java

@@ -48,6 +48,7 @@ public class TestGroupMappingServiceRefresh {
     
     @Override
     public List<String> getGroups(String user) throws IOException {
+      System.err.println("Getting groups in MockUnixGroupsMapping");
       String g1 = user + (10 * i + 1);
       String g2 = user + (10 * i + 2);
       List<String> l = new ArrayList<String>(2);
@@ -67,6 +68,7 @@ public class TestGroupMappingServiceRefresh {
     config.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 
         groupRefreshTimeoutSec);
     
+    Groups.getUserToGroupsMappingService(config);
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
     cluster = new MiniDFSCluster(0, config, 1, true, true, true,  null, null, null, null);
     cluster = new MiniDFSCluster(0, config, 1, true, true, true,  null, null, null, null);
     cluster.waitActive();
     cluster.waitActive();
@@ -83,8 +85,8 @@ public class TestGroupMappingServiceRefresh {
   public void testGroupMappingRefresh() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
     String [] args =  new String[]{"-refreshUserToGroupsMappings"};
-    Groups groups = SecurityUtil.getUserToGroupsMappingService(config);
-    String user = UnixUserGroupInformation.getUnixUserName();
+    Groups groups = Groups.getUserToGroupsMappingService(config);
+    String user = UserGroupInformation.getCurrentUser().getUserName();
     System.out.println("first attempt:");
     System.out.println("first attempt:");
     List<String> g1 = groups.getGroups(user);
     List<String> g1 = groups.getGroups(user);
     String [] str_groups = new String [g1.size()];
     String [] str_groups = new String [g1.size()];
@@ -104,7 +106,8 @@ public class TestGroupMappingServiceRefresh {
     g3.toArray(str_groups);
     System.out.println(Arrays.toString(str_groups));
     for(int i=0; i<g3.size(); i++) {
-      assertFalse("Should be different group ", g1.get(i).equals(g3.get(i)));
+      assertFalse("Should be different group: " + g1.get(i) + " and " + g3.get(i), 
+          g1.get(i).equals(g3.get(i)));
     }
     
     // test time out
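
A compact sketch of the two replaced lookups in this test, using only calls that appear in the hunk (GroupLookupSketch is a placeholder class name):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;

class GroupLookupSketch {
  // Mirrors the replaced lines: the Groups service now comes from Groups itself,
  // and the user name from the current UGI rather than UnixUserGroupInformation.
  static List<String> groupsOfCurrentUser(Configuration conf) throws IOException {
    Groups groups = Groups.getUserToGroupsMappingService(conf);
    String user = UserGroupInformation.getCurrentUser().getUserName();
    // Results are cached for the period set via HADOOP_SECURITY_GROUPS_CACHE_SECS;
    // the test's -refreshUserToGroupsMappings call forces a reload.
    return groups.getGroups(user);
  }
}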

+ 6 - 12
src/test/hdfs/org/apache/hadoop/security/TestPermission.java

@@ -23,15 +23,14 @@ import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.*;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Level;
 
 import junit.framework.TestCase;
 
@@ -39,10 +38,6 @@ import junit.framework.TestCase;
 public class TestPermission extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestPermission.class);
 
-  {
-    ((Log4JLogger)UserGroupInformation.LOG).getLogger().setLevel(Level.ALL);
-  }
-
   final private static Path ROOT_PATH = new Path("/data");
   final private static Path ROOT_PATH = new Path("/data");
   final private static Path CHILD_DIR1 = new Path(ROOT_PATH, "child1");
   final private static Path CHILD_DIR1 = new Path(ROOT_PATH, "child1");
   final private static Path CHILD_DIR2 = new Path(ROOT_PATH, "child2");
   final private static Path CHILD_DIR2 = new Path(ROOT_PATH, "child2");
@@ -120,7 +115,7 @@ public class TestPermission extends TestCase {
   }
 
   public void testFilePermision() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
     cluster.waitActive();
@@ -163,11 +158,10 @@
 
       ////////////////////////////////////////////////////////////////
       // test illegal file/dir creation
-      UnixUserGroupInformation userGroupInfo = new UnixUserGroupInformation(
-          USER_NAME, GROUP_NAMES );
-      UnixUserGroupInformation.saveToConf(conf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, userGroupInfo);
-      FileSystem userfs = FileSystem.get(conf);
+      UserGroupInformation userGroupInfo = 
+        UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES );
+      
+      FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
 
       // make sure mkdir of a existing directory that is not owned by 
       // this user does not throw an exception.
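
A hedged sketch of the permission-denial check this test performs with the new API; the user/group names, the 0755 mode, and the exact exception type caught are illustrative assumptions, not values from the patch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;

class IllegalCreateSketch {
  // Returns true if mkdirs() is denied for a non-owner test user, which is the
  // outcome the "illegal file/dir creation" section above expects.
  static boolean mkdirDenied(Configuration conf, Path dir)
      throws IOException, InterruptedException {
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        "someUser", new String[] { "someGroup" });   // placeholder names
    FileSystem userfs = DFSTestUtil.getFileSystemAs(ugi, conf);
    try {
      userfs.mkdirs(dir, new FsPermission((short) 0755));
      return false;                      // unexpectedly allowed
    } catch (AccessControlException e) {
      return true;                       // denied, as the test expects
    }
  }
}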