
Remove parent's env vars from child processes

(cherry picked from commit ac8fb579c6058fec60caf30682f902413d68edf3)
Robert Kanter 9 years ago
parent
commit
3c3d003402
9 changed files with 87 additions and 13 deletions
  1. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java (+28, -2)
  2. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java (+42, -0)
  3. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java (+3, -1)
  4. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java (+3, -1)
  5. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java (+1, -1)
  6. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java (+5, -3)
  7. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java (+2, -2)
  8. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java (+2, -2)
  9. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java (+1, -1)
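The heart of the change is in Shell.java: Java's ProcessBuilder pre-populates a child's environment with a copy of the parent's, so a daemon configured to read its CredentialProvider password from HADOOP_CREDSTORE_PASSWORD would otherwise hand that password to every process it forks. A minimal standalone sketch of the mechanism, assuming AbstractJavaKeyStoreProvider.CREDENTIAL_PASSWORD_NAME resolves to "HADOOP_CREDSTORE_PASSWORD"; the class and method names below are illustrative, not part of the patch:

  import java.io.IOException;

  public class EnvScrubSketch {
    // Illustrative stand-in for AbstractJavaKeyStoreProvider.CREDENTIAL_PASSWORD_NAME.
    private static final String CREDENTIAL_PASSWORD_NAME = "HADOOP_CREDSTORE_PASSWORD";

    public static Process launch(String[] command, boolean inheritParentEnv)
        throws IOException {
      ProcessBuilder builder = new ProcessBuilder(command);
      // builder.environment() starts out as a copy of this JVM's own environment.
      if (!inheritParentEnv) {
        // Drop the credential-store password so it cannot leak into a child
        // process that may be owned by a different user.
        builder.environment().remove(CREDENTIAL_PASSWORD_NAME);
      }
      return builder.start();
    }
  }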

+ 28 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java

@@ -34,6 +34,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -362,6 +363,9 @@ public abstract class Shell {
   /** If or not script timed out*/
   private final AtomicBoolean timedOut = new AtomicBoolean(false);
 
+  /** Indicates if the parent env vars should be inherited or not*/
+  protected boolean inheritParentEnv = true;
+
   /**
    *  Centralized logic to discover and validate the sanity of the Hadoop
    *  home directory.
@@ -846,6 +850,20 @@ public abstract class Shell {
     if (environment != null) {
       builder.environment().putAll(this.environment);
     }
+
+    // Remove all env vars from the Builder to prevent leaking of env vars from
+    // the parent process.
+    if (!inheritParentEnv) {
+      // branch-2: Only do this for HADOOP_CREDSTORE_PASSWORD
+      // Sometimes daemons are configured to use the CredentialProvider feature
+      // and given their jceks password via an environment variable.  We need to
+      // make sure to remove it so it doesn't leak to child processes, which
+      // might be owned by a different user.  For example, the NodeManager
+      // running a User's container.
+      builder.environment().remove(
+          AbstractJavaKeyStoreProvider.CREDENTIAL_PASSWORD_NAME);
+    }
+
     if (dir != null) {
       builder.directory(this.dir);
     }
@@ -1073,6 +1091,11 @@ public abstract class Shell {
       this(execString, dir, env , 0L);
     }
 
+    public ShellCommandExecutor(String[] execString, File dir,
+                                Map<String, String> env, long timeout) {
+      this(execString, dir, env , timeout, true);
+    }
+
     /**
      * Create a new instance of the ShellCommandExecutor to execute a command.
      * 
@@ -1085,10 +1108,12 @@ public abstract class Shell {
      *            environment is not modified.
      * @param timeout Specifies the time in milliseconds, after which the
      *                command will be killed and the status marked as timed-out.
-     *                If 0, the command will not be timed out. 
+     *                If 0, the command will not be timed out.
+     * @param inheritParentEnv Indicates if the process should inherit the env
+     *                         vars from the parent process or not.
      */
     public ShellCommandExecutor(String[] execString, File dir, 
-        Map<String, String> env, long timeout) {
+        Map<String, String> env, long timeout, boolean inheritParentEnv) {
       command = execString.clone();
       if (dir != null) {
         setWorkingDirectory(dir);
@@ -1097,6 +1122,7 @@ public abstract class Shell {
         setEnvironment(env);
       }
       timeOutInterval = timeout;
+      this.inheritParentEnv = inheritParentEnv;
     }
 
     /**
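For callers, the new five-argument constructor is the opt-out switch. A hedged usage sketch; the command and environment map are made up for illustration, and note that in this branch-2 backport inheritParentEnv = false only strips HADOOP_CREDSTORE_PASSWORD rather than the whole parent environment:

  import java.io.IOException;
  import java.util.Collections;
  import java.util.Map;
  import org.apache.hadoop.util.Shell.ShellCommandExecutor;

  public class NoInheritExample {
    public static void main(String[] args) throws IOException {
      Map<String, String> extraEnv = Collections.singletonMap("FOO", "bar");
      ShellCommandExecutor exec = new ShellCommandExecutor(
          new String[] {"env"},  // command to run
          null,                  // working directory (unchanged)
          extraEnv,              // extra variables for the child
          0L,                    // timeout in ms; 0 = never time out
          false);                // inheritParentEnv
      exec.execute();
      System.out.println(exec.getOutput());  // no HADOOP_CREDSTORE_PASSWORD here
    }
  }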

+ 42 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.util;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider;
 import org.junit.Assert;
 
 import java.io.BufferedReader;
@@ -29,6 +30,9 @@ import java.io.PrintWriter;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
 
 import org.apache.hadoop.fs.FileUtil;
 import static org.apache.hadoop.util.Shell.*;
@@ -143,6 +147,44 @@ public class TestShell extends Assert {
     shellFile.delete();
     assertTrue("Script did not timeout" , shexc.isTimedOut());
   }
+
+  @Test
+  public void testEnvVarsWithInheritance() throws Exception {
+    Assume.assumeFalse(WINDOWS);
+    testEnvHelper(true);
+  }
+
+  @Test
+  public void testEnvVarsWithoutInheritance() throws Exception {
+    Assume.assumeFalse(WINDOWS);
+    testEnvHelper(false);
+  }
+
+  private void testEnvHelper(boolean inheritParentEnv) throws Exception {
+    Map<String, String> customEnv = Collections.singletonMap(
+        AbstractJavaKeyStoreProvider.CREDENTIAL_PASSWORD_NAME, "foo");
+    Shell.ShellCommandExecutor command = new ShellCommandExecutor(
+        new String[]{"env"}, null, customEnv, 0L,
+        inheritParentEnv);
+    command.execute();
+    String[] varsArr = command.getOutput().split("\n");
+    Map<String, String> vars = new HashMap<>();
+    for (String var : varsArr) {
+      int eqIndex = var.indexOf('=');
+      vars.put(var.substring(0, eqIndex), var.substring(eqIndex + 1));
+    }
+    Map<String, String> expectedEnv = new HashMap<>();
+    expectedEnv.putAll(System.getenv());
+    if (inheritParentEnv) {
+      expectedEnv.putAll(customEnv);
+    } else {
+      assertFalse("child process environment should not have contained "
+              + AbstractJavaKeyStoreProvider.CREDENTIAL_PASSWORD_NAME,
+          vars.containsKey(
+              AbstractJavaKeyStoreProvider.CREDENTIAL_PASSWORD_NAME));
+    }
+    assertEquals(expectedEnv, vars);
+  }
   
   private static int countTimerThreads() {
     ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();

+ 3 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java

@@ -282,7 +282,9 @@ public class DefaultContainerExecutor extends ContainerExecutor {
       return new ShellCommandExecutor(
           command,
           wordDir,
-          environment); 
+          environment,
+          0L,
+          false);
   }
 
   protected LocalWrapperScriptBuilder getLocalWrapperScriptBuilder(

+ 3 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java

@@ -284,7 +284,9 @@ public class DockerContainerExecutor extends ContainerExecutor {
       shExec = new ShellCommandExecutor(
         command,
         new File(containerWorkDir.toUri().getPath()),
-        container.getLaunchContext().getEnvironment());      // sanitized env
+        container.getLaunchContext().getEnvironment(),      // sanitized env
+        0L,
+        false);
       if (isContainerActive(containerId)) {
         shExec.execute();
       } else {

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java

@@ -262,7 +262,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
           PrivilegedOperationExecutor.getInstance(conf);
 
       privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
-          initializeContainerOp, null, null, false);
+          initializeContainerOp, null, null, false, true);
 
     } catch (PrivilegedOperationException e) {
       int exitCode = e.getExitCode();

+ 5 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java

@@ -133,18 +133,19 @@ public class PrivilegedOperationExecutor {
    * @param workingDir     (optional) working directory for execution
    * @param env            (optional) env of the command will include specified vars
    * @param grabOutput     return (possibly large) shell command output
+   * @param inheritParentEnv inherit the env vars from the parent process
    * @return stdout contents from shell executor - useful for some privileged
    * operations - e.g --tc_read
    * @throws org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException
    */
   public String executePrivilegedOperation(List<String> prefixCommands,
       PrivilegedOperation operation, File workingDir,
-      Map<String, String> env, boolean grabOutput)
+      Map<String, String> env, boolean grabOutput, boolean inheritParentEnv)
       throws PrivilegedOperationException {
     String[] fullCommandArray = getPrivilegedOperationExecutionCommand
         (prefixCommands, operation);
     ShellCommandExecutor exec = new ShellCommandExecutor(fullCommandArray,
-        workingDir, env);
+        workingDir, env, 0L, inheritParentEnv);
 
     try {
       exec.execute();
@@ -199,7 +200,8 @@ public class PrivilegedOperationExecutor {
    */
   public String executePrivilegedOperation(PrivilegedOperation operation,
       boolean grabOutput) throws PrivilegedOperationException {
-    return executePrivilegedOperation(null, operation, null, null, grabOutput);
+    return executePrivilegedOperation(null, operation, null, null, grabOutput,
+        true);
   }
 
   //Utility functions for squashing together operations in supported ways
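In the runtime classes below, the pattern is that container launch passes inheritParentEnv = false (the child ultimately runs user code), while signaling keeps true, presumably because that path does not start a user-owned process. A hedged sketch of a launch-side call; the hypothetical helper and its operation and environment arguments stand in for whatever the container runtime has already built:

  import java.util.Map;
  import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
  import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
  import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;

  public class LaunchCallSketch {
    // Hypothetical helper wrapping the new six-argument overload.
    static String launchWithoutParentEnv(PrivilegedOperationExecutor executor,
        PrivilegedOperation launchOp, Map<String, String> containerEnv)
        throws PrivilegedOperationException {
      return executor.executePrivilegedOperation(
          null,          // no prefix commands
          launchOp,      // operation handed to the container-executor binary
          null,          // working directory
          containerEnv,  // the container's sanitized environment
          false,         // grabOutput
          false);        // inheritParentEnv: don't leak the NM's credstore password
    }
  }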

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java

@@ -102,7 +102,7 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
     try {
       privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
             launchOp, null, container.getLaunchContext().getEnvironment(),
-            false);
+            false, false);
     } catch (PrivilegedOperationException e) {
       LOG.warn("Launch container failed. Exception: ", e);
 
@@ -134,7 +134,7 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
 
       executor.executePrivilegedOperation(null,
           signalOp, null, container.getLaunchContext().getEnvironment(),
-          false);
+          false, true);
     } catch (PrivilegedOperationException e) {
       //Don't log the failure here. Some kinds of signaling failures are
       // acceptable. Let the calling executor decide what to do.

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java

@@ -311,7 +311,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     try {
       privilegedOperationExecutor.executePrivilegedOperation(null,
           launchOp, null, container.getLaunchContext().getEnvironment(),
-          false);
+          false, false);
     } catch (PrivilegedOperationException e) {
       LOG.warn("Launch container failed. Exception: ", e);
 
@@ -340,7 +340,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 
       executor.executePrivilegedOperation(null,
           signalOp, null, container.getLaunchContext().getEnvironment(),
-          false);
+          false, true);
     } catch (PrivilegedOperationException e) {
       LOG.warn("Signal container failed. Exception: ", e);
 

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java

@@ -167,7 +167,7 @@ public class TestDockerContainerRuntime {
     // warning annotation on the entire method
     verify(mockExecutor, times(1))
         .executePrivilegedOperation(anyList(), opCaptor.capture(), any(
-            File.class), any(Map.class), eq(false));
+            File.class), any(Map.class), eq(false), eq(false));
 
     PrivilegedOperation op = opCaptor.getValue();