
AMBARI-9828. Ability to handle envs where sudo is not available (aonishuk)

Andrew Onishuk, 10 years ago
commit 6076060d76
64 changed files with 337 additions and 247 deletions
  1. +8 -9    ambari-agent/conf/unix/ambari-agent
  2. +1 -0    ambari-agent/conf/unix/ambari-env.sh
  3. +52 -0   ambari-agent/conf/unix/ambari-sudo.sh
  4. +23 -0   ambari-agent/pom.xml
  5. +2 -1    ambari-agent/src/main/python/ambari_agent/Hardware.py
  6. +2 -1    ambari-agent/src/main/python/ambari_agent/HostCleanup.py
  7. +3 -2    ambari-agent/src/main/python/ambari_agent/main.py
  8. +3 -3    ambari-agent/src/test/python/ambari_agent/TestMain.py
  9. +2 -2    ambari-agent/src/test/python/resource_management/TestCopyFromLocal.py
  10. +1 -1   ambari-agent/src/test/python/resource_management/TestExecuteResource.py
  11. +3 -3   ambari-agent/src/test/python/resource_management/TestGroupResource.py
  12. +4 -4   ambari-agent/src/test/python/resource_management/TestMonitorWebserverResource.py
  13. +10 -10 ambari-agent/src/test/python/resource_management/TestUserResource.py
  14. +0 -0   ambari-agent/src/test/python/tmp_hostcheck.result
  15. +1 -0   ambari-common/src/main/python/ambari_commons/constants.py
  16. +14 -8  ambari-common/src/main/python/resource_management/core/shell.py
  17. +1 -0   ambari-server/conf/unix/ambari-env.sh
  18. +7 -7   ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/setupGanglia.sh
  19. +1 -1   ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaLib.sh.j2
  20. +1 -1   ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
  21. +2 -0   ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
  22. +17 -23 ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
  23. +5 -5   ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/addMysqlUser.sh
  24. +3 -3   ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/removeMysqlUser.sh
  25. +5 -5   ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/templetonSmoke.sh
  26. +2 -2   ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_service.py
  27. +2 -0   ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
  28. +15 -15 ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/oozieSmoke2.sh
  29. +3 -3   ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
  30. +2 -0   ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params.py
  31. +2 -0   ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params.py
  32. +2 -2   ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/service.py
  33. +1 -1   ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py
  34. +1 -1   ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkService.sh
  35. +7 -7   ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkSmoke.sh
  36. +2 -0   ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
  37. +2 -2   ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
  38. +1 -1   ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
  39. +2 -0   ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
  40. +2 -2   ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
  41. +3 -3   ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
  42. +2 -2   ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
  43. +2 -2   ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py
  44. +2 -2   ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py
  45. +2 -2   ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
  46. +2 -2   ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
  47. +8 -8   ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
  48. +4 -4   ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
  49. +37 -39 ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
  50. +1 -1   ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
  51. +4 -4   ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
  52. +6 -6   ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
  53. +4 -4   ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
  54. +4 -4   ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
  55. +4 -4   ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
  56. +1 -1   ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
  57. +4 -4   ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
  58. +4 -4   ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
  59. +4 -4   ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
  60. +4 -4   ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
  61. +8 -8   ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
  62. +4 -4   ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
  63. +4 -4   ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
  64. +2 -2   ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py

+ 8 - 9
ambari-agent/conf/unix/ambari-agent

@@ -48,20 +48,19 @@ AMBARI_AGENT_PY_SCRIPT=/usr/lib/python2.6/site-packages/ambari_agent/AmbariAgent
 OK=1
 NOTOK=0
 
+current_user=`awk -v val=$EUID -F ":" '$3==val{print $1}' /etc/passwd`
 echo "" | sudo -S -l > /dev/null 2>&1
-if [ "$?" != "0" ]; then
+if [ "$?" != "0" ] && [ "$current_user" != "root" ] ; then
  echo "You can't perform this operation as non-sudoer user. Please, re-login as one"
  exit 0
 fi
 
-current_user=`awk -v val=$EUID -F ":" '$3==val{print $1}' /etc/passwd`
-
 change_files_permissions() {
-	sudo chown -R $current_user "/var/run/ambari-agent"
-	sudo chown -R $current_user "/var/log/ambari-agent"
-	sudo chown -R $current_user "/var/lib/ambari-agent/data"
-	sudo chown -R $current_user "/var/lib/ambari-agent/cache"
-	sudo chown 	  $current_user "/usr/lib/ambari-agent"
+	ambari-sudo.sh chown -R $current_user "/var/run/ambari-agent"
+	ambari-sudo.sh chown -R $current_user "/var/log/ambari-agent"
+	ambari-sudo.sh chown -R $current_user "/var/lib/ambari-agent/data"
+	ambari-sudo.sh chown -R $current_user "/var/lib/ambari-agent/cache"
+	ambari-sudo.sh chown 	  $current_user "/usr/lib/ambari-agent"
 }
 
 if [ -a /usr/bin/python2.7 ] && [ -z "$PYTHON" ]; then
@@ -85,7 +84,7 @@ fi
 
 # Reading the environment file
 if [ -a /var/lib/ambari-agent/ambari-env.sh ]; then
-  sudo chown -R $current_user "/var/lib/ambari-agent/ambari-env.sh"
+  /var/lib/ambari-agent/ambari-sudo.sh chown -R $current_user "/var/lib/ambari-agent/ambari-env.sh"
   . /var/lib/ambari-agent/ambari-env.sh
 fi

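Note, for illustration only (not part of the commit): the awk one-liner above resolves the effective UID to a username by scanning /etc/passwd. Python's standard library performs the same lookup in a single call, assuming a Unix host:

import os
import pwd

# Equivalent of: awk -v val=$EUID -F ":" '$3==val{print $1}' /etc/passwd
current_user = pwd.getpwuid(os.geteuid()).pw_name
print(current_user)  # e.g. "root" or "ambari"
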
+ 1 - 0
ambari-agent/conf/unix/ambari-env.sh

@@ -16,3 +16,4 @@
 # To change a passphrase used by the agent adjust the line below. This value is used when no passphrase is
 # given through environment variable
 AMBARI_PASSPHRASE="DEV"
+export PATH=$PATH:/var/lib/ambari-agent

+ 52 - 0
ambari-agent/conf/unix/ambari-sudo.sh

@@ -0,0 +1,52 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SUDO_BINARY="/usr/bin/sudo"
+
+if [[ $# -eq 0 ]] ; then
+  echo 'usage: ambari-sudo.sh [sudo_arg1, sudo_arg2 ...] command [arg1, arg2 ...]'
+  exit 1
+fi
+
+$SUDO_BINARY -S -l 2>/dev/null 1>/dev/null
+
+if [ $? == 0 ] ; then
+  $SUDO_BINARY "$@"
+else
+  ENV=()
+  SUDO_ARGS=()
+
+  for i ; do
+    if [[ "$i" == *"="* ]] ; then
+      ENV+=("$i")
+      shift
+    elif [[ "$i" == "-"* ]] ; then
+      SUDO_ARGS+=("$i")
+      shift
+    else
+      break
+    fi
+  done
+  
+  #echo "sudo arguments: ${SUDO_ARGS[@]}"
+  #echo "env: ${ENV[@]}"
+  #echo "args: $@"
+
+  if [ "$ENV" ] ; then
+    export "${ENV[@]}"
+  fi
+
+  "$@"
+fi

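Note on the fallback branch above: when `sudo -l` fails, ambari-sudo.sh exports leading VAR=value words, drops leading sudo flags (-H, -E, ...), and runs the remaining command directly. A minimal Python sketch of that argument-partitioning logic, for illustration only (the shipped implementation is the bash script above):

import os
import subprocess

def ambari_sudo_fallback(args):
    """Mimics the else-branch of ambari-sudo.sh when sudo is unavailable."""
    env = dict(os.environ)
    i = 0
    while i < len(args):
        if "=" in args[i]:             # environment assignment, e.g. JAVA_HOME=/usr/jdk64
            key, _, value = args[i].partition("=")
            env[key] = value
        elif args[i].startswith("-"):  # sudo option such as -H or -E: ignored in fallback
            pass
        else:
            break                      # first real command word
        i += 1
    return subprocess.call(args[i:], env=env)

# ambari_sudo_fallback(["JAVA_HOME=/usr/jdk64", "-H", "-E", "chown", "-R", "ambari", "/var/log/ambari-agent"])
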
+ 23 - 0
ambari-agent/pom.xml

@@ -237,6 +237,18 @@
                </source>
              </sources>
            </mapping>
+            <mapping>
+              <directory>/var/lib/ambari-agent/</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <directoryIncluded>false</directoryIncluded>
+              <sources>
+                <source>
+                  <location>conf/unix/ambari-sudo.sh</location>
+                </source>
+              </sources>
+            </mapping>
            <mapping>
              <directory>${ambari_commons.install.dir}</directory>
              <username>root</username>
@@ -557,6 +569,17 @@
                <filemode>755</filemode>
              </mapper>
            </data>
+            <data>
+              <src>conf/unix/ambari-sudo.sh</src>
+              <type>file</type>
+              <mapper>
+                <type>perm</type>
+                <prefix>/var/lib/ambari-agent</prefix>
+                <user>root</user>
+                <group>root</group>
+                <filemode>755</filemode>
+              </mapper>
+            </data>
            <data>
              <src>${target.cache.dir}</src>
              <type>directory</type>

+ 2 - 1
ambari-agent/src/main/python/ambari_agent/Hardware.py

@@ -22,6 +22,7 @@ import os.path
 import logging
 import subprocess
 import platform
+from ambari_commons.constants import AMBARI_SUDO_BINARY
 from ambari_commons.shell import shellRunner
 from Facter import Facter
 from ambari_commons.os_check import OSConst, OSCheck
@@ -86,7 +87,7 @@ class Hardware:
 
   @staticmethod
   def _chk_mount(mountpoint):
-    if subprocess.call("sudo test -w '{0}'".format(mountpoint), shell=True) == 0:
+    if subprocess.call("{0} test -w '{1}'".format(AMBARI_SUDO_BINARY, mountpoint), shell=True) == 0:
       return True
     else:
       return False

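Note: AMBARI_SUDO_BINARY is the bare name "ambari-sudo.sh", so this shell=True call relies on /var/lib/ambari-agent being on PATH, which the ambari-env.sh hunks in this commit add. A self-contained sketch of the resulting check, under those assumptions:

import subprocess

AMBARI_SUDO_BINARY = "ambari-sudo.sh"  # value from ambari_commons.constants

def chk_mount(mountpoint):
    # True when the (possibly escalated) user can write to the mountpoint
    return subprocess.call("{0} test -w '{1}'".format(AMBARI_SUDO_BINARY, mountpoint),
                           shell=True) == 0
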
+ 2 - 1
ambari-agent/src/main/python/ambari_agent/HostCleanup.py

@@ -36,6 +36,7 @@ import shlex
 import datetime
 from AmbariConfig import AmbariConfig
 from ambari_commons import OSCheck, OSConst
+from ambari_commons.constants import AMBARI_SUDO_BINARY
 
 
 logger = logging.getLogger()
@@ -476,7 +477,7 @@ class HostCleanup:
   # Run command as sudoer by default, if root no issues
   def run_os_command(self, cmd, runWithSudo=True):
     if runWithSudo:
-      cmd = 'sudo ' + cmd
+      cmd = AMBARI_SUDO_BINARY + ' ' + cmd
     logger.info('Executing command: ' + str(cmd))
     if type(cmd) == str:
       cmd = shlex.split(cmd)

+ 3 - 2
ambari-agent/src/main/python/ambari_agent/main.py

@@ -41,6 +41,7 @@ from ambari_commons.shell import shellRunner
 from ambari_commons import shell
 import HeartbeatHandlers
 from HeartbeatHandlers import bind_signal_handlers
+from ambari_commons.constants import AMBARI_SUDO_BINARY
 logger = logging.getLogger()
 
 formatstr = "%(levelname)s %(asctime)s %(filename)s:%(lineno)d - %(message)s"
@@ -153,7 +154,7 @@ def stop_agent():
     pid = int(pid)
     f.close()
     runner = shellRunner()
-    runner.run(['sudo', 'kill', '-15', str(pid)])
+    runner.run([AMBARI_SUDO_BINARY, 'kill', '-15', str(pid)])
     time.sleep(5)
     if os.path.exists(ProcessHelper.pidfile):
       raise Exception("PID file still exists.")
@@ -162,7 +163,7 @@ def stop_agent():
     if pid == -1:
       print ("Agent process is not running")
     else:
-      res = runner.run(['sudo', 'kill', '-9', str(pid)])
+      res = runner.run([AMBARI_SUDO_BINARY, 'kill', '-9', str(pid)])
       if res['exitCode'] != 0:
         raise Exception("Error while performing agent stop. " + res['error'] + res['output'])
     os._exit(1)

+ 3 - 3
ambari-agent/src/test/python/ambari_agent/TestMain.py

@@ -219,7 +219,7 @@ class TestMain(unittest.TestCase):
     # Testing normal exit
     exists_mock.return_value = False
     main.stop_agent()
-    kill_mock.assert_called_with(['sudo', 'kill', '-15', pid])
+    kill_mock.assert_called_with(['ambari-sudo.sh', 'kill', '-15', pid])
     _exit_mock.assert_called_with(0)
 
     # Restore
@@ -230,8 +230,8 @@ class TestMain(unittest.TestCase):
     # Testing exit when failed to remove pid file
     exists_mock.return_value = True
     main.stop_agent()
-    kill_mock.assert_any_call(['sudo', 'kill', '-15', pid])
-    kill_mock.assert_any_call(['sudo', 'kill', '-9', pid])
+    kill_mock.assert_any_call(['ambari-sudo.sh', 'kill', '-15', pid])
+    kill_mock.assert_any_call(['ambari-sudo.sh', 'kill', '-9', pid])
     _exit_mock.assert_called_with(1)
 
     # Restore

+ 2 - 2
ambari-agent/src/test/python/resource_management/TestCopyFromLocal.py

@@ -39,7 +39,7 @@ class TestCopyFromLocal(TestCase):
       self.assertEqual('fs -copyFromLocal /user/testdir/*.files /apps/test/',
                        call_arg_list[0][0][0].command)
       print call_arg_list[0][0][0].arguments
-      self.assertEquals({'not_if': "/usr/bin/sudo su user1 -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]PATH=$PATH:/usr/bin hadoop fs -ls /apps/test//*.files'", 'bin_dir': '/usr/bin', 'user': 'user1', 'conf_dir': '/etc/hadoop/conf'},
+      self.assertEquals({'not_if': "ambari-sudo.sh su user1 -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]PATH=$PATH:/usr/bin hadoop fs -ls /apps/test//*.files'", 'bin_dir': '/usr/bin', 'user': 'user1', 'conf_dir': '/etc/hadoop/conf'},
                         call_arg_list[0][0][0].arguments)
       self.assertEquals('fs -chown user1 /apps/test//*.files', call_arg_list[1][0][0].command)
       self.assertEquals({'user': 'hdfs', 'bin_dir': '/usr/bin', 'conf_dir': '/etc/hadoop/conf'}, call_arg_list[1][0][0].arguments)
@@ -60,7 +60,7 @@ class TestCopyFromLocal(TestCase):
       call_arg_list = execute_hadoop_mock.call_args_list
       self.assertEqual('fs -copyFromLocal /user/testdir/*.files /apps/test/',
                        call_arg_list[0][0][0].command)
-      self.assertEquals({'not_if': "/usr/bin/sudo su user1 -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]PATH=$PATH:/usr/bin hadoop fs -ls /apps/test//*.files'", 'bin_dir': '/usr/bin', 'user': 'user1', 'conf_dir': '/etc/hadoop/conf'},
+      self.assertEquals({'not_if': "ambari-sudo.sh su user1 -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]PATH=$PATH:/usr/bin hadoop fs -ls /apps/test//*.files'", 'bin_dir': '/usr/bin', 'user': 'user1', 'conf_dir': '/etc/hadoop/conf'},
                         call_arg_list[0][0][0].arguments)
       self.assertEquals('fs -chown user1:hdfs /apps/test//*.files', call_arg_list[1][0][0].command)
       self.assertEquals({'user': 'hdfs', 'bin_dir': '/usr/bin', 'conf_dir': '/etc/hadoop/conf'}, call_arg_list[1][0][0].arguments)

+ 1 - 1
ambari-agent/src/test/python/resource_management/TestExecuteResource.py

@@ -185,7 +185,7 @@ class TestExecuteResource(TestCase):
       )
       
 
-    expected_command = ['/bin/bash', '--login', '--noprofile', '-c', '/usr/bin/sudo su test_user -l -s /bin/bash -c \'export  PATH=' + os.environ['PATH'] + ':/bin JAVA_HOME=/test/java/home ; echo "1"\'']
+    expected_command = ['/bin/bash', '--login', '--noprofile', '-c', 'ambari-sudo.sh su test_user -l -s /bin/bash -c \'export  PATH=' + os.environ['PATH'] + ':/bin JAVA_HOME=/test/java/home ; echo "1"\'']
     self.assertEqual(popen_mock.call_args_list[0][0][0], expected_command)
 
 

+ 3 - 3
ambari-agent/src/test/python/resource_management/TestGroupResource.py

@@ -51,7 +51,7 @@ class TestGroupResource(TestCase):
     
 
     self.assertEqual(popen_mock.call_count, 1)
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E groupadd -p secure hadoop"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E groupadd -p secure hadoop"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
     getgrnam_mock.assert_called_with('hadoop')
 
 
@@ -73,7 +73,7 @@ class TestGroupResource(TestCase):
     
 
     self.assertEqual(popen_mock.call_count, 1)
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E groupmod -p secure -g 2 mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E groupmod -p secure -g 2 mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
     getgrnam_mock.assert_called_with('mapred')
 
 
@@ -98,7 +98,7 @@ class TestGroupResource(TestCase):
     except Fail:
       pass
     self.assertEqual(popen_mock.call_count, 1)
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E groupmod -p secure -g 2 mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E groupmod -p secure -g 2 mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
     getgrnam_mock.assert_called_with('mapred')
 
 

+ 4 - 4
ambari-agent/src/test/python/resource_management/TestMonitorWebserverResource.py

@@ -32,9 +32,9 @@ class TestMonitorWebserverResource(TestCase):
       MonitorWebserverProvider(MonitorWebserver("start")).action_start()
     defined_resources = env.resource_list
     expected_resources = '[u"MonitorWebserver[\'start\']", u"Execute[\'grep -E \'KeepAlive (On|Off)\' ' \
-                         '/etc/httpd/conf/httpd.conf && /usr/bin/sudo [RMF_ENV_PLACEHOLDER] -H -E sed -i ' \
+                         '/etc/httpd/conf/httpd.conf && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E sed -i ' \
                          '\'s/KeepAlive Off/KeepAlive On/\' /etc/httpd/conf/httpd.conf || echo \'KeepAlive On\' ' \
-                         '| /usr/bin/sudo [RMF_ENV_PLACEHOLDER] -H -E tee --append /etc/httpd/conf/httpd.conf > /dev/null\']"' \
+                         '| ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E tee --append /etc/httpd/conf/httpd.conf > /dev/null\']"' \
                          ', u"Execute[\'(\'/etc/init.d/httpd\', \'start\')\']"]'
     self.assertEqual(str(defined_resources), expected_resources)
 
@@ -44,9 +44,9 @@ class TestMonitorWebserverResource(TestCase):
       MonitorWebserverProvider(MonitorWebserver("start")).action_start()
     defined_resources = env.resource_list
     expected_resources = '[u"MonitorWebserver[\'start\']", u"Execute[\'grep -E \'KeepAlive (On|Off)\' ' \
-                         '/etc/apache2/httpd.conf && /usr/bin/sudo [RMF_ENV_PLACEHOLDER] -H -E sed -i ' \
+                         '/etc/apache2/httpd.conf && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E sed -i ' \
                          '\'s/KeepAlive Off/KeepAlive On/\' /etc/apache2/httpd.conf || echo \'KeepAlive On\' ' \
-                         '| /usr/bin/sudo [RMF_ENV_PLACEHOLDER] -H -E tee --append /etc/apache2/httpd.conf > /dev/null\']",' \
+                         '| ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E tee --append /etc/apache2/httpd.conf > /dev/null\']",' \
                          ' u"Execute[\'(\'/etc/init.d/apache2\', \'start\')\']"]'
     self.assertEqual(str(defined_resources), expected_resources)
 

+ 10 - 10
ambari-agent/src/test/python/resource_management/TestUserResource.py

@@ -44,7 +44,7 @@ class TestUserResource(TestCase):
     with Environment('/') as env:
       user = User("mapred", action = "create", shell = "/bin/bash")
 
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E useradd -m -s /bin/bash mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, env={'PATH': '/bin'}, bufsize=1, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E useradd -m -s /bin/bash mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, env={'PATH': '/bin'}, bufsize=1, cwd=None)
     self.assertEqual(popen_mock.call_count, 1)
 
   @patch.object(subprocess, "Popen")
@@ -59,7 +59,7 @@ class TestUserResource(TestCase):
     with Environment('/') as env:
       user = User("mapred", action = "create", shell = "/bin/bash")
 
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E usermod -s /bin/bash mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E usermod -s /bin/bash mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
     self.assertEqual(popen_mock.call_count, 1)
 
   @patch.object(subprocess, "Popen")
@@ -90,7 +90,7 @@ class TestUserResource(TestCase):
       user = User("mapred", action = "create", comment = "testComment", 
           shell = "/bin/bash")
 
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E usermod -c testComment -s /bin/bash mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E usermod -c testComment -s /bin/bash mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
     self.assertEqual(popen_mock.call_count, 1)
 
   @patch.object(subprocess, "Popen")
@@ -106,7 +106,7 @@ class TestUserResource(TestCase):
       user = User("mapred", action = "create", home = "/test/home", 
           shell = "/bin/bash")
 
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E usermod -s /bin/bash -d /test/home mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E usermod -s /bin/bash -d /test/home mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
     self.assertEqual(popen_mock.call_count, 1)
 
   @patch.object(subprocess, "Popen")
@@ -122,7 +122,7 @@ class TestUserResource(TestCase):
       user = User("mapred", action = "create", password = "secure", 
          shell = "/bin/bash")    
 
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E usermod -s /bin/bash -p secure mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E usermod -s /bin/bash -p secure mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
     self.assertEqual(popen_mock.call_count, 1)
 
   @patch.object(subprocess, "Popen")
@@ -137,7 +137,7 @@ class TestUserResource(TestCase):
     with Environment('/') as env:
       user = User("mapred", action = "create", shell = "/bin/sh")
 
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E usermod -s /bin/sh mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E usermod -s /bin/sh mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
     self.assertEqual(popen_mock.call_count, 1)
 
   @patch.object(subprocess, "Popen")
@@ -152,7 +152,7 @@ class TestUserResource(TestCase):
     with Environment('/') as env:
      user = User("mapred", action = "create", uid = "1", shell = "/bin/bash")
 
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E usermod -s /bin/bash -u 1 mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E usermod -s /bin/bash -u 1 mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
     self.assertEqual(popen_mock.call_count, 1)
 
   @patch.object(subprocess, "Popen")
@@ -167,7 +167,7 @@ class TestUserResource(TestCase):
     with Environment('/') as env:
       user = User("mapred", action = "create", gid = "1", shell = "/bin/bash")
 
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E usermod -s /bin/bash -g 1 mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E usermod -s /bin/bash -g 1 mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
     self.assertEqual(popen_mock.call_count, 1)
 
   @patch.object(subprocess, "Popen")
@@ -183,7 +183,7 @@ class TestUserResource(TestCase):
       user = User("mapred", action = "create", groups = ['1','2','3'], 
           shell = "/bin/bash")
 
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E usermod -G 1,2,3 -s /bin/bash mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E usermod -G 1,2,3 -s /bin/bash mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
     self.assertEqual(popen_mock.call_count, 1)
 
   @patch.object(subprocess, "Popen")
@@ -197,6 +197,6 @@ class TestUserResource(TestCase):
     with Environment('/') as env:
       user = User("mapred", action = "create")
 
-    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "/usr/bin/sudo  PATH=/bin -H -E useradd -m mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
+    popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E useradd -m mapred"], shell=False, preexec_fn=None, stderr=-2, stdout=5, bufsize=1, env={'PATH': '/bin'}, cwd=None)
     self.assertEqual(popen_mock.call_count, 1)
 

+ 0 - 0
ambari-agent/src/test/python/tmp_hostcheck.result


+ 1 - 0
ambari-common/src/main/python/ambari_commons/constants.py

@@ -0,0 +1 @@
+AMBARI_SUDO_BINARY = "ambari-sudo.sh"

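Note on how the new constant is consumed elsewhere in this commit: as the first element of an argv list (main.py), or joined with an explicit space when building a shell string (HostCleanup.py). Illustrative values only:

from ambari_commons.constants import AMBARI_SUDO_BINARY

argv_style = [AMBARI_SUDO_BINARY, 'kill', '-15', '1234']           # list form: no quoting pitfalls
shell_style = AMBARI_SUDO_BINARY + ' ' + 'service mysqld restart'  # string form: note the space
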
+ 14 - 8
ambari-common/src/main/python/resource_management/core/shell.py

@@ -35,6 +35,7 @@ import traceback
 from exceptions import Fail
 from exceptions import ExecuteTimeoutException
 from resource_management.core.logger import Logger
+from ambari_commons.constants import AMBARI_SUDO_BINARY
 
 # use quiet=True calls from this folder (logs get too messy duplicating the resources with its commands)
 RMF_FOLDER = 'resource_management/'
@@ -137,6 +138,7 @@ def _call(command, logoutput=None, throw_on_failure=True,
     command = command.replace(placeholder, replacement.format(env_str=env_str))
 
   master_fd, slave_fd = pty.openpty()
+  Logger.info(command) # TODO: remove this before commit
   # --noprofile is used to preserve PATH set for ambari-agent
   subprocess_command = ["/bin/bash","--login","--noprofile","-c", command]
   proc = subprocess.Popen(subprocess_command, bufsize=1, stdout=slave_fd, stderr=subprocess.STDOUT,
@@ -198,14 +200,14 @@ def _call(command, logoutput=None, throw_on_failure=True,
   
   return code, out
 
-def as_sudo(command, env=None):
+def as_sudo(command, env=None, auto_escape=True):
   """
   command - list or tuple of arguments.
   env - when run as part of Execute resource, this SHOULD NOT be used.
   It automatically gets replaced later by call, checked_call. This should be used in not_if, only_if
   """
   if isinstance(command, (list, tuple)):
-    command = string_cmd_from_args_list(command)
+    command = string_cmd_from_args_list(command, auto_escape=auto_escape)
   else:
     # Since ambari user sudoer privileges may be restricted,
     # without having /bin/bash permission, and /bin/su permission.
@@ -217,14 +219,14 @@ def as_sudo(command, env=None):
     raise Fail(err_msg)
 
   env = _get_environment_str(_add_current_path_to_env(env)) if env else ENV_PLACEHOLDER
-  return "/usr/bin/sudo {0} -H -E {1}".format(env, command)
+  return "{0} {1} -H -E {2}".format(_get_sudo_binary(), env, command)
 
-def as_user(command, user, env=None):
+def as_user(command, user, env=None, auto_escape=True):
   if isinstance(command, (list, tuple)):
-    command = string_cmd_from_args_list(command)
+    command = string_cmd_from_args_list(command, auto_escape=auto_escape)
 
   export_env = "export {0} ; ".format(_get_environment_str(_add_current_path_to_env(env))) if env else EXPORT_PLACEHOLDER
-  return "/usr/bin/sudo su {0} -l -s /bin/bash -c {1}".format(user, quote_bash_args(export_env + command))
+  return "{0} su {1} -l -s /bin/bash -c {2}".format(_get_sudo_binary(), user, quote_bash_args(export_env + command))
 
 def quote_bash_args(command):
   if not command:
@@ -246,12 +248,16 @@ def _add_current_path_to_env(env):
     result['PATH'] = os.pathsep.join([os.environ['PATH'], result['PATH']])
   
   return result
+
+def _get_sudo_binary():
+  return AMBARI_SUDO_BINARY
   
 def _get_environment_str(env):
   return reduce(lambda str,x: '{0} {1}={2}'.format(str,x,quote_bash_args(env[x])), env, '')
 
-def string_cmd_from_args_list(command):
-  return ' '.join(quote_bash_args(x) for x in command)
+def string_cmd_from_args_list(command, auto_escape=True):
+  escape_func = quote_bash_args if auto_escape else (lambda x: x)
+  return ' '.join(escape_func(x) for x in command)
 
 def _on_timeout(proc, timeout_event):
   timeout_event.set()

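Per the format strings above (and the expectations in the updated tests), the wrapped commands now take these shapes; a sketch, with placeholders substituted later inside _call:

from resource_management.core.shell import as_sudo, as_user

print(as_sudo(['cat', '/etc/passwd']))
# -> ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /etc/passwd
print(as_user('echo 1', 'hdfs'))
# -> ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]echo 1'
# auto_escape=False skips per-argument quote_bash_args() for callers that pre-quote.
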
+ 1 - 0
ambari-server/conf/unix/ambari-env.sh

@@ -16,3 +16,4 @@
 
 AMBARI_PASSHPHRASE="DEV"
 export AMBARI_JVM_ARGS=$AMBARI_JVM_ARGS' -Xms512m -Xmx2048m -Djava.security.auth.login.config=/etc/ambari-server/conf/krb5JAASLogin.conf -Djava.security.krb5.conf=/etc/krb5.conf -Djavax.security.auth.useSubjectCredsOnly=false'
+export PATH=$PATH:/var/lib/ambari-agent

+ 7 - 7
ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/setupGanglia.sh

@@ -47,7 +47,7 @@ function instantiateGmetadConf()
   source ./gmetadLib.sh;
 
   generateGmetadConf > ${TMP_GANGLIA_FILE};
-  sudo -H -E cp ${TMP_GANGLIA_FILE} ${GMETAD_CONF_FILE}
+  /var/lib/ambari-agent/ambari-sudo.sh -H -E cp ${TMP_GANGLIA_FILE} ${GMETAD_CONF_FILE}
 }
 
 function instantiateGmondConf()
@@ -65,7 +65,7 @@ function instantiateGmondConf()
     
     # Always blindly generate the core gmond config - that goes on every box running gmond. 
     generateGmondCoreConf ${gmondClusterName} > ${TMP_GANGLIA_FILE};
-    sudo -H -E cp ${TMP_GANGLIA_FILE} `getGmondCoreConfFileName ${gmondClusterName}`;
+    /var/lib/ambari-agent/ambari-sudo.sh -H -E cp ${TMP_GANGLIA_FILE} `getGmondCoreConfFileName ${gmondClusterName}`;
 
     isMasterGmond=${2};
 
@@ -73,13 +73,13 @@ function instantiateGmondConf()
     if [ "0" -eq "${isMasterGmond}" ]
     then
       generateGmondSlaveConf ${gmondClusterName} > ${TMP_GANGLIA_FILE};
-      sudo -H -E cp ${TMP_GANGLIA_FILE} `getGmondSlaveConfFileName ${gmondClusterName}`;
+      /var/lib/ambari-agent/ambari-sudo.sh -H -E cp ${TMP_GANGLIA_FILE} `getGmondSlaveConfFileName ${gmondClusterName}`;
     else
       generateGmondMasterConf ${gmondClusterName} > ${TMP_GANGLIA_FILE}
-      sudo -H -E cp ${TMP_GANGLIA_FILE} `getGmondMasterConfFileName ${gmondClusterName}`;
+      /var/lib/ambari-agent/ambari-sudo.sh -H -E cp ${TMP_GANGLIA_FILE} `getGmondMasterConfFileName ${gmondClusterName}`;
     fi
 
-    sudo -H -E chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
+    /var/lib/ambari-agent/ambari-sudo.sh -H -E chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
 
   else
     echo "No gmondClusterName passed in, nothing to instantiate";
@@ -122,8 +122,8 @@ done
 createDirectory ${GANGLIA_CONF_DIR};
 createDirectory ${GANGLIA_RUNTIME_DIR};
 # So rrdcached can drop its PID files in here.
-sudo -H -E chmod -R o+rw ${GANGLIA_RUNTIME_DIR};
-sudo -H -E chown ${owner}:${group} ${GANGLIA_CONF_DIR};
+/var/lib/ambari-agent/ambari-sudo.sh -H -E chmod -R o+rw ${GANGLIA_RUNTIME_DIR};
+/var/lib/ambari-agent/ambari-sudo.sh -H -E chown ${owner}:${group} ${GANGLIA_CONF_DIR};
 
 if [ -n "${gmondClusterName}" ]
 then

+ 1 - 1
ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaLib.sh.j2

@@ -57,7 +57,7 @@ function createDirectory()
 
     if [ "x" != "x${directoryPath}" ]
     then
-        sudo -H -E mkdir -p ${directoryPath};
+        /var/lib/ambari-agent/ambari-sudo.sh -H -E mkdir -p ${directoryPath};
     fi
 }
 

+ 1 - 1
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py

@@ -45,7 +45,7 @@ def hbase_service(
         user = params.hbase_user,
         # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode
         timeout = 30,
-        on_timeout = format("! ( {no_op_test} ) || sudo -H -E kill -9 `cat {pid_file}`"),
+        on_timeout = format("! ( {no_op_test} ) || {sudo} -H -E kill -9 `cat {pid_file}`"),
       )
       
       Execute (format("rm -f {pid_file}"))

+ 2 - 0
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py

@@ -18,6 +18,7 @@ limitations under the License.
 
 """
 
+from ambari_commons.constants import AMBARI_SUDO_BINARY
 from functions import calc_xmn_from_xms
 from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
 from resource_management.libraries.functions.default import default
@@ -27,6 +28,7 @@ import status_params
 # server configurations
 config = Script.get_config()
 exec_tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
 
 stack_name = default("/hostLevelParams/stack_name", None)
 

+ 17 - 23
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py

@@ -167,9 +167,9 @@ def format_namenode(force=None):
                     conf_dir=hadoop_conf_dir)
     else:
       if not is_namenode_formatted(params):
-        Execute(format(
-          'sudo su {hdfs_user} - -s /bin/bash -c "export PATH=$PATH:{hadoop_bin_dir} ; yes Y | hdfs --config {hadoop_conf_dir} namenode -format"'),
-                path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin",
+        Execute(format("yes Y | hdfs --config {hadoop_conf_dir} namenode -format"),
+                user = params.hdfs_user,
+                path = [params.hadoop_bin_dir]
         )
         for m_dir in mark_dir:
           Directory(m_dir,
@@ -180,9 +180,9 @@ def format_namenode(force=None):
       if params.hostname == params.dfs_ha_namenode_active:
         # check and run the format command in the HA deployment scenario
         # only format the "active" namenode in an HA deployment
-        Execute(format(
-          'sudo su {hdfs_user} - -s /bin/bash -c "export PATH=$PATH:{hadoop_bin_dir} ; yes Y | hdfs --config {hadoop_conf_dir} namenode -format"'),
-                path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin",
+        Execute(format("yes Y | hdfs --config {hadoop_conf_dir} namenode -format"),
+                user = params.hdfs_user,
+                path = [params.hadoop_bin_dir]
         )
         for m_dir in mark_dir:
          Directory(m_dir,
@@ -213,33 +213,27 @@ def is_namenode_formatted(params):
   for old_mark_dir in old_mark_dirs:
     if os.path.isdir(old_mark_dir):
       for mark_dir in mark_dirs:
-        Execute(format(
-          "sudo cp -ar {old_mark_dir} {mark_dir}"),
-                path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
+        Execute(['cp', '-ar', old_mark_dir, mark_dir],
+                sudo = True
        )
         marked = True
-      Execute(format(
-        "sudo rm -rf {old_mark_dir}"),
-              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
-      )           
+      Directory(old_mark_dir,
+        action = "delete"
+      )    
     elif os.path.isfile(old_mark_dir):
       for mark_dir in mark_dirs:
-        Execute(format(
-          "sudo mkdir -p ${mark_dir}"),
-                path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
+        Directory(mark_dir,
+                  recursive = True,
         )
-      Execute(format(
-        "sudo rm -f {old_mark_dir}"),
-              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
-      )  
+      Directory(old_mark_dir,
+        action = "delete"
+      )
       marked = True
       
   # Check if name dirs are not empty
   for name_dir in nn_name_dirs:
     try:
-      Execute(format(
-        "sudo ls {name_dir} | wc -l  | grep -q ^0$"),
-              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
+      Execute(format("ls {name_dir} | wc -l  | grep -q ^0$"),
       )
       marked = False
     except Exception:

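The pattern this file moves to: pass an argv list to Execute with sudo=True (the framework prepends the sudo wrapper itself) and use the Directory resource instead of "sudo rm/mkdir" shell strings. A sketch with illustrative paths (the real mark_dir values come from cluster configuration):

from resource_management.core.environment import Environment
from resource_management import Execute, Directory

with Environment('/') as env:
    Execute(['cp', '-ar', '/var/lib/hdfs/namenode/formatted', '/hadoop/hdfs/namenode/formatted'],
            sudo = True)             # wrapper (ambari-sudo.sh) is prepended by resource_management
    Directory('/var/lib/hdfs/namenode/formatted.old',
              action = "delete")     # replaces a raw "sudo rm -rf ..." shell call
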
+ 5 - 5
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/addMysqlUser.sh

@@ -27,11 +27,11 @@ userhost=$4
 
 # The restart (not start) is required to pick up mysql configuration changes made by sed
 # during install, in case mysql is already started. The changes are required by Hive later on.
-sudo service $mysqldservice restart
+/var/lib/ambari-agent/ambari-sudo.sh service $mysqldservice restart
   
 echo "Adding user $mysqldbuser@% and removing users with empty name"
-sudo su mysql -s /bin/bash - -c "mysql -u root -e \"CREATE USER '$mysqldbuser'@'%' IDENTIFIED BY '$mysqldbpasswd';\""
-sudo su mysql -s /bin/bash - -c "mysql -u root -e \"GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'%';\""
-sudo su mysql -s /bin/bash - -c "mysql -u root -e \"DELETE FROM mysql.user WHERE user='';\""
-sudo su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"CREATE USER '$mysqldbuser'@'%' IDENTIFIED BY '$mysqldbpasswd';\""
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'%';\""
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"DELETE FROM mysql.user WHERE user='';\""
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
 

+ 3 - 3
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/removeMysqlUser.sh

@@ -24,10 +24,10 @@ mysqldservice=$1
 mysqldbuser=$2
 userhost=$3
 myhostname=$(hostname -f)
-sudo_prefix = "sudo -H -E"
+sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
 
 $sudo_prefix service $mysqldservice start
 echo "Removing user $mysqldbuser@$userhost"
-sudo su mysql -s /bin/bash - -c "mysql -u root -e \"DROP USER '$mysqldbuser'@'$userhost';\""
-sudo su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"DROP USER '$mysqldbuser'@'$userhost';\""
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
 $sudo_prefix service $mysqldservice stop

+ 5 - 5
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/templetonSmoke.sh

@@ -36,7 +36,7 @@ fi
 
 export no_proxy=$ttonhost
 cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
-retVal=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
 httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
 
 if [[ "$httpExitCode" -ne "200" ]] ; then
@@ -50,7 +50,7 @@ exit 0
 #try hcat ddl command
 echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
 cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
-retVal=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
 httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
 
 if [[ "$httpExitCode" -ne "200" ]] ; then
@@ -76,17 +76,17 @@ echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
 echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
 
 #copy pig script to hdfs
-sudo su ${smoke_test_user} -s /bin/bash - -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
+/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
 
 #copy input file to hdfs
-sudo su ${smoke_test_user} -s /bin/bash - -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
+/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
 
 #create, copy post args file
 echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
 
 #submit pig query
 cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
-retVal=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
 httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
 if [[ "$httpExitCode" -ne "200" ]] ; then
   echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"

+ 2 - 2
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_service.py

@@ -102,8 +102,8 @@ def hive_service(name, action='start', rolling_restart=False):
             
   elif action == 'stop':
 
-    daemon_kill_cmd = format("sudo kill `cat {pid_file}`")
-    daemon_hard_kill_cmd = format("sudo kill -9 `cat {pid_file}`")
+    daemon_kill_cmd = format("{sudo} kill `cat {pid_file}`")
+    daemon_hard_kill_cmd = format("{sudo} kill -9 `cat {pid_file}`")
 
     Execute(daemon_kill_cmd,
       not_if = format("! ({process_id_exists_command})")

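How the {sudo} placeholder resolves: resource_management's format() looks names up in the caller's params, and params.py (next diff) now defines sudo = AMBARI_SUDO_BINARY. A sketch using plain str.format in place of that helper, with an illustrative pid_file path:

from ambari_commons.constants import AMBARI_SUDO_BINARY

sudo = AMBARI_SUDO_BINARY
pid_file = '/var/run/hive/hive-server.pid'  # illustrative path
daemon_kill_cmd = "{sudo} kill `cat {pid_file}`".format(sudo=sudo, pid_file=pid_file)
print(daemon_kill_cmd)  # -> ambari-sudo.sh kill `cat /var/run/hive/hive-server.pid`
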
+ 2 - 0
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py

@@ -18,6 +18,7 @@ limitations under the License.

 """

+from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
 from resource_management.libraries.functions.default import default
 from resource_management import *
@@ -27,6 +28,7 @@ import os
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY

 stack_name = default("/hostLevelParams/stack_name", None)
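
Editor's note: every touched params.py gains the same two lines. The constant itself is added to ambari_commons/constants.py by this commit; judging from the test expectations further down, it presumably looks like:

    # Hedged sketch -- value inferred from the "ambari-sudo.sh ..." strings in the tests:
    AMBARI_SUDO_BINARY = "ambari-sudo.sh"
    # Shell scripts on the agent invoke it via /var/lib/ambari-agent/ambari-sudo.sh.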
 
 

+ 15 - 15
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/oozieSmoke2.sh

@@ -44,9 +44,9 @@ function checkOozieJobStatus {
   local i=0
   local rc=1
   local cmd="source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
-  sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"
+  /var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"
   while [ $i -lt $num_of_tries ] ; do
-    cmd_output=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+    cmd_output=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
     (IFS='';echo $cmd_output)
     act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
     echo "workflow_status=$act_status"
@@ -80,15 +80,15 @@ fi

 cd $OOZIE_EXAMPLES_DIR

-sudo tar -zxf oozie-examples.tar.gz
-sudo chmod -R o+rx examples
+/var/lib/ambari-agent/ambari-sudo.sh tar -zxf oozie-examples.tar.gz
+/var/lib/ambari-agent/ambari-sudo.sh chmod -R o+rx examples

-sudo sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sudo sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sudo sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sudo sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sudo sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sudo sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties

 if [[ $security_enabled == "True" ]]; then
   kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smokeuser_principal}; "
@@ -96,14 +96,14 @@ else
   kinitcmd=""
   kinitcmd=""
 fi
 fi
 
 
-sudo su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r examples"
-sudo su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r input-data"
-sudo su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-sudo su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
+/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r examples"
+/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r input-data"
+/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
+/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
 
 
 cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
 cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
 echo $cmd
 echo $cmd
-job_info=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd" | grep "job:"`
+job_info=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd" | grep "job:"`
 job_id="`echo $job_info | cut -d':' -f2`"
 job_id="`echo $job_info | cut -d':' -f2`"
 checkOozieJobStatus "$job_id" 15
 checkOozieJobStatus "$job_id" 15
 OOZIE_EXIT_CODE="$?"
 OOZIE_EXIT_CODE="$?"
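
Editor's note: the point of routing every privileged call through ambari-sudo.sh is that the wrapper can skip sudo entirely when the agent already runs as root. The wrapper's source is not shown in this section; a hedged Python rendering of the decision it presumably makes (the real ambari-sudo.sh is a shell script, and flag handling such as -H/-E is omitted here):

    import os, sys

    def run_privileged(argv):
        if os.geteuid() == 0:
            os.execvp(argv[0], argv)        # already root: no sudo binary needed
        os.execvp("sudo", ["sudo"] + argv)  # otherwise defer to the real sudo

    if __name__ == "__main__":
        run_privileged(sys.argv[1:])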

+ 3 - 3
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py

@@ -171,15 +171,15 @@ def oozie_server_specific():

   #falcon el extension
   if params.has_falcon_host:
-    Execute(format('sudo cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'),
+    Execute(format('{sudo} cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'),
       not_if  = no_op_test,
     )
-    Execute(format('sudo chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
+    Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
       not_if  = no_op_test,
     )
   if params.lzo_enabled:
     Package(params.lzo_packages_for_current_host)
-    Execute(format('sudo cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
+    Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
       not_if  = no_op_test,
     )

+ 2 - 0
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params.py

@@ -18,6 +18,7 @@ limitations under the License.

 """

+from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.core import System
 from resource_management.libraries import Script
 from resource_management.libraries.functions import default
@@ -35,6 +36,7 @@ import os
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY

 hostname = config["hostname"]
 
 

+ 2 - 0
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params.py

@@ -17,6 +17,7 @@ See the License for the specific language governing permissions and
 limitations under the License.

 """
+from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.script import Script
@@ -46,6 +47,7 @@ def get_bare_principal(normalized_principal_name):
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY

 stack_name = default("/hostLevelParams/stack_name", None)
 
 

+ 2 - 2
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/service.py

@@ -75,10 +75,10 @@ def service(
   elif action == "stop":
     process_dont_exist = format("! ({no_op_test})")
     pid = format("`cat {pid_file}`")
-    Execute(format("sudo kill {pid}"),
+    Execute(format("{sudo} kill {pid}"),
             not_if=process_dont_exist
     )
-    Execute(format("sudo kill -9 {pid}"),
+    Execute(format("{sudo} kill -9 {pid}"),
             not_if=format("sleep 2; {process_dont_exist} || sleep 20; {process_dont_exist}"),
             ignore_failures=True
     )
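
Editor's note: this pair of Execute calls encodes a polite-then-forceful stop: SIGTERM first, SIGKILL only if the process outlives the sleep guards in not_if. The same sequence in plain Python (grace period chosen arbitrarily):

    import os, signal, time

    def stop(pid):
        try:
            os.kill(pid, signal.SIGTERM)   # "{sudo} kill {pid}"
        except OSError:
            return                         # process already gone
        for _ in range(10):                # grace period, analogous to the sleep guards
            time.sleep(2)
            try:
                os.kill(pid, 0)            # signal 0 only probes for existence
            except OSError:
                return
        os.kill(pid, signal.SIGKILL)       # "{sudo} kill -9 {pid}"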

+ 1 - 1
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py

@@ -69,7 +69,7 @@ def storm():
         content=Template("storm-metrics2.properties.j2")
     )

-    Execute(format("sudo ln -s {metric_collector_sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+    Execute(format("{sudo} ln -s {metric_collector_sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
             not_if=format("ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
             only_if=format("ls {metric_collector_sink_jar}")
     )
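
Editor's note: the only_if/not_if pair makes the ln -s idempotent: link only when the jar exists and the link does not. A Python equivalent of that guard logic:

    import os

    def ensure_sink_link(jar, link):
        if os.path.exists(jar) and not os.path.lexists(link):  # only_if / not_if
            os.symlink(jar, link)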

+ 1 - 1
ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkService.sh

@@ -23,4 +23,4 @@
 zkcli_script=$1
 user=$2
 conf_dir=$3
-sudo su $user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"
+/var/lib/ambari-agent/ambari-sudo.sh su $user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"
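
Editor's note: "su $user -s /bin/bash -" forces a usable shell because service accounts frequently ship with /sbin/nologin; only the hop to root goes through the wrapper. A subprocess equivalent of the rewritten line, with assumed argument values:

    import subprocess

    user, conf_dir, zkcli = "zookeeper", "/etc/zookeeper/conf", "/usr/bin/zkCli.sh"  # assumed
    cmd = "source %s/zookeeper-env.sh ; echo 'ls /' | %s" % (conf_dir, zkcli)
    subprocess.call(["/var/lib/ambari-agent/ambari-sudo.sh", "su", user,
                     "-s", "/bin/bash", "-", "-c", cmd])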

+ 7 - 7
ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkSmoke.sh

@@ -37,7 +37,7 @@ zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`
 echo "zk_node1=$zk_node1"
 echo "zk_node1=$zk_node1"
 if [[ $security_enabled == "True" ]]; then
 if [[ $security_enabled == "True" ]]; then
   kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smokeuser_principal"
   kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smokeuser_principal"
-  sudo su $smoke_user -s /bin/bash - -c "$kinitcmd"
+  /var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "$kinitcmd"
 fi
 fi
 
 
 function verify_output() {
 function verify_output() {
@@ -52,17 +52,17 @@ function verify_output() {
 }

 # Delete /zk_smoketest znode if exists
-sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${zk_cli_shell} -server $zk_node1:$client_port" 2>&1>$test_output_file
+/var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${zk_cli_shell} -server $zk_node1:$client_port" 2>&1>$test_output_file
 # Create /zk_smoketest znode on one zookeeper server
-sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${zk_cli_shell} -server $zk_node1:$client_port" 2>&1>>$test_output_file
+/var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${zk_cli_shell} -server $zk_node1:$client_port" 2>&1>>$test_output_file
 verify_output

 for i in $zkhosts ; do
   echo "Running test on host $i"
   # Verify the data associated with znode across all the nodes in the zookeeper quorum
-  sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${zk_cli_shell} -server $i:$client_port"
-  sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${zk_cli_shell} -server $i:$client_port"
-  output=$(sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${zk_cli_shell} -server $i:$client_port")
+  /var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${zk_cli_shell} -server $i:$client_port"
+  /var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${zk_cli_shell} -server $i:$client_port"
+  output=$(/var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${zk_cli_shell} -server $i:$client_port")
   echo $output | grep smoke_data
   if [[ $? -ne 0 ]] ; then
     echo "Data associated with znode /zk_smoketests is not consistent on host $i"
@@ -70,7 +70,7 @@ for i in $zkhosts ; do
   fi
 done

-sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${zk_cli_shell} -server $zk_node1:$client_port"
+/var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${zk_cli_shell} -server $zk_node1:$client_port"
 if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
   echo "Zookeeper Smoke Test: Failed" 
 else
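
Editor's note (on the unchanged redirections, not on this commit): "2>&1>$test_output_file" binds left to right, so stderr is joined to the original stdout (the terminal) before stdout is pointed at the file. A subprocess sketch of that exact wiring, with a hypothetical output path:

    import subprocess, sys

    with open("/tmp/zk_smoke.out", "w") as out:          # hypothetical path
        subprocess.call("echo to-file; echo to-terminal 1>&2",
                        shell=True,
                        stdout=out,                      # stdout -> file
                        stderr=sys.stderr)               # stderr stays on the terminal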

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py

@@ -17,11 +17,13 @@ limitations under the License.

 """

+from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
 from resource_management import *
 from resource_management.core.system import System

 config = Script.get_config()
+sudo = AMBARI_SUDO_BINARY

 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py

@@ -24,8 +24,8 @@ def setup_hdp_install_directory():
   SELECT_ALL_PERFORMED_MARKER = "/var/lib/ambari-agent/data/hdp-select-set-all.performed"
   import params
   if params.hdp_stack_version != "" and compare_versions(params.stack_version_unformatted, '2.2') >= 0:
-    Execute(format('sudo touch {SELECT_ALL_PERFORMED_MARKER} ; ' +
-                   'sudo /usr/bin/hdp-select set all `ambari-python-wrap /usr/bin/hdp-select versions | grep ^{stack_version_unformatted} | tail -1`'),
+    Execute(as_sudo(['touch', SELECT_ALL_PERFORMED_MARKER]) + ' ; ' +
+                   format('{sudo} /usr/bin/hdp-select set all `ambari-python-wrap /usr/bin/hdp-select versions | grep ^{stack_version_unformatted} | tail -1`'),
             only_if=format('ls -d /usr/hdp/{stack_version_unformatted}*'),   # If any HDP version is installed
             not_if=format("test -f {SELECT_ALL_PERFORMED_MARKER}")           # Do that only once (otherwise we break rolling upgrade logic)
     )
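
Editor's note: as_sudo() builds the wrapped command from an argument list instead of string concatenation. A hedged sketch of the shape of its output (the real helper lives in resource_management and may handle quoting and the sudo path differently):

    import pipes  # shlex.quote in py3; these scripts target python 2

    AMBARI_SUDO_BINARY = "ambari-sudo.sh"  # assumed value

    def as_sudo(command):
        return AMBARI_SUDO_BINARY + " " + " ".join(pipes.quote(c) for c in command)

    print(as_sudo(["touch", "/var/lib/ambari-agent/data/hdp-select-set-all.performed"]))
    # ambari-sudo.sh touch /var/lib/ambari-agent/data/hdp-select-set-all.performed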

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh

@@ -46,7 +46,7 @@ set -e

 dir_array=($(echo $directories | sed 's/,/\n/g'))
 old_uid=$(id -u $username)
-sudo_prefix="sudo -H -E"
+sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
 echo "Changing uid of $username from $old_uid to $newUid"
 echo "Changing directory permisions for ${dir_array[@]}"
 $sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py

@@ -17,6 +17,7 @@ limitations under the License.

 """

+from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
 from resource_management import *
 from resource_management.core.system import System
@@ -25,6 +26,7 @@ import collections

 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY

 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py

@@ -44,10 +44,10 @@ def setup_java():

   if params.jdk_name.endswith(".bin"):
     chmod_cmd = ("chmod", "+x", jdk_curl_target)
-    install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && sudo cp -r {tmp_java_dir}/* {java_dir}")
+    install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -r {tmp_java_dir}/* {java_dir}")
   elif params.jdk_name.endswith(".gz"):
     chmod_cmd = ("chmod","a+x", java_dir)
-    install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && tar -xf {jdk_curl_target} && sudo cp -r {tmp_java_dir}/* {java_dir}")
+    install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -r {tmp_java_dir}/* {java_dir}")

   Directory(java_dir
   )

+ 3 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh

@@ -36,8 +36,8 @@ export list_of_non_empty_dirs=""

 mark_file=/var/run/hadoop/hdfs/namenode-formatted
 if [[ -f ${mark_file} ]] ; then
-  sudo rm -f ${mark_file}
-  sudo mkdir -p ${mark_dir}
+  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
+  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
 fi

 if [[ ! -d $mark_dir ]] ; then
@@ -52,7 +52,7 @@ if [[ ! -d $mark_dir ]] ; then
   done

   if [[ $EXIT_CODE == 0 ]] ; then
-    sudo su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
+    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
     (( EXIT_CODE = $EXIT_CODE | $? ))
   else
     echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"

+ 2 - 2
ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py

@@ -61,7 +61,7 @@ class TestFlumeHandler(RMFTestCase):
     self.assertTrue(set_desired_mock.call_args[0][0] == 'STARTED')

-    self.assertResourceCalled('Execute', "/usr/bin/sudo su flume -l -s /bin/bash -c 'export  PATH=/bin JAVA_HOME=/usr/jdk64/jdk1.7.0_45 ; /usr/bin/flume-ng agent --name a1 --conf /etc/flume/conf/a1 --conf-file /etc/flume/conf/a1/flume.conf -Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts=c6401.ambari.apache.org:8655 > /var/log/flume/a1.out 2>&1' &",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su flume -l -s /bin/bash -c 'export  PATH=/bin JAVA_HOME=/usr/jdk64/jdk1.7.0_45 ; /usr/bin/flume-ng agent --name a1 --conf /etc/flume/conf/a1 --conf-file /etc/flume/conf/a1/flume.conf -Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts=c6401.ambari.apache.org:8655 > /var/log/flume/a1.out 2>&1' &",
        environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
        wait_for_finish = False,
     )
@@ -298,7 +298,7 @@ class TestFlumeHandler(RMFTestCase):
     self.assert_configure_many()

-    self.assertResourceCalled('Execute', "/usr/bin/sudo su flume -l -s /bin/bash -c 'export  PATH=/bin JAVA_HOME=/usr/jdk64/jdk1.7.0_45 ; /usr/bin/flume-ng agent --name b1 --conf /etc/flume/conf/b1 --conf-file /etc/flume/conf/b1/flume.conf -Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts=c6401.ambari.apache.org:8655 > /var/log/flume/b1.out 2>&1' &",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su flume -l -s /bin/bash -c 'export  PATH=/bin JAVA_HOME=/usr/jdk64/jdk1.7.0_45 ; /usr/bin/flume-ng agent --name b1 --conf /etc/flume/conf/b1 --conf-file /etc/flume/conf/b1/flume.conf -Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts=c6401.ambari.apache.org:8655 > /var/log/flume/b1.out 2>&1' &",
        environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
        wait_for_finish = False,
     )
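
Editor's note: the unit tests pin exact command strings, so every expectation below flips from "/usr/bin/sudo" to "ambari-sudo.sh"; the [RMF_ENV_PLACEHOLDER] and [RMF_EXPORT_PLACEHOLDER] tokens are fixture placeholders, not shell. A toy sketch of the literal comparison these assertions reduce to (not the real RMFTestCase internals):

    # Invented mini-fixture for illustration only.
    recorded = [("Execute", "ambari-sudo.sh su flume -l -s /bin/bash -c '...'")]

    def assert_resource_called(resource, command):
        name, cmd = recorded.pop(0)
        assert (name, cmd) == (resource, command), (name, cmd)

    assert_resource_called("Execute", "ambari-sudo.sh su flume -l -s /bin/bash -c '...'")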

+ 2 - 2
ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py

@@ -60,7 +60,7 @@ class TestGangliaMonitor(RMFTestCase):
     )
     self.assert_configure_default()
     self.assert_gmond_master_conf_generated()
-    self.assertResourceCalled('Execute', '/usr/bin/sudo [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmond start >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmond start >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1',
        path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
     )
     self.assertNoMoreResources()
@@ -74,7 +74,7 @@ class TestGangliaMonitor(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
-    self.assertResourceCalled('Execute', '/usr/bin/sudo [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1',
        path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
     )
     self.assertNoMoreResources()

+ 2 - 2
ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py

@@ -45,7 +45,7 @@ class TestGangliaServer(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
-    self.assertResourceCalled('Execute', '/usr/bin/sudo [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
        path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
     )
     self.assertResourceCalled('MonitorWebserver', 'restart',
@@ -60,7 +60,7 @@ class TestGangliaServer(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
-    self.assertResourceCalled('Execute', '/usr/bin/sudo [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
        path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
     )
     self.assertResourceCalled('MonitorWebserver', 'restart',

+ 2 - 2
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py

@@ -63,7 +63,7 @@ class TestHBaseMaster(RMFTestCase):
     )

     self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master',
-        on_timeout = '! ( ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1 ) || sudo -H -E kill -9 `cat /var/run/hbase/hbase-hbase-master.pid`',
+        on_timeout = '! ( ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/hbase-hbase-master.pid`',
         timeout = 30,
         user = 'hbase',
     )
@@ -160,7 +160,7 @@ class TestHBaseMaster(RMFTestCase):
     )

     self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master',
-        on_timeout = '! ( ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1 ) || sudo -H -E kill -9 `cat /var/run/hbase/hbase-hbase-master.pid`',
+        on_timeout = '! ( ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/hbase-hbase-master.pid`',
         timeout = 30,
         user = 'hbase',
     )

+ 2 - 2
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py

@@ -63,7 +63,7 @@ class TestHbaseRegionServer(RMFTestCase):
     )

     self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver',
-        on_timeout = '! ( ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1 ) || sudo -H -E kill -9 `cat /var/run/hbase/hbase-hbase-regionserver.pid`',
+        on_timeout = '! ( ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/hbase-hbase-regionserver.pid`',
         timeout = 30,
         user = 'hbase',
     )
@@ -110,7 +110,7 @@ class TestHbaseRegionServer(RMFTestCase):
     )

     self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver',
-        on_timeout = '! ( ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1 ) || sudo -H -E kill -9 `cat /var/run/hbase/hbase-hbase-regionserver.pid`',
+        on_timeout = '! ( ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/hbase-hbase-regionserver.pid`',
         timeout = 30,
         user = 'hbase',
     )

+ 8 - 8
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py

@@ -64,7 +64,7 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
     )
@@ -96,7 +96,7 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = None,
     )
@@ -142,7 +142,7 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', '/usr/bin/sudo [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
     )
@@ -180,7 +180,7 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', '/usr/bin/sudo [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
         not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
     )
@@ -221,7 +221,7 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
         not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
     )
@@ -253,7 +253,7 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', '/usr/bin/sudo [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = None,
     )
@@ -295,7 +295,7 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', '/usr/bin/sudo [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
         not_if = None,
     )
@@ -339,7 +339,7 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
         not_if = None,
     )

+ 4 - 4
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py

@@ -64,7 +64,7 @@ class TestJournalnode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
     )
@@ -90,7 +90,7 @@ class TestJournalnode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = None,
     )
@@ -136,7 +136,7 @@ class TestJournalnode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
     )
@@ -162,7 +162,7 @@ class TestJournalnode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = None,
     )

+ 37 - 39
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py

@@ -50,15 +50,14 @@ class TestNamenode(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
-    self.assertResourceCalled('Execute', 'sudo ls /hadoop/hdfs/namenode | wc -l  | grep -q ^0$',
-                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              )
-    self.assertResourceCalled('Execute', 'sudo su hdfs - -s /bin/bash -c "export PATH=$PATH:/usr/bin ; yes Y | hdfs --config /etc/hadoop/conf namenode -format"',
-                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              )
+    self.assertResourceCalled('Execute', 'ls /hadoop/hdfs/namenode | wc -l  | grep -q ^0$',)
+    self.assertResourceCalled('Execute', 'yes Y | hdfs --config /etc/hadoop/conf namenode -format',
+        path = ['/usr/bin'],
+        user = 'hdfs',
+    )
     self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode/namenode-formatted/',
-                              recursive = True,
-                              )
+        recursive = True,
+    )
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
                               owner = 'hdfs',
                               content = Template('exclude_hosts_list.j2'),
@@ -81,7 +80,7 @@ class TestNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
     )
@@ -142,7 +141,7 @@ class TestNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = None,
     )
@@ -171,15 +170,14 @@ class TestNamenode(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
-    self.assertResourceCalled('Execute', 'sudo ls /hadoop/hdfs/namenode | wc -l  | grep -q ^0$',
-                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              )
-    self.assertResourceCalled('Execute', 'sudo su hdfs - -s /bin/bash -c "export PATH=$PATH:/usr/bin ; yes Y | hdfs --config /etc/hadoop/conf namenode -format"',
-                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              )
+    self.assertResourceCalled('Execute', 'ls /hadoop/hdfs/namenode | wc -l  | grep -q ^0$',)
+    self.assertResourceCalled('Execute', 'yes Y | hdfs --config /etc/hadoop/conf namenode -format',
+        path = ['/usr/bin'],
+        user = 'hdfs',
+    )
     self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode/namenode-formatted/',
-                              recursive = True,
-                              )
+        recursive = True,
+    )
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
                               owner = 'hdfs',
                               content = Template('exclude_hosts_list.j2'),
@@ -202,7 +200,7 @@ class TestNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
     )
@@ -266,7 +264,7 @@ class TestNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = None,
    )
@@ -306,14 +304,14 @@ class TestNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
    )
    self.assertResourceCalled('Execute', "hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'",
        path = ['/usr/bin'],
        tries = 40,
-        only_if = "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
        user = 'hdfs',
        try_sleep = 10,
    )
@@ -347,7 +345,7 @@ class TestNamenode(RMFTestCase):
        kinit_path_local = '/usr/bin/kinit',
        action = ['create'],
        bin_dir = '/usr/bin',
-        only_if = "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
    )
    self.assertNoMoreResources()

@@ -382,7 +380,7 @@ class TestNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
    )
@@ -392,7 +390,7 @@ class TestNamenode(RMFTestCase):
     self.assertResourceCalled('Execute', "hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'",
     self.assertResourceCalled('Execute', "hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'",
         path = ['/usr/bin'],
         path = ['/usr/bin'],
         tries = 40,
         tries = 40,
-        only_if = "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
        user = 'hdfs',
        try_sleep = 10,
    )
@@ -426,7 +424,7 @@ class TestNamenode(RMFTestCase):
        kinit_path_local = '/usr/bin/kinit',
        action = ['create'],
        bin_dir = '/usr/bin',
-        only_if = "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
    )
    self.assertNoMoreResources()

@@ -443,13 +441,13 @@ class TestNamenode(RMFTestCase):
    self.assert_configure_default()

    # verify that active namenode was formatted
-    self.assertResourceCalled('Execute', 'sudo su hdfs - -s /bin/bash -c "export PATH=$PATH:/usr/bin ; yes Y | hdfs --config /etc/hadoop/conf namenode -format"',
-                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              )
+    self.assertResourceCalled('Execute', 'yes Y | hdfs --config /etc/hadoop/conf namenode -format',
+        path = ['/usr/bin'],
+        user = 'hdfs',
+    )
    self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode/namenode-formatted/',
-                              recursive = True,
-                              )
-
+        recursive = True,
+    )
    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
                              owner = 'hdfs',
                              content = Template('exclude_hosts_list.j2'),
@@ -472,14 +470,14 @@ class TestNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
                              environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                              )
     self.assertResourceCalled('Execute', "hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'",
     self.assertResourceCalled('Execute', "hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'",
                               path = ['/usr/bin'],
                               path = ['/usr/bin'],
                               tries = 40,
                               tries = 40,
-                              only_if = "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
+                              only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
                              user = 'hdfs',
                              try_sleep = 10,
                              )
@@ -513,7 +511,7 @@ class TestNamenode(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              action = ['create'],
                              bin_dir = '/usr/bin',
-                              only_if = "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
+                              only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
                              )
    self.assertNoMoreResources()

@@ -558,14 +556,14 @@ class TestNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
                              environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                              )
     self.assertResourceCalled('Execute', "hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'",
     self.assertResourceCalled('Execute', "hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'",
                               path = ['/usr/bin'],
                               path = ['/usr/bin'],
                               tries = 40,
                               tries = 40,
-                              only_if = "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
+                              only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
                              user = 'hdfs',
                              try_sleep = 10,
                              )
@@ -599,7 +597,7 @@ class TestNamenode(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              action = ['create'],
                              bin_dir = '/usr/bin',
-                              only_if = "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
+                              only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
                              )
    self.assertNoMoreResources()

@@ -773,7 +771,7 @@ class TestNamenode(RMFTestCase):
                         hdp_stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES
      )
-      self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf balancer -threshold -1'",
+      self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf balancer -threshold -1'",
          logoutput = False,
          on_new_line = FunctionMock('handle_new_line'),
      )
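Note: RMFTestCase.assertResourceCalled compares each expected command string literally against the resource the script under test scheduled, which is why every hard-coded '/usr/bin/sudo su ...' literal above has to change in lockstep with the new ambari-sudo.sh wrapper. A minimal sketch of that assertion style (the recorder class is illustrative, not the real RMFTestCase API):

    # Hypothetical stand-in for the RMF harness: resources are recorded in
    # order and each assertion pops the next one and compares it literally.
    class ResourceRecorder(object):
        def __init__(self):
            self.calls = []

        def record(self, resource_type, name, **kwargs):
            self.calls.append((resource_type, name, kwargs))

        def assertResourceCalled(self, resource_type, name, **kwargs):
            actual = self.calls.pop(0)
            expected = (resource_type, name, kwargs)
            assert actual == expected, "expected %r, got %r" % (expected, actual)

    recorder = ResourceRecorder()
    recorder.record('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'ulimit -c unlimited'")
    recorder.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'ulimit -c unlimited'")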

+ 1 - 1
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py

@@ -62,7 +62,7 @@ class TestServiceCheck(RMFTestCase):
    self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp',
        conf_dir = '/etc/hadoop/conf',
        logoutput = True,
-        not_if = "/usr/bin/sudo su ambari-qa -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]/usr/bin/hadoop --config /etc/hadoop/conf fs -test -e /tmp'",
+        not_if = "ambari-sudo.sh su ambari-qa -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]/usr/bin/hadoop --config /etc/hadoop/conf fs -test -e /tmp'",
        try_sleep = 3,
        tries = 5,
        bin_dir = '/usr/bin',
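Note: the not_if/only_if keyword arguments asserted throughout these tests are shell guards: not_if skips the command when the guard exits 0 (the work is already done), only_if skips it when the guard exits non-zero (the precondition is unmet). A rough sketch of those semantics, assuming plain subprocess behavior (execute() here is illustrative, not the real resource implementation):

    import subprocess

    def execute(command, not_if=None, only_if=None):
        # A succeeding not_if guard means there is nothing left to do.
        if not_if is not None and subprocess.call(not_if, shell=True) == 0:
            return
        # A failing only_if guard means the precondition is unmet.
        if only_if is not None and subprocess.call(only_if, shell=True) != 0:
            return
        subprocess.check_call(command, shell=True)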

+ 4 - 4
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py

@@ -72,7 +72,7 @@ class TestSNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
    )
@@ -103,7 +103,7 @@ class TestSNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = None,
    )
@@ -159,7 +159,7 @@ class TestSNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
    )
@@ -190,7 +190,7 @@ class TestSNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = None,
    )

+ 6 - 6
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py

@@ -80,7 +80,7 @@ class TestZkfc(RMFTestCase):
                              action = ['delete'],
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
    )
@@ -107,7 +107,7 @@ class TestZkfc(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop zkfc'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = None,
    )
@@ -171,7 +171,7 @@ class TestZkfc(RMFTestCase):
                              action = ['delete'],
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
    )
@@ -197,7 +197,7 @@ class TestZkfc(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop zkfc'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = None,
    )
@@ -267,7 +267,7 @@ class TestZkfc(RMFTestCase):
                              action = ['delete'],
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
                              environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
                              )
@@ -333,7 +333,7 @@ class TestZkfc(RMFTestCase):
                              action = ['delete'],
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
                              environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
                              )
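Note: the daemon start/stop expectations above all share one shape: ambari-sudo.sh escalates, su drops to the service user under a login shell, and [RMF_EXPORT_PLACEHOLDER] marks where the environment kwarg (here HADOOP_LIBEXEC_DIR) is rendered as export statements. A sketch of how such a command string can be composed (as_user() is a hypothetical helper, not the resource_management API):

    def as_user(user, command, env=None):
        # Render the environment kwarg as the export prefix that fills the
        # [RMF_EXPORT_PLACEHOLDER] marker in the expected strings above.
        exports = ''.join("export %s=%s ; " % item for item in (env or {}).items())
        return "ambari-sudo.sh su %s -l -s /bin/bash -c '%s%s'" % (user, exports, command)

    print(as_user('hdfs',
                  'ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh '
                  '--config /etc/hadoop/conf start zkfc',
                  env={'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'}))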

+ 4 - 4
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py

@@ -67,10 +67,10 @@ class TestHiveMetastore(RMFTestCase):
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )

-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/hive/hive.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/hive/hive.pid`',
      not_if = '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/hive/hive.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/hive/hive.pid`',
      not_if = '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1) || ( sleep 5 && ! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1) )',
    )
    self.assertResourceCalled('Execute', '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1)',
@@ -127,10 +127,10 @@ class TestHiveMetastore(RMFTestCase):
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )

-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/hive/hive.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/hive/hive.pid`',
      not_if = '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/hive/hive.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/hive/hive.pid`',
      not_if = '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1) || ( sleep 5 && ! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1) )',
    )
    self.assertResourceCalled('Execute', '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1)',
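Note: the Hive stop sequence asserted above is a two-step kill driven by the pid file: a plain SIGTERM first, then a SIGKILL whose not_if guard gives the process a 5-second grace period to exit on its own. The same idea in plain Python (a sketch only; the real commands shell out through ambari-sudo.sh because the agent may not own the process):

    import os, signal, time

    def stop_via_pidfile(pid_file, grace_seconds=5):
        with open(pid_file) as f:
            pid = int(f.read().strip())
        try:
            os.kill(pid, signal.SIGTERM)   # 'ambari-sudo.sh kill `cat hive.pid`'
            time.sleep(grace_seconds)
            os.kill(pid, signal.SIGKILL)   # 'ambari-sudo.sh kill -9 `cat hive.pid`'
        except OSError:
            pass                           # process already exited -> done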

+ 4 - 4
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py

@@ -88,10 +88,10 @@ class TestHiveServer(RMFTestCase):
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )

-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/hive/hive-server.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/hive/hive-server.pid`',
      not_if = '! (ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/hive/hive-server.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/hive/hive-server.pid`',
      not_if = '! (ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1) || ( sleep 5 && ! (ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1) )',
    )
    self.assertResourceCalled('Execute', '! (ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1)',
@@ -165,10 +165,10 @@ class TestHiveServer(RMFTestCase):
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )

-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/hive/hive-server.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/hive/hive-server.pid`',
      not_if = '! (ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/hive/hive-server.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/hive/hive-server.pid`',
      not_if = '! (ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1) || ( sleep 5 && ! (ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1) )',
    )
    self.assertResourceCalled('Execute', '! (ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1)',

+ 4 - 4
ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py

@@ -284,10 +284,10 @@ class TestOozieServer(RMFTestCase):
        not_if = 'ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1',
        sudo = True,
    )
-    self.assertResourceCalled('Execute', 'sudo cp /usr/lib/falcon/oozie/ext/falcon-oozie-el-extension-*.jar /usr/lib/oozie/libext',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh cp /usr/lib/falcon/oozie/ext/falcon-oozie-el-extension-*.jar /usr/lib/oozie/libext',
        not_if = 'ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1',
    )
-    self.assertResourceCalled('Execute', 'sudo chown oozie:hadoop /usr/lib/oozie/libext/falcon-oozie-el-extension-*.jar',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh chown oozie:hadoop /usr/lib/oozie/libext/falcon-oozie-el-extension-*.jar',
        not_if = 'ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1',
    )
    self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-setup.sh prepare-war',
@@ -437,10 +437,10 @@ class TestOozieServer(RMFTestCase):
        not_if = 'ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1',
        sudo = True,
    )
-    self.assertResourceCalled('Execute', 'sudo cp /usr/lib/falcon/oozie/ext/falcon-oozie-el-extension-*.jar /usr/lib/oozie/libext',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh cp /usr/lib/falcon/oozie/ext/falcon-oozie-el-extension-*.jar /usr/lib/oozie/libext',
        not_if = 'ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1',
    )
-    self.assertResourceCalled('Execute', 'sudo chown oozie:hadoop /usr/lib/oozie/libext/falcon-oozie-el-extension-*.jar',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh chown oozie:hadoop /usr/lib/oozie/libext/falcon-oozie-el-extension-*.jar',
        not_if = 'ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1',
    )
    self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-setup.sh prepare-war',

+ 1 - 1
ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py

@@ -51,7 +51,7 @@ class TestHookBeforeInstall(RMFTestCase):
        not_if = 'test -e /usr/jdk64/jdk1.7.0_45/bin/java',
        sudo = True,
    )
-    self.assertResourceCalled('Execute', 'mkdir -p /tmp/jdk && cd /tmp/jdk && tar -xf /tmp/AMBARI-artifacts//jdk-7u67-linux-x64.tar.gz && sudo cp -r /tmp/jdk/* /usr/jdk64',
+    self.assertResourceCalled('Execute', 'mkdir -p /tmp/jdk && cd /tmp/jdk && tar -xf /tmp/AMBARI-artifacts//jdk-7u67-linux-x64.tar.gz && ambari-sudo.sh cp -r /tmp/jdk/* /usr/jdk64',
        not_if = 'test -e /usr/jdk64/jdk1.7.0_45/bin/java',
    )
    self.assertResourceCalled('Execute', ('chgrp', '-R', u'hadoop', u'/usr/jdk64/jdk1.7.0_45'),
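Note: the JDK install expectation above keeps escalation minimal: mkdir, cd and tar run unprivileged in /tmp, and only the final copy into /usr/jdk64 goes through ambari-sudo.sh, with the whole chain skipped when the not_if guard finds java already present. A sketch of how that chained command can be built (install_jdk() is a hypothetical helper; the paths come from the expected string):

    def install_jdk(artifact, jdk_home='/usr/jdk64'):
        # Unprivileged unpack in /tmp, then a single escalated copy step.
        steps = [
            'mkdir -p /tmp/jdk',
            'cd /tmp/jdk',
            'tar -xf %s' % artifact,
            'ambari-sudo.sh cp -r /tmp/jdk/* %s' % jdk_home,
        ]
        return ' && '.join(steps)

    assert install_jdk('/tmp/AMBARI-artifacts//jdk-7u67-linux-x64.tar.gz') == (
        'mkdir -p /tmp/jdk && cd /tmp/jdk && '
        'tar -xf /tmp/AMBARI-artifacts//jdk-7u67-linux-x64.tar.gz && '
        'ambari-sudo.sh cp -r /tmp/jdk/* /usr/jdk64')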

+ 4 - 4
ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py

@@ -69,10 +69,10 @@ class TestHiveMetastore(RMFTestCase):
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )

-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/hive/hive.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/hive/hive.pid`',
                              not_if = '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1)',
                              )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/hive/hive.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/hive/hive.pid`',
                              not_if = '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1) || ( sleep 5 && ! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1) )',
                              )
    self.assertResourceCalled('Execute', '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1)',
@@ -130,10 +130,10 @@ class TestHiveMetastore(RMFTestCase):
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )

-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/hive/hive.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/hive/hive.pid`',
      not_if = '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/hive/hive.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/hive/hive.pid`',
      not_if = '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1) || ( sleep 5 && ! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1) )',
    )
    self.assertResourceCalled('Execute', '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p `cat /var/run/hive/hive.pid` >/dev/null 2>&1)',

+ 4 - 4
ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py

@@ -72,10 +72,10 @@ class TestStormDrpcServer(TestStormBase):
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/drpc.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/drpc.pid`',
        not_if = '! (ls /var/run/storm/drpc.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/drpc.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/drpc.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/drpc.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/drpc.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/drpc.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/drpc.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/drpc.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )
@@ -129,10 +129,10 @@ class TestStormDrpcServer(TestStormBase):
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/drpc.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/drpc.pid`',
        not_if = '! (ls /var/run/storm/drpc.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/drpc.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/drpc.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/drpc.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/drpc.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/drpc.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/drpc.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/drpc.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )

+ 4 - 4
ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py

@@ -71,10 +71,10 @@ class TestStormNimbus(TestStormBase):
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/nimbus.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/nimbus.pid`',
        not_if = '! (ls /var/run/storm/nimbus.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/nimbus.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/nimbus.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/nimbus.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/nimbus.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/nimbus.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/nimbus.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/nimbus.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )
@@ -128,10 +128,10 @@ class TestStormNimbus(TestStormBase):
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/nimbus.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/nimbus.pid`',
        not_if = '! (ls /var/run/storm/nimbus.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/nimbus.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/nimbus.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/nimbus.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/nimbus.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/nimbus.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/nimbus.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/nimbus.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )

+ 4 - 4
ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py

@@ -71,10 +71,10 @@ class TestStormRestApi(TestStormBase):
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/restapi.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/restapi.pid`',
        not_if = '! (ls /var/run/storm/restapi.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/restapi.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/restapi.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/restapi.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/restapi.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/restapi.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/restapi.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/restapi.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )
@@ -128,10 +128,10 @@ class TestStormRestApi(TestStormBase):
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/restapi.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/restapi.pid`',
        not_if = '! (ls /var/run/storm/restapi.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/restapi.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/restapi.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/restapi.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/restapi.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/restapi.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/restapi.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/restapi.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )
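Note: every forced 'kill -9' expectation in these Storm tests carries ignore_failures=True: by the time the forced kill runs, the process may already be gone, and a failing kill must not fail the whole stop operation. A sketch of the assumed semantics of that flag:

    import subprocess

    def execute(command, ignore_failures=False):
        try:
            subprocess.check_call(command, shell=True)
        except subprocess.CalledProcessError:
            if not ignore_failures:
                raise  # ordinary commands still surface the failure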

+ 8 - 8
ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py

@@ -83,20 +83,20 @@ class TestStormSupervisor(TestStormBase):
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/supervisor.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/supervisor.pid`',
        not_if = '! (ls /var/run/storm/supervisor.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/supervisor.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/supervisor.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/supervisor.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/supervisor.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/supervisor.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/supervisor.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/supervisor.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )
    self.assertResourceCalled('File', '/var/run/storm/supervisor.pid',
        action = ['delete'],
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/logviewer.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/logviewer.pid`',
        not_if = '! (ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/logviewer.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/logviewer.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )
@@ -163,20 +163,20 @@ class TestStormSupervisor(TestStormBase):
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/supervisor.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/supervisor.pid`',
        not_if = '! (ls /var/run/storm/supervisor.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/supervisor.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/supervisor.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/supervisor.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/supervisor.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/supervisor.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/supervisor.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/supervisor.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )
    self.assertResourceCalled('File', '/var/run/storm/supervisor.pid',
        action = ['delete'],
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/logviewer.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/logviewer.pid`',
        not_if = '! (ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/logviewer.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/logviewer.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )

+ 4 - 4
ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py

@@ -76,10 +76,10 @@ class TestStormSupervisor(TestStormBase):
    self.assertResourceCalled('Execute', 'supervisorctl stop storm-supervisor',
                              wait_for_finish = False,
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/logviewer.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/logviewer.pid`',
        not_if = '! (ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/logviewer.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/logviewer.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )
@@ -141,10 +141,10 @@ class TestStormSupervisor(TestStormBase):
                              wait_for_finish = False,
    )

-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/logviewer.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/logviewer.pid`',
        not_if = '! (ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/logviewer.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/logviewer.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )

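In the supervisord-managed ("prod") variant above, the daemon itself is stopped through supervisorctl rather than a pid-file kill; only the logviewer, which supervisord does not manage, still gets the two-step kill sequence. The wait_for_finish = False flag makes the Execute resource fire-and-forget. A hedged sketch of that call (import path as used by Ambari's resource_management; an illustration, not the patch's exact service code):

    # Sketch: asynchronous stop via supervisord. With wait_for_finish=False the
    # agent launches the command but does not block on, or check, its exit status.
    from resource_management.core.resources.system import Execute

    Execute('supervisorctl stop storm-supervisor',
            wait_for_finish = False)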
+ 4 - 4
ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py

@@ -69,10 +69,10 @@ class TestStormUiServer(TestStormBase):
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/ui.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/ui.pid`',
        not_if = '! (ls /var/run/storm/ui.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/ui.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/ui.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/ui.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/ui.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/ui.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/ui.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/ui.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )
@@ -126,10 +126,10 @@ class TestStormUiServer(TestStormBase):
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
-    self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/storm/ui.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill `cat /var/run/storm/ui.pid`',
        not_if = '! (ls /var/run/storm/ui.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/ui.pid` >/dev/null 2>&1)',
    )
-    self.assertResourceCalled('Execute', 'sudo kill -9 `cat /var/run/storm/ui.pid`',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/storm/ui.pid`',
        not_if = 'sleep 2; ! (ls /var/run/storm/ui.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/ui.pid` >/dev/null 2>&1) || sleep 20; ! (ls /var/run/storm/ui.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/ui.pid` >/dev/null 2>&1)',
        ignore_failures = True,
    )

+ 2 - 2
ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py

@@ -71,12 +71,12 @@ class TestKnoxGateway(RMFTestCase):
    )
    self.assertResourceCalled('Execute', '/usr/lib/knox/bin/knoxcli.sh create-master --master sa',
        environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
-        not_if = "/usr/bin/sudo su knox -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]test -f /var/lib/knox/data/security/master'",
+        not_if = "ambari-sudo.sh su knox -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]test -f /var/lib/knox/data/security/master'",
        user = 'knox',
    )
    self.assertResourceCalled('Execute', '/usr/lib/knox/bin/knoxcli.sh create-cert --hostname c6401.ambari.apache.org',
        environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
-        not_if = "/usr/bin/sudo su knox -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]test -f /var/lib/knox/data/security/keystores/gateway.jks'",
+        not_if = "ambari-sudo.sh su knox -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]test -f /var/lib/knox/data/security/keystores/gateway.jks'",
        user = 'knox',
    )
    self.assertResourceCalled('File', '/etc/knox/conf/ldap-log4j.properties',
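The Knox guards show the run-as-user half of the change: a not_if that previously shelled out through /usr/bin/sudo su knox ... is now prefixed with ambari-sudo.sh, and the [RMF_EXPORT_PLACEHOLDER] token marks where the resource's environment exports get spliced in before the guard runs. A hedged sketch of composing such a guard string (the wrapper name comes from this diff; the helper and placeholder handling are assumptions about, not a copy of, the library internals):

    # Sketch: build a run-as-user guard like the not_if strings asserted above.
    AMBARI_SUDO = "ambari-sudo.sh"  # wrapper added by this patch for sudo-less hosts

    def as_user(command, user):
      # [RMF_EXPORT_PLACEHOLDER] is assumed to be replaced later with
      # "export VAR=...;" statements for the resource's environment.
      return "%s su %s -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]%s'" % (AMBARI_SUDO, user, command)

    print(as_user("test -f /var/lib/knox/data/security/master", "knox"))
    # -> ambari-sudo.sh su knox -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]test -f /var/lib/knox/data/security/master'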