
AMBARI-8373 - Refactor the OS-dependent Ambari Agent Windows components (Eugene Chekanskiy via abaranchuk)

Artem Baranchuk 10 years ago
parent commit 15c65b9300
35 changed files with 1,050 additions and 1,539 deletions
  1. +11 -4    ambari-agent/conf/windows/service_wrapper.py
  2. +0 -2     ambari-agent/src/main/python/ambari_agent/ActionQueue.py
  3. +0 -230   ambari-agent/src/main/python/ambari_agent/AgentConfig_linux.py
  4. +0 -232   ambari-agent/src/main/python/ambari_agent/AgentConfig_windows.py
  5. +130 -51  ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
  6. +2 -12    ambari-agent/src/main/python/ambari_agent/Controller.py
  7. +16 -20   ambari-agent/src/main/python/ambari_agent/Facter.py
  8. +2 -2     ambari-agent/src/main/python/ambari_agent/Hardware.py
  9. +147 -0   ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers.py
  10. +0 -58   ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers_windows.py
  11. +0 -91   ambari-agent/src/main/python/ambari_agent/HeartbeatStopHandler_linux.py
  12. +6 -4    ambari-agent/src/main/python/ambari_agent/HostCleanup.py
  13. +502 -6  ambari-agent/src/main/python/ambari_agent/HostInfo.py
  14. +0 -380  ambari-agent/src/main/python/ambari_agent/HostInfo_linux.py
  15. +0 -227  ambari-agent/src/main/python/ambari_agent/HostInfo_win.py
  16. +2 -7    ambari-agent/src/main/python/ambari_agent/NetUtil.py
  17. +0 -1    ambari-agent/src/main/python/ambari_agent/PackagesAnalyzer.py
  18. +2 -2    ambari-agent/src/main/python/ambari_agent/PythonExecutor.py
  19. +7 -11   ambari-agent/src/main/python/ambari_agent/main.py
  20. +2 -2    ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
  21. +5 -5    ambari-agent/src/test/python/ambari_agent/TestHardware.py
  22. +4 -4    ambari-agent/src/test/python/ambari_agent/TestHeartbeat.py
  23. +42 -38  ambari-agent/src/test/python/ambari_agent/TestHostInfo.py
  24. +7 -7    ambari-agent/src/test/python/ambari_agent/TestMain.py
  25. +0 -1    ambari-agent/src/test/python/ambari_agent/TestRegistration.py
  26. +84 -108 ambari-common/src/main/python/ambari_commons/os_check.py
  27. +64 -0   ambari-common/src/main/python/ambari_commons/os_family_impl.py
  28. +2 -2    ambari-common/src/main/python/ambari_commons/os_utils.py
  29. +1 -1    ambari-common/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py
  30. +1 -1    ambari-server/src/main/python/ambari-server.py
  31. +2 -2    ambari-server/src/main/python/ambari_server/dbConfiguration.py
  32. +1 -1    ambari-server/src/main/python/ambari_server/serverConfiguration.py
  33. +6 -3    ambari-server/src/main/python/ambari_server/serverSetup.py
  34. +2 -2    ambari-server/src/main/python/ambari_server/setupSecurity.py
  35. +0 -22   ambari-server/src/test/python/TestOSCheck.py

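The pivot of this commit is the new ambari_commons/os_family_impl.py (+64 -0), whose diff is not reproduced on this page. As used throughout the changes below, @OsFamilyImpl(os_family=...) marks a subclass as the implementation of its base class for one OS family, so call sites can keep instantiating the base name directly. A minimal sketch of such a decorator follows; it illustrates the dispatch idea under stated assumptions and is not the actual contents of os_family_impl.py:

    import sys
    from ambari_commons.os_check import OSCheck

    class OsFamilyImpl(object):
        # Sketch: register per-family subclasses, then rebind the base-class
        # name in its defining module to the one matching the running OS.
        DEFAULT = "default"

        def __init__(self, os_family):
            self.os_family = os_family

        def __call__(self, cls):
            base = cls.__bases__[0]
            registry = base.__dict__.get("_os_impls")
            if registry is None:
                registry = {}
                setattr(base, "_os_impls", registry)
            registry[self.os_family] = cls
            chosen = registry.get(OSCheck.get_os_family(),
                                  registry.get(OsFamilyImpl.DEFAULT))
            if chosen is not None:
                # e.g. Facter and HostInfo below resolve to the chosen subclass
                setattr(sys.modules[base.__module__], base.__name__, chosen)
            return cls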
+ 11 - 4
ambari-agent/conf/windows/service_wrapper.py

@@ -28,11 +28,18 @@ from ambari_commons.ambari_service import AmbariService, ENV_PYTHON_PATH
 from ambari_commons.exceptions import *
 from ambari_commons.logging_utils import *
 from ambari_commons.os_windows import WinServiceController
-from ambari_agent.AmbariConfig import AmbariConfig, SETUP_ACTION, START_ACTION, DEBUG_ACTION, STOP_ACTION, STATUS_ACTION
-from ambari_agent.HeartbeatHandlers_windows import HeartbeatStopHandler
+from ambari_agent.AmbariConfig import AmbariConfig
+from ambari_agent.HeartbeatHandlers import HeartbeatStopHandlers
 
 AMBARI_VERSION_VAR = "AMBARI_VERSION_VAR"
 
+SETUP_ACTION = "setup"
+START_ACTION = "start"
+STOP_ACTION = "stop"
+RESET_ACTION = "reset"
+STATUS_ACTION = "status"
+DEBUG_ACTION = "debug"
+
 def parse_options():
   # parse env cmd
   with open(os.path.join(os.getcwd(), "ambari-env.cmd"), "r") as env_cmd:
@@ -82,7 +89,7 @@ class AmbariAgentService(AmbariService):
     # Soft dependency on the Windows Time service
     ensure_time_service_is_started()
 
-    self.heartbeat_stop_handler = HeartbeatStopHandler(self._heventSvcStop)
+    self.heartbeat_stop_handler = HeartbeatStopHandlers(self._heventSvcStop)
 
     self.ReportServiceStatus(win32service.SERVICE_RUNNING)
 
@@ -101,7 +108,7 @@ class AmbariAgentService(AmbariService):
 def ensure_time_service_is_started():
   ret = WinServiceController.EnsureServiceIsStarted("W32Time")
   if 0 != ret:
-    raise FatalException(-1, "Error starting Windows Time service: " + string(ret))
+    raise FatalException(-1, "Error starting Windows Time service: " + str(ret))
   pass
 
 

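Two things happen in this hunk besides the import rework: the CLI action constants move from AmbariConfig into the service wrapper itself, and a latent crash is fixed, since string() is not a Python builtin the old failure path raised NameError instead of the intended FatalException. A minimal illustration (the return code value is hypothetical):

    ret = 2  # hypothetical non-zero code from WinServiceController
    # str(), not string(): the old line died with NameError before the
    # FatalException message could ever be constructed.
    message = "Error starting Windows Time service: " + str(ret)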
+ 0 - 2
ambari-agent/src/main/python/ambari_agent/ActionQueue.py

@@ -28,7 +28,6 @@ import json
 
 from AgentException import AgentException
 from LiveStatus import LiveStatus
-from shell import shellRunner
 from ActualConfigHandler import ActualConfigHandler
 from CommandStatusDict import CommandStatusDict
 from CustomServiceOrchestrator import CustomServiceOrchestrator
@@ -74,7 +73,6 @@ class ActionQueue(threading.Thread):
       self.status_update_callback)
     self.config = config
     self.controller = controller
-    self.sh = shellRunner()
     self.configTags = {}
     self._stop = threading.Event()
     self.tmpdir = config.get('agent', 'prefix')

+ 0 - 230
ambari-agent/src/main/python/ambari_agent/AgentConfig_linux.py

@@ -1,230 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-
-content = """
-
-[server]
-hostname=localhost
-url_port=8440
-secured_url_port=8441
-
-[agent]
-prefix=/tmp/ambari-agent
-tmp_dir=/tmp/ambari-agent/tmp
-data_cleanup_interval=86400
-data_cleanup_max_age=2592000
-data_cleanup_max_size_MB = 100
-ping_port=8670
-cache_dir=/var/lib/ambari-agent/cache
-run_as_user=root
-
-[services]
-
-[python]
-custom_actions_dir = /var/lib/ambari-agent/resources/custom_actions
-
-[command]
-maxretries=2
-sleepBetweenRetries=1
-
-[security]
-keysdir=/tmp/ambari-agent
-server_crt=ca.crt
-passphrase_env_var_name=AMBARI_PASSPHRASE
-
-[heartbeat]
-state_interval = 6
-dirs=/etc/hadoop,/etc/hadoop/conf,/var/run/hadoop,/var/log/hadoop
-log_lines_count=300
-
-"""
-
-imports = [
-  "hdp/manifests/*.pp",
-  "hdp-hadoop/manifests/*.pp",
-  "hdp-hbase/manifests/*.pp",
-  "hdp-zookeeper/manifests/*.pp",
-  "hdp-oozie/manifests/*.pp",
-  "hdp-pig/manifests/*.pp",
-  "hdp-sqoop/manifests/*.pp",
-  "hdp-templeton/manifests/*.pp",
-  "hdp-hive/manifests/*.pp",
-  "hdp-hcat/manifests/*.pp",
-  "hdp-mysql/manifests/*.pp",
-  "hdp-monitor-webserver/manifests/*.pp",
-  "hdp-repos/manifests/*.pp"
-]
-
-rolesToClass = {
-  'GLUSTERFS': 'hdp-hadoop::glusterfs',
-  'GLUSTERFS_CLIENT': 'hdp-hadoop::glusterfs_client',
-  'GLUSTERFS_SERVICE_CHECK': 'hdp-hadoop::glusterfs_service_check',
-  'NAMENODE': 'hdp-hadoop::namenode',
-  'DATANODE': 'hdp-hadoop::datanode',
-  'SECONDARY_NAMENODE': 'hdp-hadoop::snamenode',
-  'JOBTRACKER': 'hdp-hadoop::jobtracker',
-  'TASKTRACKER': 'hdp-hadoop::tasktracker',
-  'RESOURCEMANAGER': 'hdp-yarn::resourcemanager',
-  'NODEMANAGER': 'hdp-yarn::nodemanager',
-  'HISTORYSERVER': 'hdp-yarn::historyserver',
-  'YARN_CLIENT': 'hdp-yarn::yarn_client',
-  'HDFS_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCE2_CLIENT': 'hdp-yarn::mapreducev2_client',
-  'ZOOKEEPER_SERVER': 'hdp-zookeeper',
-  'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
-  'HBASE_MASTER': 'hdp-hbase::master',
-  'HBASE_REGIONSERVER': 'hdp-hbase::regionserver',
-  'HBASE_CLIENT': 'hdp-hbase::client',
-  'PIG': 'hdp-pig',
-  'SQOOP': 'hdp-sqoop',
-  'OOZIE_SERVER': 'hdp-oozie::server',
-  'OOZIE_CLIENT': 'hdp-oozie::client',
-  'HIVE_CLIENT': 'hdp-hive::client',
-  'HCAT': 'hdp-hcat',
-  'HIVE_SERVER': 'hdp-hive::server',
-  'HIVE_METASTORE': 'hdp-hive::metastore',
-  'MYSQL_SERVER': 'hdp-mysql::server',
-  'WEBHCAT_SERVER': 'hdp-templeton::server',
-  'DASHBOARD': 'hdp-dashboard',
-  'GANGLIA_SERVER': 'hdp-ganglia::server',
-  'GANGLIA_MONITOR': 'hdp-ganglia::monitor',
-  'HTTPD': 'hdp-monitor-webserver',
-  'HUE_SERVER': 'hdp-hue::server',
-  'HDFS_SERVICE_CHECK': 'hdp-hadoop::hdfs::service_check',
-  'MAPREDUCE_SERVICE_CHECK': 'hdp-hadoop::mapred::service_check',
-  'MAPREDUCE2_SERVICE_CHECK': 'hdp-yarn::mapred2::service_check',
-  'ZOOKEEPER_SERVICE_CHECK': 'hdp-zookeeper::zookeeper::service_check',
-  'ZOOKEEPER_QUORUM_SERVICE_CHECK': 'hdp-zookeeper::quorum::service_check',
-  'HBASE_SERVICE_CHECK': 'hdp-hbase::hbase::service_check',
-  'HIVE_SERVICE_CHECK': 'hdp-hive::hive::service_check',
-  'HCAT_SERVICE_CHECK': 'hdp-hcat::hcat::service_check',
-  'OOZIE_SERVICE_CHECK': 'hdp-oozie::oozie::service_check',
-  'PIG_SERVICE_CHECK': 'hdp-pig::pig::service_check',
-  'SQOOP_SERVICE_CHECK': 'hdp-sqoop::sqoop::service_check',
-  'WEBHCAT_SERVICE_CHECK': 'hdp-templeton::templeton::service_check',
-  'DASHBOARD_SERVICE_CHECK': 'hdp-dashboard::dashboard::service_check',
-  'DECOMMISSION_DATANODE': 'hdp-hadoop::hdfs::decommission',
-  'HUE_SERVICE_CHECK': 'hdp-hue::service_check',
-  'RESOURCEMANAGER_SERVICE_CHECK': 'hdp-yarn::resourcemanager::service_check',
-  'HISTORYSERVER_SERVICE_CHECK': 'hdp-yarn::historyserver::service_check',
-  'TEZ_CLIENT': 'hdp-tez::tez_client',
-  'YARN_SERVICE_CHECK': 'hdp-yarn::yarn::service_check',
-  'FLUME_SERVER': 'hdp-flume',
-  'JOURNALNODE': 'hdp-hadoop::journalnode',
-  'ZKFC': 'hdp-hadoop::zkfc'
-}
-
-serviceStates = {
-  'START': 'running',
-  'INSTALL': 'installed_and_configured',
-  'STOP': 'stopped'
-}
-
-servicesToPidNames = {
-  'GLUSTERFS' : 'glusterd.pid$',
-  'NAMENODE': 'hadoop-{USER}-namenode.pid$',
-  'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
-  'DATANODE': 'hadoop-{USER}-datanode.pid$',
-  'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
-  'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
-  'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
-  'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
-  'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
-  'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
-  'ZKFC': 'hadoop-{USER}-zkfc.pid$',
-  'OOZIE_SERVER': 'oozie.pid',
-  'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
-  'FLUME_SERVER': 'flume-node.pid',
-  'TEMPLETON_SERVER': 'templeton.pid',
-  'GANGLIA_SERVER': 'gmetad.pid',
-  'GANGLIA_MONITOR': 'gmond.pid',
-  'HBASE_MASTER': 'hbase-{USER}-master.pid',
-  'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
-  'HCATALOG_SERVER': 'webhcat.pid',
-  'KERBEROS_SERVER': 'kadmind.pid',
-  'HIVE_SERVER': 'hive-server.pid',
-  'HIVE_METASTORE': 'hive.pid',
-  'MYSQL_SERVER': 'mysqld.pid',
-  'HUE_SERVER': '/var/run/hue/supervisor.pid',
-  'WEBHCAT_SERVER': 'webhcat.pid',
-}
-
-#Each service, which's pid depends on user should provide user mapping
-servicesToLinuxUser = {
-  'NAMENODE': 'hdfs_user',
-  'SECONDARY_NAMENODE': 'hdfs_user',
-  'DATANODE': 'hdfs_user',
-  'JOURNALNODE': 'hdfs_user',
-  'ZKFC': 'hdfs_user',
-  'JOBTRACKER': 'mapred_user',
-  'TASKTRACKER': 'mapred_user',
-  'RESOURCEMANAGER': 'yarn_user',
-  'NODEMANAGER': 'yarn_user',
-  'HISTORYSERVER': 'mapred_user',
-  'HBASE_MASTER': 'hbase_user',
-  'HBASE_REGIONSERVER': 'hbase_user',
-}
-
-pidPathVars = [
-  {'var' : 'glusterfs_pid_dir_prefix',
-    'defaultValue' : '/var/run'},
-  {'var' : 'hadoop_pid_dir_prefix',
-    'defaultValue' : '/var/run/hadoop'},
-  {'var' : 'hadoop_pid_dir_prefix',
-    'defaultValue' : '/var/run/hadoop'},
-  {'var' : 'ganglia_runtime_dir',
-    'defaultValue' : '/var/run/ganglia/hdp'},
-  {'var' : 'hbase_pid_dir',
-    'defaultValue' : '/var/run/hbase'},
-  {'var' : 'zk_pid_dir',
-    'defaultValue' : '/var/run/zookeeper'},
-  {'var' : 'oozie_pid_dir',
-    'defaultValue' : '/var/run/oozie'},
-  {'var' : 'hcat_pid_dir',
-    'defaultValue' : '/var/run/webhcat'},
-  {'var' : 'hive_pid_dir',
-    'defaultValue' : '/var/run/hive'},
-  {'var' : 'mysqld_pid_dir',
-    'defaultValue' : '/var/run/mysqld'},
-  {'var' : 'hcat_pid_dir',
-    'defaultValue' : '/var/run/webhcat'},
-  {'var' : 'yarn_pid_dir_prefix',
-    'defaultValue' : '/var/run/hadoop-yarn'},
-  {'var' : 'mapred_pid_dir_prefix',
-    'defaultValue' : '/var/run/hadoop-mapreduce'},
-]
-
-if 'AMBARI_AGENT_CONF_DIR' in os.environ:
-  configFile = os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
-else:
-  configFile = "/etc/ambari-agent/conf/ambari-agent.ini"
-
-if 'AMBARI_AGENT_LOG_DIR' in os.environ:
-  logfile = os.path.join(os.environ['AMBARI_AGENT_LOG_DIR'], "ambari-agent.log")
-else:
-  logfile = "/var/log/ambari-agent/ambari-agent.log"
-
-if 'AMBARI_AGENT_OUT_DIR' in os.environ:
-  outfile = os.path.join(os.environ['AMBARI_AGENT_OUT_DIR'], "ambari-agent.out")
-else:
-  outfile = "/var/log/ambari-agent/ambari-agent.out"

+ 0 - 232
ambari-agent/src/main/python/ambari_agent/AgentConfig_windows.py

@@ -1,232 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-
-content = """
-
-[server]
-hostname=localhost
-url_port=8440
-secured_url_port=8441
-
-[agent]
-prefix=\\tmp\\ambari-agent
-data_cleanup_interval=86400
-data_cleanup_max_age=2592000
-ping_port=8670
-cache_dir=\\var\\lib\\ambari-agent\\cache
-
-[services]
-
-[python]
-custom_actions_dir = \\var\\lib\\ambari-agent\\resources\\custom_actions
-
-[command]
-maxretries=2
-sleepBetweenRetries=1
-
-[security]
-keysdir=\\tmp\\ambari-agent
-server_crt=ca.crt
-passphrase_env_var_name=AMBARI_PASSPHRASE
-
-[heartbeat]
-state_interval = 6
-dirs=\\etc\\hadoop,\\etc\\hadoop\\conf,\\var\\run\\hadoop,\\var\\log\\hadoop
-rpms=glusterfs,openssl,wget,net-snmp,ntpd,ganglia,nagios,glusterfs
-log_lines_count=300
-
-"""
-
-imports = [
-  "hdp\\manifests\\*.pp",
-  "hdp-hadoop\\manifests\\*.pp",
-  "hdp-hbase\\manifests\\*.pp",
-  "hdp-zookeeper\\manifests\\*.pp",
-  "hdp-oozie\\manifests\\*.pp",
-  "hdp-pig\\manifests\\*.pp",
-  "hdp-sqoop\\manifests\\*.pp",
-  "hdp-templeton\\manifests\\*.pp",
-  "hdp-hive\\manifests\\*.pp",
-  "hdp-hcat\\manifests\\*.pp",
-  "hdp-mysql\\manifests\\*.pp",
-  "hdp-monitor-webserver\\manifests\\*.pp",
-  "hdp-repos\\manifests\\*.pp"
-]
-
-rolesToClass = {
-  'GLUSTERFS': 'hdp-hadoop::glusterfs',
-  'GLUSTERFS_CLIENT': 'hdp-hadoop::glusterfs_client',
-  'GLUSTERFS_SERVICE_CHECK': 'hdp-hadoop::glusterfs_service_check',
-  'NAMENODE': 'hdp-hadoop::namenode',
-  'DATANODE': 'hdp-hadoop::datanode',
-  'SECONDARY_NAMENODE': 'hdp-hadoop::snamenode',
-  'JOBTRACKER': 'hdp-hadoop::jobtracker',
-  'TASKTRACKER': 'hdp-hadoop::tasktracker',
-  'RESOURCEMANAGER': 'hdp-yarn::resourcemanager',
-  'NODEMANAGER': 'hdp-yarn::nodemanager',
-  'HISTORYSERVER': 'hdp-yarn::historyserver',
-  'YARN_CLIENT': 'hdp-yarn::yarn_client',
-  'HDFS_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCE2_CLIENT': 'hdp-yarn::mapreducev2_client',
-  'ZOOKEEPER_SERVER': 'hdp-zookeeper',
-  'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
-  'HBASE_MASTER': 'hdp-hbase::master',
-  'HBASE_REGIONSERVER': 'hdp-hbase::regionserver',
-  'HBASE_CLIENT': 'hdp-hbase::client',
-  'PIG': 'hdp-pig',
-  'SQOOP': 'hdp-sqoop',
-  'OOZIE_SERVER': 'hdp-oozie::server',
-  'OOZIE_CLIENT': 'hdp-oozie::client',
-  'HIVE_CLIENT': 'hdp-hive::client',
-  'HCAT': 'hdp-hcat',
-  'HIVE_SERVER': 'hdp-hive::server',
-  'HIVE_METASTORE': 'hdp-hive::metastore',
-  'MYSQL_SERVER': 'hdp-mysql::server',
-  'WEBHCAT_SERVER': 'hdp-templeton::server',
-  'DASHBOARD': 'hdp-dashboard',
-  'NAGIOS_SERVER': 'hdp-nagios::server',
-  'GANGLIA_SERVER': 'hdp-ganglia::server',
-  'GANGLIA_MONITOR': 'hdp-ganglia::monitor',
-  'HTTPD': 'hdp-monitor-webserver',
-  'HUE_SERVER': 'hdp-hue::server',
-  'HDFS_SERVICE_CHECK': 'hdp-hadoop::hdfs::service_check',
-  'MAPREDUCE_SERVICE_CHECK': 'hdp-hadoop::mapred::service_check',
-  'MAPREDUCE2_SERVICE_CHECK': 'hdp-yarn::mapred2::service_check',
-  'ZOOKEEPER_SERVICE_CHECK': 'hdp-zookeeper::zookeeper::service_check',
-  'ZOOKEEPER_QUORUM_SERVICE_CHECK': 'hdp-zookeeper::quorum::service_check',
-  'HBASE_SERVICE_CHECK': 'hdp-hbase::hbase::service_check',
-  'HIVE_SERVICE_CHECK': 'hdp-hive::hive::service_check',
-  'HCAT_SERVICE_CHECK': 'hdp-hcat::hcat::service_check',
-  'OOZIE_SERVICE_CHECK': 'hdp-oozie::oozie::service_check',
-  'PIG_SERVICE_CHECK': 'hdp-pig::pig::service_check',
-  'SQOOP_SERVICE_CHECK': 'hdp-sqoop::sqoop::service_check',
-  'WEBHCAT_SERVICE_CHECK': 'hdp-templeton::templeton::service_check',
-  'DASHBOARD_SERVICE_CHECK': 'hdp-dashboard::dashboard::service_check',
-  'DECOMMISSION_DATANODE': 'hdp-hadoop::hdfs::decommission',
-  'HUE_SERVICE_CHECK': 'hdp-hue::service_check',
-  'RESOURCEMANAGER_SERVICE_CHECK': 'hdp-yarn::resourcemanager::service_check',
-  'HISTORYSERVER_SERVICE_CHECK': 'hdp-yarn::historyserver::service_check',
-  'TEZ_CLIENT': 'hdp-tez::tez_client',
-  'YARN_SERVICE_CHECK': 'hdp-yarn::yarn::service_check',
-  'FLUME_SERVER': 'hdp-flume',
-  'JOURNALNODE': 'hdp-hadoop::journalnode',
-  'ZKFC': 'hdp-hadoop::zkfc'
-}
-
-serviceStates = {
-  'START': 'running',
-  'INSTALL': 'installed_and_configured',
-  'STOP': 'stopped'
-}
-
-servicesToPidNames = {
-  'GLUSTERFS' : 'glusterd.pid$',
-  'NAMENODE': 'hadoop-{USER}-namenode.pid$',
-  'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
-  'DATANODE': 'hadoop-{USER}-datanode.pid$',
-  'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
-  'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
-  'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
-  'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
-  'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
-  'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
-  'ZKFC': 'hadoop-{USER}-zkfc.pid$',
-  'OOZIE_SERVER': 'oozie.pid',
-  'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
-  'FLUME_SERVER': 'flume-node.pid',
-  'TEMPLETON_SERVER': 'templeton.pid',
-  'NAGIOS_SERVER': 'nagios.pid',
-  'GANGLIA_SERVER': 'gmetad.pid',
-  'GANGLIA_MONITOR': 'gmond.pid',
-  'HBASE_MASTER': 'hbase-{USER}-master.pid',
-  'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
-  'HCATALOG_SERVER': 'webhcat.pid',
-  'KERBEROS_SERVER': 'kadmind.pid',
-  'HIVE_SERVER': 'hive-server.pid',
-  'HIVE_METASTORE': 'hive.pid',
-  'MYSQL_SERVER': 'mysqld.pid',
-  'HUE_SERVER': '\\var\\run\\hue\\supervisor.pid',
-  'WEBHCAT_SERVER': 'webhcat.pid',
-}
-
-#Each service, which's pid depends on user should provide user mapping
-servicesToLinuxUser = {
-  'NAMENODE': 'hdfs_user',
-  'SECONDARY_NAMENODE': 'hdfs_user',
-  'DATANODE': 'hdfs_user',
-  'JOURNALNODE': 'hdfs_user',
-  'ZKFC': 'hdfs_user',
-  'JOBTRACKER': 'mapred_user',
-  'TASKTRACKER': 'mapred_user',
-  'RESOURCEMANAGER': 'yarn_user',
-  'NODEMANAGER': 'yarn_user',
-  'HISTORYSERVER': 'mapred_user',
-  'HBASE_MASTER': 'hbase_user',
-  'HBASE_REGIONSERVER': 'hbase_user',
-}
-
-pidPathVars = [
-  {'var' : 'glusterfs_pid_dir_prefix',
-   'defaultValue' : '\\var\\run'},
-  {'var' : 'hadoop_pid_dir_prefix',
-   'defaultValue' : '\\var\\run\\hadoop'},
-  {'var' : 'hadoop_pid_dir_prefix',
-   'defaultValue' : '\\var\\run\\hadoop'},
-  {'var' : 'ganglia_runtime_dir',
-   'defaultValue' : '\\var\\run\\ganglia\\hdp'},
-  {'var' : 'hbase_pid_dir',
-   'defaultValue' : '\\var\\run\\hbase'},
-  {'var' : '',
-   'defaultValue' : '\\var\\run\\nagios'},
-  {'var' : 'zk_pid_dir',
-   'defaultValue' : '\\var\\run\\zookeeper'},
-  {'var' : 'oozie_pid_dir',
-   'defaultValue' : '\\var\\run\\oozie'},
-  {'var' : 'hcat_pid_dir',
-   'defaultValue' : '\\var\\run\\webhcat'},
-  {'var' : 'hive_pid_dir',
-   'defaultValue' : '\\var\\run\\hive'},
-  {'var' : 'mysqld_pid_dir',
-   'defaultValue' : '\\var\\run\\mysqld'},
-  {'var' : 'hcat_pid_dir',
-   'defaultValue' : '\\var\\run\\webhcat'},
-  {'var' : 'yarn_pid_dir_prefix',
-   'defaultValue' : '\\var\\run\\hadoop-yarn'},
-  {'var' : 'mapred_pid_dir_prefix',
-   'defaultValue' : '\\var\\run\\hadoop-mapreduce'},
-]
-
-if 'AMBARI_AGENT_CONF_DIR' in os.environ:
-  configFile = os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
-else:
-  configFile = "ambari-agent.ini"
-
-if 'AMBARI_AGENT_LOG_DIR' in os.environ:
-  logfile = os.path.join(os.environ['AMBARI_AGENT_LOG_DIR'], "ambari-agent.log")
-else:
-  logfile = "\\var\\log\\ambari-agent-1.3.0-SNAPSHOT\\ambari-agent.log"
-
-if 'AMBARI_AGENT_OUT_DIR' in os.environ:
-  outfile = os.path.join(os.environ['AMBARI_AGENT_OUT_DIR'], "ambari-agent.out")
-else:
-  outfile = "\\var\\log\\ambari-agent-1.3.0-SNAPSHOT\\ambari-agent.out"

+ 130 - 51
ambari-agent/src/main/python/ambari_agent/AmbariConfig.py

@@ -18,35 +18,128 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
-import platform
-
 import ConfigParser
 import StringIO
 import json
 from NetUtil import NetUtil
+import os
+content = """
+
+[server]
+hostname=localhost
+url_port=8440
+secured_url_port=8441
+
+[agent]
+prefix={ps}tmp{ps}ambari-agent
+tmp_dir={ps}tmp{ps}ambari-agent{ps}tmp
+data_cleanup_interval=86400
+data_cleanup_max_age=2592000
+data_cleanup_max_size_MB = 100
+ping_port=8670
+cache_dir={ps}var{ps}lib{ps}ambari-agent{ps}cache
+
+[services]
+
+[python]
+custom_actions_dir = {ps}var{ps}lib{ps}ambari-agent{ps}resources{ps}custom_actions
+
+[command]
+maxretries=2
+sleepBetweenRetries=1
+
+[security]
+keysdir={ps}tmp{ps}ambari-agent
+server_crt=ca.crt
+passphrase_env_var_name=AMBARI_PASSPHRASE
+
+[heartbeat]
+state_interval = 6
+dirs={ps}etc{ps}hadoop,{ps}etc{ps}hadoop{ps}conf,{ps}var{ps}run{ps}hadoop,{ps}var{ps}log{ps}hadoop
+log_lines_count=300
+
+""".format(ps=os.sep)
+
+
+servicesToPidNames = {
+  'GLUSTERFS' : 'glusterd.pid$',
+  'NAMENODE': 'hadoop-{USER}-namenode.pid$',
+  'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
+  'DATANODE': 'hadoop-{USER}-datanode.pid$',
+  'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
+  'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
+  'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
+  'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
+  'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
+  'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
+  'ZKFC': 'hadoop-{USER}-zkfc.pid$',
+  'OOZIE_SERVER': 'oozie.pid',
+  'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
+  'FLUME_SERVER': 'flume-node.pid',
+  'TEMPLETON_SERVER': 'templeton.pid',
+  'GANGLIA_SERVER': 'gmetad.pid',
+  'GANGLIA_MONITOR': 'gmond.pid',
+  'HBASE_MASTER': 'hbase-{USER}-master.pid',
+  'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
+  'HCATALOG_SERVER': 'webhcat.pid',
+  'KERBEROS_SERVER': 'kadmind.pid',
+  'HIVE_SERVER': 'hive-server.pid',
+  'HIVE_METASTORE': 'hive.pid',
+  'MYSQL_SERVER': 'mysqld.pid',
+  'HUE_SERVER': '/var/run/hue/supervisor.pid',
+  'WEBHCAT_SERVER': 'webhcat.pid',
+}
+
+# Each service whose pid depends on a user should provide a user mapping
+servicesToLinuxUser = {
+  'NAMENODE': 'hdfs_user',
+  'SECONDARY_NAMENODE': 'hdfs_user',
+  'DATANODE': 'hdfs_user',
+  'JOURNALNODE': 'hdfs_user',
+  'ZKFC': 'hdfs_user',
+  'JOBTRACKER': 'mapred_user',
+  'TASKTRACKER': 'mapred_user',
+  'RESOURCEMANAGER': 'yarn_user',
+  'NODEMANAGER': 'yarn_user',
+  'HISTORYSERVER': 'mapred_user',
+  'HBASE_MASTER': 'hbase_user',
+  'HBASE_REGIONSERVER': 'hbase_user',
+}
+
+pidPathVars = [
+  {'var' : 'glusterfs_pid_dir_prefix',
+   'defaultValue' : '/var/run'},
+  {'var' : 'hadoop_pid_dir_prefix',
+   'defaultValue' : '/var/run/hadoop'},
+  {'var' : 'hadoop_pid_dir_prefix',
+   'defaultValue' : '/var/run/hadoop'},
+  {'var' : 'ganglia_runtime_dir',
+   'defaultValue' : '/var/run/ganglia/hdp'},
+  {'var' : 'hbase_pid_dir',
+   'defaultValue' : '/var/run/hbase'},
+  {'var' : 'zk_pid_dir',
+   'defaultValue' : '/var/run/zookeeper'},
+  {'var' : 'oozie_pid_dir',
+   'defaultValue' : '/var/run/oozie'},
+  {'var' : 'hcat_pid_dir',
+   'defaultValue' : '/var/run/webhcat'},
+  {'var' : 'hive_pid_dir',
+   'defaultValue' : '/var/run/hive'},
+  {'var' : 'mysqld_pid_dir',
+   'defaultValue' : '/var/run/mysqld'},
+  {'var' : 'hcat_pid_dir',
+   'defaultValue' : '/var/run/webhcat'},
+  {'var' : 'yarn_pid_dir_prefix',
+   'defaultValue' : '/var/run/hadoop-yarn'},
+  {'var' : 'mapred_pid_dir_prefix',
+   'defaultValue' : '/var/run/hadoop-mapreduce'},
+]
 
-SETUP_ACTION = "setup"
-START_ACTION = "start"
-STOP_ACTION = "stop"
-RESET_ACTION = "reset"
-STATUS_ACTION = "status"
-DEBUG_ACTION = "debug"
-
-IS_WINDOWS = platform.system() == "Windows"
-
-if not IS_WINDOWS:
-  from AgentConfig_linux import *
-else:
-  from AgentConfig_windows import *
 
-config = ConfigParser.RawConfigParser()
 
-s = StringIO.StringIO(content)
-config.readfp(s)
 
 class AmbariConfig:
   TWO_WAY_SSL_PROPERTY = "security.server.two_way_ssl"
-  CONFIG_FILE = "/etc/ambari-agent/conf/ambari-agent.ini"
   SERVER_CONNECTION_INFO = "{0}/connection_info"
   CONNECTION_PROTOCOL = "https"
 
@@ -73,46 +166,32 @@ class AmbariConfig:
   def add_section(self, section):
     self.config.add_section(section)
 
-  @staticmethod
-  def getConfigFile():
-    global configFile
-    return configFile
-
-  @staticmethod
-  def getLogFile():
-    global logfile
-    return logfile
-
-  @staticmethod
-  def getOutFile():
-    global outfile
-    return outfile
-
   def setConfig(self, customConfig):
     self.config = customConfig
 
   def getConfig(self):
     return self.config
 
-  def getImports(self):
-    global imports
-    return imports
-
-  def getRolesToClass(self):
-    global rolesToClass
-    return rolesToClass
-
-  def getServiceStates(self):
-    global serviceStates
-    return serviceStates
+  @staticmethod
+  def getConfigFile():
+    if 'AMBARI_AGENT_CONF_DIR' in os.environ:
+      return os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
+    else:
+      return os.path.join(os.sep, "etc", "ambari-agent", "conf", "ambari-agent.ini")
 
-  def getServicesToPidNames(self):
-    global servicesToPidNames
-    return servicesToPidNames
+  @staticmethod
+  def getLogFile():
+    if 'AMBARI_AGENT_LOG_DIR' in os.environ:
+      return os.path.join(os.environ['AMBARI_AGENT_LOG_DIR'], "ambari-agent.log")
+    else:
+      return os.path.join(os.sep, "var", "log", "ambari-agent", "ambari-agent.log")
 
-  def pidPathVars(self):
-    global pidPathVars
-    return pidPathVars
+  @staticmethod
+  def getOutFile():
+    if 'AMBARI_AGENT_OUT_DIR' in os.environ:
+      return os.path.join(os.environ['AMBARI_AGENT_OUT_DIR'], "ambari-agent.out")
+    else:
+      return os.path.join(os.sep, "var", "log", "ambari-agent", "ambari-agent.out")
 
   def has_option(self, section, option):
     return self.config.has_option(section, option)

+ 2 - 12
ambari-agent/src/main/python/ambari_agent/Controller.py

@@ -42,25 +42,19 @@ from FileCache import FileCache
 from NetUtil import NetUtil
 from LiveStatus import LiveStatus
 from AlertSchedulerHandler import AlertSchedulerHandler
+from HeartbeatHandlers import HeartbeatStopHandlers, bind_signal_handlers
 
 logger = logging.getLogger()
 
 AGENT_AUTO_RESTART_EXIT_CODE = 77
 
-IS_WINDOWS = platform.system() == "Windows"
-
 class Controller(threading.Thread):
 
   def __init__(self, config, heartbeat_stop_callback = None, range=30):
     threading.Thread.__init__(self)
     logger.debug('Initializing Controller RPC thread.')
-
     if heartbeat_stop_callback is None:
-      if IS_WINDOWS:
-        from HeartbeatHandlers_windows import HeartbeatStopHandler
-      else:
-        from HeartbeatStopHandler_linux import HeartbeatStopHandler
-      heartbeat_stop_callback = HeartbeatStopHandler()
+      heartbeat_stop_callback = HeartbeatStopHandlers()
 
     self.lock = threading.Lock()
     self.safeMode = True
@@ -419,10 +413,6 @@ class Controller(threading.Thread):
 
 def main(argv=None):
   # Allow Ctrl-C
-  if IS_WINDOWS:
-    from HeartbeatHandlers_windows import bind_signal_handlers
-  else:
-    from HeartbeatStopHandler_linux import bind_signal_handlers
 
   logger.setLevel(logging.INFO)
   formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - \

+ 16 - 20
ambari-agent/src/main/python/ambari_agent/Facter.py

@@ -30,7 +30,8 @@ import subprocess
 from shell import shellRunner
 import time
 import uuid
-from ambari_commons import OSCheck
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
 
 log = logging.getLogger()
 
@@ -47,7 +48,7 @@ def run_os_command(cmd):
   return process.returncode, stdoutdata, stderrdata
 
 
-class FacterBase():
+class Facter(object):
   def __init__(self):
     pass
 
@@ -180,8 +181,8 @@ class FacterBase():
   def convertSizeMbToGb(size):
     return "%0.2f GB" % round(float(size) / (1024.0), 2)
 
-
-class FacterWindows(FacterBase):
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class FacterWindows(Facter):
   GET_SYSTEM_INFO_CMD = "systeminfo"
   GET_MEMORY_CMD = '$mem =(Get-WMIObject Win32_OperatingSystem -ComputerName "LocalHost" ); echo "$($mem.FreePhysicalMemory) $($mem.TotalVisibleMemorySize)"'
   GET_PAGE_FILE_INFO = '$pgo=(Get-WmiObject Win32_PageFileUsage); echo "$($pgo.AllocatedBaseSize) $($pgo.AllocatedBaseSize-$pgo.CurrentUsage)"'
@@ -274,13 +275,14 @@ class FacterWindows(FacterBase):
     return 0
 
   def facterInfo(self):
-    facterInfo = FacterBase.facterInfo(self)
-    facterInfo['swapsize'] = FacterBase.convertSizeMbToGb(self.getSwapSize())
-    facterInfo['swapfree'] = FacterBase.convertSizeMbToGb(self.getSwapFree())
+    facterInfo = super(FacterWindows, self).facterInfo()
+    facterInfo['swapsize'] = Facter.convertSizeMbToGb(self.getSwapSize())
+    facterInfo['swapfree'] = Facter.convertSizeMbToGb(self.getSwapFree())
     return facterInfo
 
 
-class FacterLinux(FacterBase):
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class FacterLinux(Facter):
   # selinux command
   GET_SE_LINUX_ST_CMD = "/usr/sbin/sestatus"
   GET_IFCONFIG_CMD = "ifconfig"
@@ -289,9 +291,9 @@ class FacterLinux(FacterBase):
 
   def __init__(self):
 
-    self.DATA_IFCONFIG_OUTPUT = Facter.setDataIfConfigOutput()
-    self.DATA_UPTIME_OUTPUT = Facter.setDataUpTimeOutput()
-    self.DATA_MEMINFO_OUTPUT = Facter.setMemInfoOutput()
+    self.DATA_IFCONFIG_OUTPUT = FacterLinux.setDataIfConfigOutput()
+    self.DATA_UPTIME_OUTPUT = FacterLinux.setDataUpTimeOutput()
+    self.DATA_MEMINFO_OUTPUT = FacterLinux.setMemInfoOutput()
 
   @staticmethod
   def setDataIfConfigOutput():
@@ -442,19 +444,13 @@ class FacterLinux(FacterBase):
       return 0
 
   def facterInfo(self):
-    facterInfo = FacterBase.facterInfo(self)
+    facterInfo = super(FacterLinux, self).facterInfo()
     facterInfo['selinux'] = self.isSeLinux()
-    facterInfo['swapsize'] = FacterBase.convertSizeKbToGb(self.getSwapSize())
-    facterInfo['swapfree'] = FacterBase.convertSizeKbToGb(self.getSwapFree())
+    facterInfo['swapsize'] = Facter.convertSizeKbToGb(self.getSwapSize())
+    facterInfo['swapfree'] = Facter.convertSizeKbToGb(self.getSwapFree())
     return facterInfo
 
 
-if platform.system() == "Windows":
-  Facter = FacterWindows
-else:
-  Facter = FacterLinux
-
-
 def main(argv=None):
   print Facter().facterInfo()
 

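The module-level alias that previously picked FacterWindows or FacterLinux via platform.system() is gone; with @OsFamilyImpl the name Facter itself resolves to the family-specific subclass, so main() above keeps working unchanged. Call sites stay OS-agnostic (a sketch, assuming the rebinding behavior outlined earlier):

    from ambari_agent.Facter import Facter

    info = Facter().facterInfo()  # FacterLinux on Linux, FacterWindows on Windows
    print info['swapsize'], info['swapfree']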
+ 2 - 2
ambari-agent/src/main/python/ambari_agent/Hardware.py

@@ -24,7 +24,7 @@ import subprocess
 import platform
 from shell import shellRunner
 from Facter import Facter
-
+from ambari_commons.os_check import OSConst, OSCheck
 logger = logging.getLogger()
 
 class Hardware:
@@ -62,7 +62,7 @@ class Hardware:
 
   @staticmethod
   def osdisks():
-    if platform.system() == "Windows":
+    if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
       return Hardware._osdisks_win()
     else:
       return Hardware._osdisks_linux()

+ 147 - 0
ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers.py

@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from ambari_commons.exceptions import FatalException
+from ambari_commons.os_check import OSConst, OSCheck
+import os
+import logging
+import signal
+import threading
+import traceback
+from ambari_commons.os_family_impl import OsFamilyImpl
+logger = logging.getLogger()
+
+_handler = None
+
+class HeartbeatStopHandlers(object): pass
+
+# windows impl
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HeartbeatStopHandlersWindows(HeartbeatStopHandlers):
+  def __init__(self, stopEvent=None):
+    import win32event
+    # Event is used for synchronizing heartbeat iterations (to make possible
+    # manual wait() interruption between heartbeats )
+    self._heventHeartbeat = win32event.CreateEvent(None, 0, 0, None)
+
+    # Event is used to stop the Agent process
+    if stopEvent is None:
+      # Allow standalone testing
+      self._heventStop = win32event.CreateEvent(None, 0, 0, None)
+    else:
+      # Allow one unique event per process
+      self._heventStop = stopEvent
+
+  def set_heartbeat(self):
+    import win32event
+
+    win32event.SetEvent(self._heventHeartbeat)
+
+  def reset_heartbeat(self):
+    import win32event
+
+    win32event.ResetEvent(self._heventHeartbeat)
+
+  def wait(self, timeout1, timeout2=0):
+    import win32event
+
+    timeout = int(timeout1 + timeout2) * 1000
+
+    result = win32event.WaitForMultipleObjects([self._heventStop, self._heventHeartbeat], False, timeout)
+    if (
+          win32event.WAIT_OBJECT_0 != result and win32event.WAIT_OBJECT_0 + 1 != result and win32event.WAIT_TIMEOUT != result):
+      raise FatalException(-1, "Error waiting for stop/heartbeat events: " + str(result))
+    if (win32event.WAIT_TIMEOUT == result):
+      return -1
+    return result
+
+# linux impl
+
+def signal_handler(signum, frame):
+  global _handler
+  _handler.set_stop()
+
+
+def debug(sig, frame):
+  """Interrupt running process, and provide a python prompt for
+  interactive debugging."""
+  d = {'_frame': frame}  # Allow access to frame object.
+  d.update(frame.f_globals)  # Unless shadowed by global
+  d.update(frame.f_locals)
+
+  message = "Signal received : entering python shell.\nTraceback:\n"
+  message += ''.join(traceback.format_stack(frame))
+  logger.info(message)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HeartbeatStopHandlersLinux(HeartbeatStopHandlers):
+  def __init__(self, stopEvent=None):
+    # Event is used for synchronizing heartbeat iterations (to make possible
+    # manual wait() interruption between heartbeats )
+    self.heartbeat_wait_event = threading.Event()
+
+    # Event is used to stop the Agent process
+    if stopEvent is None:
+      # Allow standalone testing
+      self.stop_event = threading.Event()
+    else:
+      # Allow one unique event per process
+      self.stop_event = stopEvent
+
+  def set_heartbeat(self):
+    self.heartbeat_wait_event.set()
+
+  def reset_heartbeat(self):
+    self.heartbeat_wait_event.clear()
+
+  def set_stop(self):
+    self.stop_event.set()
+
+  def wait(self, timeout1, timeout2=0):
+    if self.heartbeat_wait_event.wait(timeout=timeout1):
+      # Event signaled, exit
+      return 0
+    # Stop loop when stop event received
+    # Otherwise sleep a bit more to allow STATUS_COMMAND results to be collected
+    # and sent in one heartbeat. Also avoid server overload with heartbeats
+    if self.stop_event.wait(timeout=timeout2):
+      logger.info("Stop event received")
+      return 1
+    # Timeout
+    return -1
+
+
+
+
+def bind_signal_handlers(agentPid):
+  global _handler
+  if OSCheck.get_os_family() != OSConst.WINSRV_FAMILY:
+    if os.getpid() == agentPid:
+      signal.signal(signal.SIGINT, signal_handler)
+      signal.signal(signal.SIGTERM, signal_handler)
+      signal.signal(signal.SIGUSR1, debug)
+    _handler = HeartbeatStopHandlersLinux()
+  else:
+    _handler = HeartbeatStopHandlersWindows()
+  return _handler
+
+

+ 0 - 58
ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers_windows.py

@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import win32event
-
-from ambari_commons.exceptions import FatalException
-
-
-def bind_signal_handlers(agentPid):
-  return HeartbeatStopHandler()
-
-
-class HeartbeatStopHandler:
-  def __init__(self, stopEvent = None):
-    # Event is used for synchronizing heartbeat iterations (to make possible
-    # manual wait() interruption between heartbeats )
-    self._heventHeartbeat = win32event.CreateEvent(None, 0, 0, None)
-
-    # Event is used to stop the Agent process
-    if stopEvent is None:
-      #Allow standalone testing
-      self._heventStop = win32event.CreateEvent(None, 0, 0, None)
-    else:
-      #Allow one unique event per process
-      self._heventStop = stopEvent
-
-  def set_heartbeat(self):
-    win32event.SetEvent(self._heventHeartbeat)
-
-  def reset_heartbeat(self):
-    win32event.ResetEvent(self._heventHeartbeat)
-
-  def wait(self, timeout1, timeout2 = 0):
-    timeout = int(timeout1 + timeout2) * 1000
-
-    result = win32event.WaitForMultipleObjects([self._heventStop, self._heventHeartbeat], False, timeout)
-    if(win32event.WAIT_OBJECT_0 != result and win32event.WAIT_OBJECT_0 + 1 != result and win32event.WAIT_TIMEOUT != result):
-      raise FatalException(-1, "Error waiting for stop/heartbeat events: " + string(result))
-    if(win32event.WAIT_TIMEOUT == result):
-      return -1
-    return result - win32event.WAIT_OBJECT_0

+ 0 - 91
ambari-agent/src/main/python/ambari_agent/HeartbeatStopHandler_linux.py

@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-import logging
-import signal
-import threading
-import traceback
-
-
-logger = logging.getLogger()
-
-_handler = None
-
-def signal_handler(signum, frame):
-  _handler.set_stop()
-
-def bind_signal_handlers(agentPid):
-  if os.getpid() == agentPid:
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGTERM, signal_handler)
-    signal.signal(signal.SIGUSR1, debug)
-
-  global _handler
-  _handler = HeartbeatStopHandler()
-
-  return _handler
-
-def debug(sig, frame):
-  """Interrupt running process, and provide a python prompt for
-  interactive debugging."""
-  d={'_frame':frame}         # Allow access to frame object.
-  d.update(frame.f_globals)  # Unless shadowed by global
-  d.update(frame.f_locals)
-
-  message  = "Signal received : entering python shell.\nTraceback:\n"
-  message += ''.join(traceback.format_stack(frame))
-  logger.info(message)
-
-class HeartbeatStopHandler:
-  def __init__(self, stopEvent = None):
-    # Event is used for synchronizing heartbeat iterations (to make possible
-    # manual wait() interruption between heartbeats )
-    self.heartbeat_wait_event = threading.Event()
-
-    # Event is used to stop the Agent process
-    if stopEvent is None:
-      #Allow standalone testing
-      self.stop_event = threading.Event()
-    else:
-      #Allow one unique event per process
-      self.stop_event = stopEvent
-
-  def set_heartbeat(self):
-    self.heartbeat_wait_event.set()
-
-  def reset_heartbeat(self):
-    self.heartbeat_wait_event.clear()
-
-  def set_stop(self):
-    self.stop_event.set()
-
-  def wait(self, timeout1, timeout2 = 0):
-    if self.heartbeat_wait_event.wait(timeout = timeout1):
-      #Event signaled, exit
-      return 0
-    # Stop loop when stop event received
-    # Otherwise sleep a bit more to allow STATUS_COMMAND results to be collected
-    # and sent in one heartbeat. Also avoid server overload with heartbeats
-    if self.stop_event.wait(timeout = timeout2):
-      logger.info("Stop event received")
-      return 1
-    #Timeout
-    return -1

+ 6 - 4
ambari-agent/src/main/python/ambari_agent/HostCleanup.py

@@ -33,8 +33,10 @@ import optparse
 import shlex
 import datetime
 from AmbariConfig import AmbariConfig
-from pwd import getpwnam
-from ambari_commons import OSCheck
+from ambari_commons import OSCheck, OSConst
+if OSCheck.get_os_family() != OSConst.WINSRV_FAMILY:
+  from pwd import getpwnam
+
 
 logger = logging.getLogger()
 
@@ -90,8 +92,8 @@ class HostCleanup:
   def resolve_ambari_config(self):
     try:
       config = AmbariConfig()
-      if os.path.exists(AmbariConfig.CONFIG_FILE):
-        config.read(AmbariConfig.CONFIG_FILE)
+      if os.path.exists(AmbariConfig.getConfigFile()):
+        config.read(AmbariConfig.getConfigFile())
       else:
         raise Exception("No config found, use default")
 

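pwd exists only on POSIX, so its import is now gated on the detected OS family, and the removed CONFIG_FILE constant gives way to the environment-aware getConfigFile(). The same guard pattern in isolation (a sketch; getpwnam is simply left unbound on Windows, as in the hunk above):

    from ambari_commons import OSCheck, OSConst
    if OSCheck.get_os_family() != OSConst.WINSRV_FAMILY:
      from pwd import getpwnam  # POSIX-only module

    from ambari_agent.AmbariConfig import AmbariConfig
    config_path = AmbariConfig.getConfigFile()  # honors AMBARI_AGENT_CONF_DIR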
+ 502 - 6
ambari-agent/src/main/python/ambari_agent/HostInfo.py

@@ -18,11 +18,507 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
+import os
+import glob
+import logging
+import re
+import time
+import subprocess
+import threading
+import shlex
 import platform
+import hostname
+from PackagesAnalyzer import PackagesAnalyzer
+from HostCheckReportFileHandler import HostCheckReportFileHandler
+from Hardware import Hardware
+from ambari_commons import OSCheck, OSConst, Firewall
+import socket
+from ambari_commons.os_family_impl import OsFamilyImpl
 
-if platform.system() == "Windows":
-  import HostInfo_win
-  HostInfo = HostInfo_win.HostInfo
-else:
-  import HostInfo_linux
-  HostInfo = HostInfo_linux.HostInfo
+logger = logging.getLogger()
+
+# service cmd
+SERVICE_CMD = "service"
+
+
+class HostInfo(object):
+  # Filters used to identify processes
+  PROC_FILTER = [
+    "hadoop", "zookeeper"
+  ]
+
+  current_umask = -1
+
+  def __init__(self, config=None):
+    self.config = config
+    self.reportFileHandler = HostCheckReportFileHandler(config)
+
+  def dirType(self, path):
+    if not os.path.exists(path):
+      return 'not_exist'
+    elif os.path.islink(path):
+      return 'sym_link'
+    elif os.path.isdir(path):
+      return 'directory'
+    elif os.path.isfile(path):
+      return 'file'
+    return 'unknown'
+
+  def checkLiveServices(self, services, result):
+    osType = OSCheck.get_os_family()
+    for service in services:
+      svcCheckResult = {}
+      if isinstance(service, dict):
+        serviceName = service[osType]
+      else:
+        serviceName = service
+      svcCheckResult['name'] = serviceName
+      svcCheckResult['status'] = "UNKNOWN"
+      svcCheckResult['desc'] = ""
+      try:
+        out, err, code = self.getServiceStatus(serviceName)
+        if 0 != code:
+          svcCheckResult['status'] = "Unhealthy"
+          svcCheckResult['desc'] = out
+          if len(out) == 0:
+            svcCheckResult['desc'] = err
+        else:
+          svcCheckResult['status'] = "Healthy"
+      except Exception, e:
+        svcCheckResult['status'] = "Unhealthy"
+        svcCheckResult['desc'] = repr(e)
+      result.append(svcCheckResult)
+
+  def getUMask(self):
+    if (self.current_umask == -1):
+      self.current_umask = os.umask(self.current_umask)
+      os.umask(self.current_umask)
+      return self.current_umask
+    else:
+      return self.current_umask
+
+  def checkReverseLookup(self):
+    """
+    Check if host fqdn resolves to current host ip
+    """
+    try:
+      host_name = socket.gethostname().lower()
+      host_ip = socket.gethostbyname(host_name)
+      host_fqdn = socket.getfqdn().lower()
+      fqdn_ip = socket.gethostbyname(host_fqdn)
+      return host_ip == fqdn_ip
+    except socket.error:
+      pass
+    return False
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HostInfoLinux(HostInfo):
+  # List of project names to be used to find alternatives folders etc.
+  DEFAULT_PROJECT_NAMES = [
+    "hadoop*", "hadoop", "hbase", "hcatalog", "hive", "ganglia",
+    "oozie", "sqoop", "hue", "zookeeper", "mapred", "hdfs", "flume",
+    "storm", "hive-hcatalog", "tez", "falcon", "ambari_qa", "hadoop_deploy",
+    "rrdcached", "hcat", "ambari-qa", "sqoop-ambari-qa", "sqoop-ambari_qa",
+    "webhcat", "hadoop-hdfs", "hadoop-yarn", "hadoop-mapreduce"
+  ]
+
+  # List of live services checked on the host; entries are plain strings or per-OS-family maps
+  DEFAULT_LIVE_SERVICES = [
+    {OSConst.REDHAT_FAMILY: "ntpd", OSConst.SUSE_FAMILY: "ntp", OSConst.UBUNTU_FAMILY: "ntp"}
+  ]
+
+  # Set of default users (need to be replaced with the configured user names)
+  DEFAULT_USERS = [
+    "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
+    "hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
+    "hue", "yarn", "tez", "storm", "falcon", "kafka", "knox"
+  ]
+
+  # Default set of directories that are checked for existence of files and folders
+  DEFAULT_DIRS = [
+    "/etc", "/var/run", "/var/log", "/usr/lib", "/var/lib", "/var/tmp", "/tmp", "/var", "/hadoop"
+  ]
+
+  # Packages that are used to find repos (then repos are used to find other packages)
+  PACKAGES = [
+    "hadoop_2_2_*", "hadoop-2-2-.*", "zookeeper_2_2_*", "zookeeper-2-2-.*",
+    "hadoop", "zookeeper", "webhcat", "*-manager-server-db", "*-manager-daemons"
+  ]
+
+  # Additional packages to look for (search packages that start with these)
+  ADDITIONAL_PACKAGES = [
+    "rrdtool", "rrdtool-python", "ganglia", "gmond", "gweb", "libconfuse",
+    "ambari-log4j", "hadoop", "zookeeper", "oozie", "webhcat"
+  ]
+
+  # ignore packages from repos whose names start with these strings
+  IGNORE_PACKAGES_FROM_REPOS = [
+    "ambari", "installed"
+  ]
+
+  # ignore required packages
+  IGNORE_PACKAGES = [
+    "epel-release"
+  ]
+
+  # ignore repos from the list of repos to be cleaned
+  IGNORE_REPOS = [
+    "ambari", "HDP-UTILS"
+  ]
+
+  RESULT_UNAVAILABLE = "unable_to_determine"
+
+  DEFAULT_SERVICE_NAME = "ntpd"
+  SERVICE_STATUS_CMD = "%s %s status" % (SERVICE_CMD, DEFAULT_SERVICE_NAME)
+
+  THP_FILE = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
+
+  def __init__(self, config=None):
+    super(HostInfoLinux, self).__init__(config)
+    self.packages = PackagesAnalyzer()
+
+  def osdiskAvailableSpace(self, path):
+    diskInfo = {}
+    try:
+      df = subprocess.Popen(["df", "-kPT", path], stdout=subprocess.PIPE)
+      dfdata = df.communicate()[0]
+      return Hardware.extractMountInfo(dfdata.splitlines()[-1])
+    except:
+      pass
+    return diskInfo
+
+  def createAlerts(self, alerts):
+    existingUsers = []
+    self.checkUsers(self.DEFAULT_USERS, existingUsers)
+    dirs = []
+    self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
+    alert = {
+      'name': 'host_alert',
+      'instance': None,
+      'service': 'AMBARI',
+      'component': 'host',
+      'host': hostname.hostname(self.config),
+      'state': 'OK',
+      'label': 'Disk space',
+      'text': 'Used disk space less than 80%'}
+    message = ""
+    mountinfoSet = []
+    for dir in dirs:
+      if dir["type"] == 'directory':
+        mountinfo = self.osdiskAvailableSpace(dir['name'])
+        if int(mountinfo["percent"].strip('%')) >= 80:
+          if not mountinfo in mountinfoSet:
+            mountinfoSet.append(mountinfo)
+          message += str(dir['name']) + ";\n"
+
+    if message != "":
+      message = "These discs have low space:\n" + str(
+        mountinfoSet) + "\n They include following critical directories:\n" + message
+      alert['state'] = 'WARNING'
+      alert['text'] = message
+    alerts.append(alert)
+    return alerts
+
+  def checkUsers(self, users, results):
+    f = open('/etc/passwd', 'r')
+    for userLine in f:
+      fields = userLine.split(":")
+      if fields[0] in users:
+        result = {}
+        homeDir = fields[5]
+        result['name'] = fields[0]
+        result['homeDir'] = fields[5]
+        result['status'] = "Available"
+        if not os.path.exists(homeDir):
+          result['status'] = "Invalid home directory"
+        results.append(result)
+
+  def checkFolders(self, basePaths, projectNames, existingUsers, dirs):
+    foldersToIgnore = []
+    for user in existingUsers:
+      foldersToIgnore.append(user['homeDir'])
+    try:
+      for dirName in basePaths:
+        for project in projectNames:
+          path = os.path.join(dirName.strip(), project.strip())
+          if not path in foldersToIgnore and os.path.exists(path):
+            obj = {}
+            obj['type'] = self.dirType(path)
+            obj['name'] = path
+            dirs.append(obj)
+    except:
+      pass
+
+  def javaProcs(self, list):
+    import pwd
+
+    try:
+      pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
+      for pid in pids:
+        cmd = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
+        cmd = cmd.replace('\0', ' ')
+        if not 'AmbariServer' in cmd:
+          if 'java' in cmd:
+            dict = {}
+            dict['pid'] = int(pid)
+            dict['hadoop'] = False
+            for filter in self.PROC_FILTER:
+              if filter in cmd:
+                dict['hadoop'] = True
+            dict['command'] = cmd.strip()
+            for line in open(os.path.join('/proc', pid, 'status')):
+              if line.startswith('Uid:'):
+                uid = int(line.split()[1])
+                dict['user'] = pwd.getpwuid(uid).pw_name
+            list.append(dict)
+    except:
+      pass
+    pass
+
+  def getReposToRemove(self, repos, ignoreList):
+    reposToRemove = []
+    for repo in repos:
+      addToRemoveList = True
+      for ignoreRepo in ignoreList:
+        if self.packages.nameMatch(ignoreRepo, repo):
+          addToRemoveList = False
+          continue
+      if addToRemoveList:
+        reposToRemove.append(repo)
+    return reposToRemove
+
+  def getTransparentHugePage(self):
+    # This file exist only on redhat 6
+    thp_regex = "\[(.+)\]"
+    if os.path.isfile(self.THP_FILE):
+      with open(self.THP_FILE) as f:
+        file_content = f.read()
+        return re.search(thp_regex, file_content).groups()[0]
+    else:
+      return ""
+
+  def checkIptables(self):
+    return Firewall().getFirewallObject().check_iptables()
+
+  def hadoopVarRunCount(self):
+    if not os.path.exists('/var/run/hadoop'):
+      return 0
+    pids = glob.glob('/var/run/hadoop/*/*.pid')
+    return len(pids)
+
+  def hadoopVarLogCount(self):
+    if not os.path.exists('/var/log/hadoop'):
+      return 0
+    logs = glob.glob('/var/log/hadoop/*/*.log')
+    return len(logs)
+
+  def etcAlternativesConf(self, projects, etcResults):
+    if not os.path.exists('/etc/alternatives'):
+      return []
+    projectRegex = "'" + '|'.join(projects) + "'"
+    files = [f for f in os.listdir('/etc/alternatives') if re.match(projectRegex, f)]
+    for conf in files:
+      result = {}
+      filePath = os.path.join('/etc/alternatives', conf)
+      if os.path.islink(filePath):
+        realConf = os.path.realpath(filePath)
+        result['name'] = conf
+        result['target'] = realConf
+        etcResults.append(result)
+
+  def register(self, dict, componentsMapped=True, commandsInProgress=True):
+    """ Return various details about the host
+    componentsMapped: indicates if any components are mapped to this host
+    commandsInProgress: indicates if any commands are in progress
+    """
+
+    dict['hostHealth'] = {}
+
+    java = []
+    self.javaProcs(java)
+    dict['hostHealth']['activeJavaProcs'] = java
+
+    liveSvcs = []
+    self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
+    dict['hostHealth']['liveServices'] = liveSvcs
+
+    dict['umask'] = str(self.getUMask())
+
+    dict['transparentHugePage'] = self.getTransparentHugePage()
+    dict['iptablesIsRunning'] = self.checkIptables()
+    dict['reverseLookup'] = self.checkReverseLookup()
+    # If commands are in progress or components are already mapped to this host
+    # Then do not perform certain expensive host checks
+    if componentsMapped or commandsInProgress:
+      dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
+      dict['installedPackages'] = []
+      dict['alternatives'] = []
+      dict['stackFoldersAndFiles'] = []
+      dict['existingUsers'] = []
+
+    else:
+      etcs = []
+      self.etcAlternativesConf(self.DEFAULT_PROJECT_NAMES, etcs)
+      dict['alternatives'] = etcs
+
+      existingUsers = []
+      self.checkUsers(self.DEFAULT_USERS, existingUsers)
+      dict['existingUsers'] = existingUsers
+
+      dirs = []
+      self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
+      dict['stackFoldersAndFiles'] = dirs
+
+      installedPackages = []
+      availablePackages = []
+      self.packages.allInstalledPackages(installedPackages)
+      self.packages.allAvailablePackages(availablePackages)
+
+      repos = []
+      self.packages.getInstalledRepos(self.PACKAGES, installedPackages + availablePackages,
+                                      self.IGNORE_PACKAGES_FROM_REPOS, repos)
+      packagesInstalled = self.packages.getInstalledPkgsByRepo(repos, self.IGNORE_PACKAGES, installedPackages)
+      additionalPkgsInstalled = self.packages.getInstalledPkgsByNames(
+        self.ADDITIONAL_PACKAGES, installedPackages)
+      allPackages = list(set(packagesInstalled + additionalPkgsInstalled))
+      dict['installedPackages'] = self.packages.getPackageDetails(installedPackages, allPackages)
+
+      repos = self.getReposToRemove(repos, self.IGNORE_REPOS)
+      dict['existingRepos'] = repos
+
+      self.reportFileHandler.writeHostCheckFile(dict)
+      pass
+
+    # The time stamp must be recorded at the end
+    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
+
+    pass
+
+  def getServiceStatus(self, service_name):
+    service_check_live = shlex.split(self.SERVICE_STATUS_CMD)
+    service_check_live[1] = service_name
+    osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE)
+    out, err = osStat.communicate()
+    return out, err, osStat.returncode
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HostInfoWindows(HostInfo):
+  SERVICE_STATUS_CMD = 'If ((Get-Service | Where-Object {{$_.Name -eq \'{0}\'}}).Status -eq \'Running\') {{echo "Running"; $host.SetShouldExit(0)}} Else {{echo "Stopped"; $host.SetShouldExit(1)}}'
+  GET_USERS_CMD = '$accounts=(Get-WmiObject -Class Win32_UserAccount -Namespace "root\cimv2" -Filter "LocalAccount=\'$True\'" -ComputerName "LocalHost" -ErrorAction Stop); foreach ($acc in $accounts) {echo $acc.Name}'
+  GET_JAVA_PROC_CMD = 'foreach ($process in (gwmi Win32_Process -Filter "name = \'java.exe\'")){echo $process.ProcessId;echo $process.CommandLine; echo $process.GetOwner().User}'
+  DEFAULT_LIVE_SERVICES = [
+    {OSConst.WINSRV_FAMILY: "W32Time"}
+  ]
+  def checkUsers(self, users, results):
+    get_users_cmd = ["powershell", '-noProfile', '-NonInteractive', '-nologo', "-Command", self.GET_USERS_CMD]
+    try:
+      osStat = subprocess.Popen(get_users_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+      out, err = osStat.communicate()
+    except:
+      raise Exception("Failed to get users.")
+    for user in out.split(os.linesep):
+      if user in users:
+        result = {}
+        result['name'] = user
+        result['status'] = "Available"
+        results.append(result)
+
+  def checkIptables(self):
+    from ambari_commons.os_windows import run_powershell_script, CHECK_FIREWALL_SCRIPT
+
+    out = run_powershell_script(CHECK_FIREWALL_SCRIPT)
+    if out[0] != 0:
+      logger.warn("Unable to check firewall status:{0}".format(out[2]))
+      return False
+    profiles_status = [i for i in out[1].split("\n") if not i == ""]
+    if "1" in profiles_status:
+      return True
+    return False
+
+  def createAlerts(self, alerts):
+    # TODO AMBARI-7849 Implement createAlerts for Windows
+    return alerts
+
+  def javaProcs(self, list):
+    try:
+      from ambari_commons.os_windows import run_powershell_script
+
+      code, out, err = run_powershell_script(self.GET_JAVA_PROC_CMD)
+      if code == 0:
+        splitted_output = out.split(os.linesep)
+        for i in [index for index in range(0, len(splitted_output)) if (index % 3) == 0]:
+          pid = splitted_output[i]
+          cmd = splitted_output[i + 1]
+          user = splitted_output[i + 2]
+          if not 'AmbariServer' in cmd:
+            if 'java' in cmd:
+              dict = {}
+              dict['pid'] = int(pid)
+              dict['hadoop'] = False
+              for filter in self.PROC_FILTER:
+                if filter in cmd:
+                  dict['hadoop'] = True
+              dict['command'] = cmd.strip()
+              dict['user'] = user
+              list.append(dict)
+    except Exception as e:
+      pass
+    pass
+
+  def getServiceStatus(self, service_name):
+    from ambari_commons.os_windows import run_powershell_script
+    code, out, err = run_powershell_script(self.SERVICE_STATUS_CMD.format(service_name))
+    return out, err, code
+
+  def register(self, dict, componentsMapped=True, commandsInProgress=True):
+    """ Return various details about the host
+    componentsMapped: indicates if any components are mapped to this host
+    commandsInProgress: indicates if any commands are in progress
+    """
+    dict['hostHealth'] = {}
+
+    java = []
+    self.javaProcs(java)
+    dict['hostHealth']['activeJavaProcs'] = java
+
+    liveSvcs = []
+    self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
+    dict['hostHealth']['liveServices'] = liveSvcs
+
+    dict['umask'] = str(self.getUMask())
+
+    dict['iptablesIsRunning'] = self.checkIptables()
+    dict['reverseLookup'] = self.checkReverseLookup()
+    # If commands are in progress or components are already mapped to this host
+    # Then do not perform certain expensive host checks
+    if componentsMapped or commandsInProgress:
+      dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
+      dict['installedPackages'] = []
+      dict['alternatives'] = []
+      dict['stackFoldersAndFiles'] = []
+      dict['existingUsers'] = []
+    else:
+      existingUsers = []
+      self.checkUsers(self.DEFAULT_USERS, existingUsers)
+      dict['existingUsers'] = existingUsers
+      # TODO check HDP stack and folders here
+      self.reportFileHandler.writeHostCheckFile(dict)
+      pass
+
+    # The time stamp must be recorded at the end
+    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
+
+
+
+def main(argv=None):
+  h = HostInfo()
+  struct = {}
+  h.register(struct)
+  print struct
+
+
+if __name__ == '__main__':
+  main()
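
For orientation, a minimal usage sketch of the merged module (illustrative, not part of the commit; it assumes HostInfoLinux is registered as the OsFamilyImpl default and HostInfoWindows under OSConst.WINSRV_FAMILY, as the decorator above indicates):

from ambari_agent.HostInfo import HostInfo

info = HostInfo()  # resolves to HostInfoLinux on Linux, HostInfoWindows on Windows Server
details = {}
info.register(details, componentsMapped=False, commandsInProgress=False)
print details['hostHealth']['liveServices']

Call sites keep constructing HostInfo directly; the per-OS split becomes an implementation detail of one module instead of two parallel files.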

+ 0 - 380
ambari-agent/src/main/python/ambari_agent/HostInfo_linux.py

@@ -1,380 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-import glob
-import logging
-import pwd
-import re
-import time
-import subprocess
-import threading
-import shlex
-import platform
-import hostname
-from PackagesAnalyzer import PackagesAnalyzer
-from HostCheckReportFileHandler import HostCheckReportFileHandler
-from Hardware import Hardware
-from ambari_commons import OSCheck, OSConst, Firewall
-import socket
-
-logger = logging.getLogger()
-
-# service cmd
-SERVICE_CMD = "service"
-
-
-class HostInfo:
-  # List of project names to be used to find alternatives folders etc.
-  DEFAULT_PROJECT_NAMES = [
-    "hadoop*", "hadoop", "hbase", "hcatalog", "hive", "ganglia",
-    "oozie", "sqoop", "hue", "zookeeper", "mapred", "hdfs", "flume",
-    "storm", "hive-hcatalog", "tez", "falcon", "ambari_qa", "hadoop_deploy",
-    "rrdcached", "hcat", "ambari-qa", "sqoop-ambari-qa", "sqoop-ambari_qa",
-    "webhcat", "hadoop-hdfs", "hadoop-yarn", "hadoop-mapreduce"
-  ]
-
-  # List of live services checked for on the host, takes a map of plan strings
-  DEFAULT_LIVE_SERVICES = [
-    {OSConst.REDHAT_FAMILY: "ntpd", OSConst.SUSE_FAMILY: "ntp", OSConst.UBUNTU_FAMILY: "ntp"}
-  ]
-
-  # Set of default users (need to be replaced with the configured user names)
-  DEFAULT_USERS = [
-    "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
-    "hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
-    "hue", "yarn", "tez", "storm", "falcon", "kafka","knox"
-  ]
-
-  # Filters used to identify processed
-  PROC_FILTER = [
-    "hadoop", "zookeeper"
-  ]
-
-  # Additional path patterns to find existing directory
-  DIRNAME_PATTERNS = [
-    "/tmp/hadoop-", "/tmp/hsperfdata_"
-  ]
-
-  # Default set of directories that are checked for existence of files and folders
-  DEFAULT_DIRS = [
-    "/etc", "/var/run", "/var/log", "/usr/lib", "/var/lib", "/var/tmp", "/tmp", "/var", "/hadoop"
-  ]
-
-  # Packages that are used to find repos (then repos are used to find other packages)
-  PACKAGES = [
-    "hadoop_2_2_*","hadoop-2-2-.*","zookeeper_2_2_*","zookeeper-2-2-.*",
-    "hadoop", "zookeeper", "webhcat", "*-manager-server-db", "*-manager-daemons"
-  ]
-
-  # Additional packages to look for (search packages that start with these)
-  ADDITIONAL_PACKAGES = [
-    "rrdtool", "rrdtool-python", "ganglia", "gmond", "gweb", "libconfuse",
-    "ambari-log4j", "hadoop", "zookeeper", "oozie", "webhcat"
-  ]
-
-  # ignore packages from repos whose names start with these strings
-  IGNORE_PACKAGES_FROM_REPOS = [
-    "ambari", "installed"
-  ]
-
-  # ignore required packages
-  IGNORE_PACKAGES = [
-    "epel-release"
-  ]
-
-  # ignore repos from the list of repos to be cleaned
-  IGNORE_REPOS = [
-    "ambari", "HDP-UTILS"
-  ]
-
-  # default timeout for async invoked processes
-  TIMEOUT_SECONDS = 60
-  RESULT_UNAVAILABLE = "unable_to_determine"
-
-  DEFAULT_SERVICE_NAME = "ntpd"
-  SERVICE_STATUS_CMD = "%s %s status" % (SERVICE_CMD, DEFAULT_SERVICE_NAME)
-
-  THP_FILE = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
-
-  event = threading.Event()
-
-  current_umask = -1
-
-  def __init__(self, config=None):
-    self.packages = PackagesAnalyzer()
-    self.config = config
-    self.reportFileHandler = HostCheckReportFileHandler(config)
-
-  def dirType(self, path):
-    if not os.path.exists(path):
-      return 'not_exist'
-    elif os.path.islink(path):
-      return 'sym_link'
-    elif os.path.isdir(path):
-      return 'directory'
-    elif os.path.isfile(path):
-      return 'file'
-    return 'unknown'
-
-  def hadoopVarRunCount(self):
-    if not os.path.exists('/var/run/hadoop'):
-      return 0
-    pids = glob.glob('/var/run/hadoop/*/*.pid')
-    return len(pids)
-
-  def hadoopVarLogCount(self):
-    if not os.path.exists('/var/log/hadoop'):
-      return 0
-    logs = glob.glob('/var/log/hadoop/*/*.log')
-    return len(logs)
-
-  def etcAlternativesConf(self, projects, etcResults):
-    if not os.path.exists('/etc/alternatives'):
-      return []
-    projectRegex = "'" + '|'.join(projects) + "'"
-    files = [f for f in os.listdir('/etc/alternatives') if re.match(projectRegex, f)]
-    for conf in files:
-      result = {}
-      filePath = os.path.join('/etc/alternatives', conf)
-      if os.path.islink(filePath):
-        realConf = os.path.realpath(filePath)
-        result['name'] = conf
-        result['target'] = realConf
-        etcResults.append(result)
-
-  def checkLiveServices(self, services, result):
-    osType = OSCheck.get_os_family()
-    for service in services:
-      svcCheckResult = {}
-      if isinstance(service, dict):
-        serviceName = service[osType]
-      else:
-        serviceName = service
-
-      service_check_live = shlex.split(self.SERVICE_STATUS_CMD)
-      service_check_live[1] = serviceName
-
-      svcCheckResult['name'] = serviceName
-      svcCheckResult['status'] = "UNKNOWN"
-      svcCheckResult['desc'] = ""
-      try:
-        osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
-          stderr=subprocess.PIPE)
-        out, err = osStat.communicate()
-        if 0 != osStat.returncode:
-          svcCheckResult['status'] = "Unhealthy"
-          svcCheckResult['desc'] = out
-          if len(out) == 0:
-            svcCheckResult['desc'] = err
-        else:
-          svcCheckResult['status'] = "Healthy"
-      except Exception, e:
-        svcCheckResult['status'] = "Unhealthy"
-        svcCheckResult['desc'] = repr(e)
-      result.append(svcCheckResult)
-
-  def checkUsers(self, users, results):
-    f = open('/etc/passwd', 'r')
-    for userLine in f:
-      fields = userLine.split(":")
-      if fields[0] in users:
-        result = {}
-        homeDir = fields[5]
-        result['name'] = fields[0]
-        result['homeDir'] = fields[5]
-        result['status'] = "Available"
-        if not os.path.exists(homeDir):
-          result['status'] = "Invalid home directory"
-        results.append(result)
-
-  def osdiskAvailableSpace(self, path):
-    diskInfo = {}
-    try:
-      df = subprocess.Popen(["df", "-kPT", path], stdout=subprocess.PIPE)
-      dfdata = df.communicate()[0]
-      return Hardware.extractMountInfo(dfdata.splitlines()[-1])
-    except:
-      pass
-    return diskInfo
-
-  def checkFolders(self, basePaths, projectNames, existingUsers, dirs):
-    foldersToIgnore = []
-    for user in existingUsers:
-      foldersToIgnore.append(user['homeDir'])
-    try:
-      for dirName in basePaths:
-        for project in projectNames:
-          path = os.path.join(dirName.strip(), project.strip())
-          if not path in foldersToIgnore and os.path.exists(path):
-            obj = {}
-            obj['type'] = self.dirType(path)
-            obj['name'] = path
-            dirs.append(obj)
-    except:
-      pass
-
-  def javaProcs(self, list):
-    try:
-      pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
-      for pid in pids:
-        cmd = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
-        cmd = cmd.replace('\0', ' ')
-        if not 'AmbariServer' in cmd:
-          if 'java' in cmd:
-            dict = {}
-            dict['pid'] = int(pid)
-            dict['hadoop'] = False
-            for filter in self.PROC_FILTER:
-              if filter in cmd:
-                dict['hadoop'] = True
-            dict['command'] = cmd.strip()
-            for line in open(os.path.join('/proc', pid, 'status')):
-              if line.startswith('Uid:'):
-                uid = int(line.split()[1])
-                dict['user'] = pwd.getpwuid(uid).pw_name
-            list.append(dict)
-    except:
-      pass
-    pass
-
-  def getReposToRemove(self, repos, ignoreList):
-    reposToRemove = []
-    for repo in repos:
-      addToRemoveList = True
-      for ignoreRepo in ignoreList:
-        if self.packages.nameMatch(ignoreRepo, repo):
-          addToRemoveList = False
-          continue
-      if addToRemoveList:
-        reposToRemove.append(repo)
-    return reposToRemove
-
-  def getUMask(self):
-    if (self.current_umask == -1):
-      self.current_umask = os.umask(self.current_umask)
-      os.umask(self.current_umask)
-      return self.current_umask
-    else:
-      return self.current_umask
-
-  def getTransparentHugePage(self):
-    # This file exist only on redhat 6
-    thp_regex = "\[(.+)\]"
-    if os.path.isfile(self.THP_FILE):
-      with open(self.THP_FILE) as f:
-        file_content = f.read()
-        return re.search(thp_regex, file_content).groups()[0]
-    else:
-      return ""
-
-  def checkIptables(self):
-    return Firewall().getFirewallObject().check_iptables()
-
-  """ Return various details about the host
-  componentsMapped: indicates if any components are mapped to this host
-  commandsInProgress: indicates if any commands are in progress
-  """
-  def register(self, dict, componentsMapped=True, commandsInProgress=True):
-    dict['hostHealth'] = {}
-
-    java = []
-    self.javaProcs(java)
-    dict['hostHealth']['activeJavaProcs'] = java
-
-    liveSvcs = []
-    self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
-    dict['hostHealth']['liveServices'] = liveSvcs
-
-    dict['umask'] = str(self.getUMask())
-
-    dict['transparentHugePage'] = self.getTransparentHugePage()
-    dict['iptablesIsRunning'] = self.checkIptables()
-    dict['reverseLookup'] = self.checkReverseLookup()
-    # If commands are in progress or components are already mapped to this host
-    # Then do not perform certain expensive host checks
-    if componentsMapped or commandsInProgress:
-      dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
-      dict['installedPackages'] = []
-      dict['alternatives'] = []
-      dict['stackFoldersAndFiles'] = []
-      dict['existingUsers'] = []
-
-    else:
-      etcs = []
-      self.etcAlternativesConf(self.DEFAULT_PROJECT_NAMES, etcs)
-      dict['alternatives'] = etcs
-
-      existingUsers = []
-      self.checkUsers(self.DEFAULT_USERS, existingUsers)
-      dict['existingUsers'] = existingUsers
-
-      dirs = []
-      self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
-      dict['stackFoldersAndFiles'] = dirs
-
-      installedPackages = []
-      availablePackages = []
-      self.packages.allInstalledPackages(installedPackages)
-      self.packages.allAvailablePackages(availablePackages)
-
-      repos = []
-      self.packages.getInstalledRepos(self.PACKAGES, installedPackages + availablePackages,
-        self.IGNORE_PACKAGES_FROM_REPOS, repos)
-      packagesInstalled = self.packages.getInstalledPkgsByRepo(repos, self.IGNORE_PACKAGES, installedPackages)
-      additionalPkgsInstalled = self.packages.getInstalledPkgsByNames(
-        self.ADDITIONAL_PACKAGES, installedPackages)
-      allPackages = list(set(packagesInstalled + additionalPkgsInstalled))
-      dict['installedPackages'] = self.packages.getPackageDetails(installedPackages, allPackages)
-
-      repos = self.getReposToRemove(repos, self.IGNORE_REPOS)
-      dict['existingRepos'] = repos
-
-      self.reportFileHandler.writeHostCheckFile(dict)
-      pass
-
-    # The time stamp must be recorded at the end
-    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
-
-    pass
-
-  def checkReverseLookup(self):
-    """
-    Check if host fqdn resolves to current host ip
-    """
-    try:
-      host_name = socket.gethostname()
-      host_ip = socket.gethostbyname(host_name)
-      host_fqdn = socket.getfqdn()
-      fqdn_ip = socket.gethostbyname(host_fqdn)
-      return host_ip == fqdn_ip
-    except socket.error:
-      pass
-    return False
-
-def main(argv=None):
-  h = HostInfo()
-  struct = {}
-  h.register(struct)
-  print struct
-
-
-if __name__ == '__main__':
-  main()

+ 0 - 227
ambari-agent/src/main/python/ambari_agent/HostInfo_win.py

@@ -1,227 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-import logging
-import time
-import subprocess
-from HostCheckReportFileHandler import HostCheckReportFileHandler
-from shell import shellRunner
-from ambari_commons.os_check import OSCheck, OSConst
-from ambari_commons.os_windows import run_powershell_script, CHECK_FIREWALL_SCRIPT
-import socket
-
-logger = logging.getLogger()
-
-# OS info
-OS_VERSION = OSCheck().get_os_major_version()
-OS_TYPE = OSCheck.get_os_type()
-OS_FAMILY = OSCheck.get_os_family()
-
-class HostInfo:
-  # List of live services checked for on the host, takes a map of plan strings
-  DEFAULT_LIVE_SERVICES = [
-    {OSConst.WINSRV_FAMILY: "W32Time"}
-  ]
-
-  # Set of default users (need to be replaced with the configured user names)
-  DEFAULT_USERS = [
-    "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
-    "hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
-    "hue", "yarn"
-  ]
-
-  # Filters used to identify processed
-  PROC_FILTER = [
-    "hadoop", "zookeeper"
-  ]
-
-  RESULT_UNAVAILABLE = "unable_to_determine"
-
-  SERVICE_STATUS_CMD = 'If ((Get-Service | Where-Object {{$_.Name -eq \'{0}\'}}).Status -eq \'Running\') {{echo "Running"; $host.SetShouldExit(0)}} Else {{echo "Stopped"; $host.SetShouldExit(1)}}'
-  GET_USERS_CMD = '$accounts=(Get-WmiObject -Class Win32_UserAccount -Namespace "root\cimv2" -Filter "LocalAccount=\'$True\'" -ComputerName "LocalHost" -ErrorAction Stop); foreach ($acc in $accounts) {echo $acc.Name}'
-  GET_JAVA_PROC_CMD = 'foreach ($process in (gwmi Win32_Process -Filter "name = \'java.exe\'")){echo $process.ProcessId;echo $process.CommandLine; echo $process.GetOwner().User}'
-
-  current_umask = -1
-
-  def __init__(self, config=None):
-    self.reportFileHandler = HostCheckReportFileHandler(config)
-
-  def dirType(self, path):
-    if not os.path.exists(path):
-      return 'not_exist'
-    elif os.path.islink(path):
-      return 'sym_link'
-    elif os.path.isdir(path):
-      return 'directory'
-    elif os.path.isfile(path):
-      return 'file'
-    return 'unknown'
-
-  def checkLiveServices(self, services, result):
-    osType = OSCheck.get_os_family()
-    for service in services:
-      svcCheckResult = {}
-      if isinstance(service, dict):
-        serviceName = service[osType]
-      else:
-        serviceName = service
-
-      service_check_live = ["powershell",'-noProfile', '-NonInteractive',  '-nologo', "-Command", self.SERVICE_STATUS_CMD.format(serviceName)]
-      svcCheckResult['name'] = serviceName
-      svcCheckResult['status'] = "UNKNOWN"
-      svcCheckResult['desc'] = ""
-      try:
-        osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
-                                  stderr=subprocess.PIPE)
-        out, err = osStat.communicate()
-        if 0 != osStat.returncode:
-          svcCheckResult['status'] = "Unhealthy"
-          svcCheckResult['desc'] = out
-          if len(out) == 0:
-            svcCheckResult['desc'] = err
-        else:
-          svcCheckResult['status'] = "Healthy"
-      except Exception, e:
-        svcCheckResult['status'] = "Unhealthy"
-        svcCheckResult['desc'] = repr(e)
-      result.append(svcCheckResult)
-
-  #TODO get user directory
-  def checkUsers(self, users, results):
-    get_users_cmd = ["powershell",'-noProfile', '-NonInteractive',  '-nologo', "-Command", self.GET_USERS_CMD]
-    try:
-      osStat = subprocess.Popen(get_users_cmd, stdout=subprocess.PIPE,                               stderr=subprocess.PIPE)
-      out, err = osStat.communicate()
-    except:
-      raise Exception("Failed to get users.")
-    for user in out.split(os.linesep):
-      if user in users:
-        result = {}
-        result['name'] = user
-        result['status'] = "Available"
-        results.append(result)
-
-  def javaProcs(self, list):
-    try:
-      runner = shellRunner()
-      command_result = runner.run(["powershell",'-noProfile', '-NonInteractive',  '-nologo', "-Command", self.GET_JAVA_PROC_CMD])
-      if command_result["exitCode"] == 0:
-        splitted_output = command_result["output"].split(os.linesep)
-        for i in [index for index in range(0,len(splitted_output)) if (index % 3)==0]:
-          pid = splitted_output[i]
-          cmd = splitted_output[i+1]
-          user = splitted_output[i+2]
-          if not 'AmbariServer' in cmd:
-            if 'java' in cmd:
-              dict = {}
-              dict['pid'] = int(pid)
-              dict['hadoop'] = False
-              for filter in self.PROC_FILTER:
-                if filter in cmd:
-                  dict['hadoop'] = True
-              dict['command'] = cmd.strip()
-              dict['user'] = user
-              list.append(dict)
-    except Exception as e:
-      pass
-    pass
-
-  def getUMask(self):
-    if (self.current_umask == -1):
-      self.current_umask = os.umask(self.current_umask)
-      os.umask(self.current_umask)
-      return self.current_umask
-    else:
-      return self.current_umask
-
-  def checkIptables(self):
-    out = run_powershell_script(CHECK_FIREWALL_SCRIPT)
-    if out[0] != 0:
-      logger.warn("Unable to check firewall status:{0}".format(out[2]))
-      return False
-    profiles_status = [i for i in out[1].split("\n") if not i == ""]
-    if "1" in profiles_status:
-      return True
-    return False
-
-  """ Return various details about the host
-  componentsMapped: indicates if any components are mapped to this host
-  commandsInProgress: indicates if any commands are in progress
-  """
-  def register(self, dict, componentsMapped=True, commandsInProgress=True):
-    dict['hostHealth'] = {}
-
-    java = []
-    self.javaProcs(java)
-    dict['hostHealth']['activeJavaProcs'] = java
-
-    liveSvcs = []
-    self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
-    dict['hostHealth']['liveServices'] = liveSvcs
-
-    dict['umask'] = str(self.getUMask())
-
-    dict['iptablesIsRunning'] = self.checkIptables()
-    dict['reverseLookup'] = self.checkReverseLookup()
-    # If commands are in progress or components are already mapped to this host
-    # Then do not perform certain expensive host checks
-    if componentsMapped or commandsInProgress:
-      dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
-      dict['installedPackages'] = []
-      dict['alternatives'] = []
-      dict['stackFoldersAndFiles'] = []
-      dict['existingUsers'] = []
-    else:
-      existingUsers = []
-      self.checkUsers(self.DEFAULT_USERS, existingUsers)
-      dict['existingUsers'] = existingUsers
-      #TODO check HDP stack and folders here
-      self.reportFileHandler.writeHostCheckFile(dict)
-      pass
-
-    # The time stamp must be recorded at the end
-    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
-
-    pass
-
-  def checkReverseLookup(self):
-    """
-    Check if host fqdn resolves to current host ip
-    """
-    try:
-      host_name = socket.gethostname().lower()
-      host_ip = socket.gethostbyname(host_name)
-      host_fqdn = socket.getfqdn().lower()
-      fqdn_ip = socket.gethostbyname(host_fqdn)
-      return host_ip == fqdn_ip
-    except socket.error:
-      pass
-    return False
-
-def main(argv=None):
-  h = HostInfo()
-  struct = {}
-  h.register(struct)
-  print struct
-
-
-if __name__ == '__main__':
-  main()

+ 2 - 7
ambari-agent/src/main/python/ambari_agent/NetUtil.py

@@ -19,6 +19,7 @@ import logging
 import httplib
 from ssl import SSLError
 import platform
+from HeartbeatHandlers import HeartbeatStopHandlers
 
 ERROR_SSL_WRONG_VERSION = "SSLError: Failed to connect. Please check openssl library versions. \n" +\
               "Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1022468 for more details."
@@ -47,13 +48,7 @@ class NetUtil:
 
   def __init__(self, stop_callback=None):
     if stop_callback is None:
-      IS_WINDOWS = platform.system() == "Windows"
-      if IS_WINDOWS:
-        from HeartbeatHandlers_windows import HeartbeatStopHandler
-      else:
-        from HeartbeatStopHandler_linux import HeartbeatStopHandler
-      stop_callback = HeartbeatStopHandler()
-
+      stop_callback = HeartbeatStopHandlers()
     self.stopCallback = stop_callback
 
   def checkURL(self, url):
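
The practical effect, sketched below under the assumption that HeartbeatStopHandlers is dispatched through OsFamilyImpl like the classes above, is that NetUtil callers no longer branch on platform.system():

from ambari_agent.NetUtil import NetUtil

# One code path for both platforms; the default stop_callback is an
# OS-appropriate HeartbeatStopHandlers instance.
netutil = NetUtil()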

+ 0 - 1
ambari-agent/src/main/python/ambari_agent/PackagesAnalyzer.py

@@ -20,7 +20,6 @@ limitations under the License.
 
 import os
 import logging
-import pwd
 import shell
 import subprocess
 from threading import Thread

+ 2 - 2
ambari-agent/src/main/python/ambari_agent/PythonExecutor.py

@@ -27,7 +27,7 @@ import platform
 from threading import Thread
 import time
 from BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
-
+from ambari_commons.os_check import OSConst, OSCheck
 from Grep import Grep
 import shell, sys
 
@@ -141,7 +141,7 @@ class PythonExecutor:
     Creates subprocess with given parameters. This functionality was moved to separate method
     to make possible unit testing
     """
-    close_fds = None if platform.system() == "Windows" else True
+    close_fds = None if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY else True
     return subprocess.Popen(command,
       stdout=tmpout,
       stderr=tmperr, close_fds=close_fds)
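
For context, a hedged reduction of why close_fds differs per family: on Windows, Python 2's subprocess raises ValueError for close_fds=True combined with redirected standard handles, so the Windows path must stay on the default.

import subprocess
from ambari_commons.os_check import OSCheck, OSConst

# close_fds=True with redirected std handles is rejected on Windows,
# hence None (the subprocess default) for the Windows Server family.
close_fds = None if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY else True
proc = subprocess.Popen(["python", "-V"],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                        close_fds=close_fds)
print proc.communicate()[1]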

+ 7 - 11
ambari-agent/src/main/python/ambari_agent/main.py

@@ -36,23 +36,19 @@ from PingPortListener import PingPortListener
 import hostname
 from DataCleaner import DataCleaner
 import socket
+from ambari_commons import OSConst, OSCheck
 from ambari_agent import shell
+import HeartbeatHandlers
+from HeartbeatHandlers import bind_signal_handlers
 logger = logging.getLogger()
 
 formatstr = "%(levelname)s %(asctime)s %(filename)s:%(lineno)d - %(message)s"
 agentPid = os.getpid()
 config = AmbariConfig.AmbariConfig()
-configFile = config.CONFIG_FILE
+configFile = config.getConfigFile()
 two_way_ssl_property = config.TWO_WAY_SSL_PROPERTY
 
-IS_WINDOWS = platform.system() == "Windows"
 
-if IS_WINDOWS:
-  from HeartbeatHandlers_windows import bind_signal_handlers
-else:
-  from HeartbeatStopHandler_linux import bind_signal_handlers
-  from HeartbeatStopHandler_linux import signal_handler
-  from HeartbeatStopHandler_linux import debug
 
 def setup_logging(verbose):
   formatter = logging.Formatter(formatstr)
@@ -119,7 +115,7 @@ def perform_prestart_checks(expected_hostname):
       logger.error(msg)
       sys.exit(1)
   # Check if there is another instance running
-  if os.path.isfile(ProcessHelper.pidfile) and not IS_WINDOWS:
+  if os.path.isfile(ProcessHelper.pidfile) and not OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
     print("%s already exists, exiting" % ProcessHelper.pidfile)
     sys.exit(1)
   # check if ambari prefix exists
@@ -232,7 +228,7 @@ def main(heartbeat_stop_callback=None):
 
   perform_prestart_checks(expected_hostname)
 
-  if not IS_WINDOWS:
+  if not OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
     daemonize()
 
   # Starting ping port listener
@@ -265,7 +261,7 @@ def main(heartbeat_stop_callback=None):
     controller = Controller(config, heartbeat_stop_callback)
     controller.start()
     controller.join()
-  if not IS_WINDOWS:
+  if not OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
     stop_agent()
   logger.info("finished")
 

+ 2 - 2
ambari-agent/src/test/python/ambari_agent/TestActionQueue.py

@@ -36,7 +36,7 @@ from ambari_agent.PythonExecutor import PythonExecutor
 from ambari_agent.CommandStatusDict import CommandStatusDict
 from ambari_agent.ActualConfigHandler import ActualConfigHandler
 from FileCache import FileCache
-
+import ambari_commons.os_check
 
 class TestActionQueue(TestCase):
   def setUp(self):
@@ -275,7 +275,7 @@ class TestActionQueue(TestCase):
     actionQueue.process_command(execution_command)
     self.assertTrue(print_exc_mock.called)
 
-
+  @patch.object(ambari_commons.os_check,"os_distribution", new=lambda: ('Suse','11','Final'))
   @patch("__builtin__.open")
   @patch.object(ActionQueue, "status_update_callback")
   def test_execute_command(self, status_update_callback_mock, open_mock):

+ 5 - 5
ambari-agent/src/test/python/ambari_agent/TestHardware.py

@@ -26,7 +26,7 @@ import platform
 with patch("platform.linux_distribution", return_value = ('Suse','11','Final')):
   from ambari_agent import hostname
   from ambari_agent.Hardware import Hardware
-  from ambari_agent.Facter import Facter
+  from ambari_agent.Facter import Facter, FacterLinux
   from ambari_commons import OSCheck
 
 @patch.object(platform,"linux_distribution", new = ('Suse','11','Final'))
@@ -87,7 +87,7 @@ class TestHardware(TestCase):
     self.assertEquals(result, None)
 
   @patch.object(hostname,"hostname")
-  @patch.object(Facter, "getFqdn")
+  @patch.object(FacterLinux, "getFqdn")
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_version")
   def test_fqdnDomainHostname(self, get_os_version_mock, get_os_type_mock, facter_getFqdn_mock, hostname_mock):
@@ -101,7 +101,7 @@ class TestHardware(TestCase):
     self.assertEquals(result['domain'], "apache.org")
     self.assertEquals(result['fqdn'], (result['hostname'] + '.' + result['domain']))
 
-  @patch.object(Facter, "setDataUpTimeOutput")
+  @patch.object(FacterLinux, "setDataUpTimeOutput")
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_version")
   def test_uptimeSecondsHoursDays(self, get_os_version_mock, get_os_type_mock, facter_setDataUpTimeOutput_mock):
@@ -115,7 +115,7 @@ class TestHardware(TestCase):
     self.assertEquals(result['uptime_hours'], '73')
     self.assertEquals(result['uptime_days'], '3')
 
-  @patch.object(Facter, "setMemInfoOutput")
+  @patch.object(FacterLinux, "setMemInfoOutput")
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_version")
   def test_facterMemInfoOutput(self, get_os_version_mock, get_os_type_mock, facter_setMemInfoOutput_mock):
@@ -141,7 +141,7 @@ SwapFree:        1598676 kB
     self.assertEquals(result['swapsize'], '2.04 GB')
     self.assertEquals(result['swapfree'], '1.52 GB')
 
-  @patch.object(Facter, "setDataIfConfigOutput")
+  @patch.object(FacterLinux, "setDataIfConfigOutput")
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_version")
   def test_facterDataIfConfigOutput(self, get_os_version_mock, get_os_type_mock, facter_setDataIfConfigOutput_mock):

+ 4 - 4
ambari-agent/src/test/python/ambari_agent/TestHeartbeat.py

@@ -34,7 +34,7 @@ with patch("platform.linux_distribution", return_value = ('Suse','11','Final')):
   from ambari_agent.LiveStatus import LiveStatus
   from ambari_agent import AmbariConfig
   from ambari_agent.StackVersionsFileHandler import StackVersionsFileHandler
-  from ambari_agent.HostInfo import HostInfo
+  from ambari_agent.HostInfo import HostInfoLinux
 
 
 class TestHeartbeat(TestCase):
@@ -74,7 +74,7 @@ class TestHeartbeat(TestCase):
 
 
   @patch.object(ActionQueue, "result")
-  @patch.object(HostInfo, "register")
+  @patch.object(HostInfoLinux, "register")
   def test_no_mapping(self, register_mock, result_mock):
     result_mock.return_value = {
       'reports': [{'status': 'IN_PROGRESS',
@@ -194,7 +194,7 @@ class TestHeartbeat(TestCase):
     self.assertEquals(hb, expected)
 
 
-  @patch.object(HostInfo, 'register')
+  @patch.object(HostInfoLinux, 'register')
   def test_heartbeat_no_host_check_cmd_in_queue(self, register_mock):
     config = AmbariConfig.AmbariConfig().getConfig()
     config.set('agent', 'prefix', 'tmp')
@@ -219,7 +219,7 @@ class TestHeartbeat(TestCase):
     self.assertFalse(args[1])
 
 
-  @patch.object(HostInfo, 'register')
+  @patch.object(HostInfoLinux, 'register')
   def test_heartbeat_host_check_no_cmd(self, register_mock):
     config = AmbariConfig.AmbariConfig().getConfig()
     config.set('agent', 'prefix', 'tmp')

+ 42 - 38
ambari-agent/src/test/python/ambari_agent/TestHostInfo.py

@@ -27,15 +27,17 @@ import socket
 from mock.mock import patch
 from mock.mock import MagicMock
 from mock.mock import create_autospec
+import ambari_commons
 
 with patch("platform.linux_distribution", return_value = ('redhat','11','Final')):
   from ambari_agent.HostCheckReportFileHandler import HostCheckReportFileHandler
   from ambari_agent.PackagesAnalyzer import PackagesAnalyzer
-  from ambari_agent.HostInfo import HostInfo
+  from ambari_agent.HostInfo import HostInfo, HostInfoLinux
   from ambari_agent.Hardware import Hardware
   from ambari_agent.AmbariConfig import AmbariConfig
   from resource_management.core.system import System
   from ambari_commons import OSCheck, Firewall, FirewallChecks ,OSConst
+  import ambari_commons
 
 @patch.object(System, "os_family", new = 'redhat')
 class TestHostInfo(TestCase):
@@ -74,7 +76,7 @@ class TestHostInfo(TestCase):
   def test_getReposToRemove(self):
     l1 = ["Hortonworks Data Platform Utils Version - HDP-UTILS-1.1.0.15", "Ambari 1.x", "HDP"]
     l2 = ["Ambari", "HDP-UTIL"]
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
     l3 = hostInfo.getReposToRemove(l1, l2)
     self.assertTrue(1, len(l3))
     self.assertEqual(l3[0], "HDP")
@@ -208,7 +210,7 @@ class TestHostInfo(TestCase):
   @patch('os.path.exists')
   def test_checkFolders(self, path_mock):
     path_mock.return_value = True
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
     results = []
     existingUsers = [{'name':'a1', 'homeDir':'/home/a1'}, {'name':'b1', 'homeDir':'/home/b1'}]
     hostInfo.checkFolders(["/etc/conf", "/var/lib", "/home/"], ["a1", "b1"], existingUsers, results)
@@ -217,15 +219,16 @@ class TestHostInfo(TestCase):
     for item in ['/etc/conf/a1', '/var/lib/a1', '/etc/conf/b1', '/var/lib/b1']:
       self.assertTrue(item in names)
 
+  @patch("ambari_commons.os_check.os_distribution", new=MagicMock(return_value=('redhat','11','Final')))
   @patch('os.path.exists')
   @patch('__builtin__.open')
   def test_checkUsers(self, builtins_open_mock, path_mock):
     builtins_open_mock.return_value = [
       "hdfs:x:493:502:Hadoop HDFS:/usr/lib/hadoop:/bin/bash",
       "zookeeper:x:492:502:ZooKeeper:/var/run/zookeeper:/bin/bash"]
-    path_mock.side_effect = [True, False]
+    path_mock.side_effect = [False, True, False]
 
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
     results = []
     hostInfo.checkUsers(["zookeeper", "hdfs"], results)
     self.assertEqual(2, len(results))
@@ -236,7 +239,7 @@ class TestHostInfo(TestCase):
     self.assertTrue(newlist[1]['homeDir'], "/var/run/zookeeper")
     self.assertTrue(newlist[0]['status'], "Available")
     self.assertTrue(newlist[1]['status'], "Invalid home directory")
-
+    print(path_mock.mock_calls)
 
   @patch.object(OSCheck, "get_os_type")
   @patch('os.umask')
@@ -247,14 +250,14 @@ class TestHostInfo(TestCase):
   @patch.object(PackagesAnalyzer, 'getInstalledPkgsByNames')
   @patch.object(PackagesAnalyzer, 'getInstalledPkgsByRepo')
   @patch.object(PackagesAnalyzer, 'getInstalledRepos')
-  @patch.object(HostInfo, 'checkUsers')
-  @patch.object(HostInfo, 'checkLiveServices')
-  @patch.object(HostInfo, 'javaProcs')
-  @patch.object(HostInfo, 'checkFolders')
-  @patch.object(HostInfo, 'etcAlternativesConf')
-  @patch.object(HostInfo, 'hadoopVarRunCount')
-  @patch.object(HostInfo, 'hadoopVarLogCount')
-  @patch.object(HostInfo, 'checkIptables')
+  @patch.object(HostInfoLinux, 'checkUsers')
+  @patch.object(HostInfoLinux, 'checkLiveServices')
+  @patch.object(HostInfoLinux, 'javaProcs')
+  @patch.object(HostInfoLinux, 'checkFolders')
+  @patch.object(HostInfoLinux, 'etcAlternativesConf')
+  @patch.object(HostInfoLinux, 'hadoopVarRunCount')
+  @patch.object(HostInfoLinux, 'hadoopVarLogCount')
+  @patch.object(HostInfoLinux, 'checkIptables')
   def test_hostinfo_register_suse(self, cit_mock, hvlc_mock, hvrc_mock, eac_mock, cf_mock, jp_mock,
                              cls_mock, cu_mock, gir_mock, gipbr_mock, gipbn_mock,
                              gpd_mock, aip_mock, aap_mock, whcf_mock, os_umask_mock, get_os_type_mock):
@@ -266,7 +269,7 @@ class TestHostInfo(TestCase):
     gpd_mock.return_value = ["pkg1", "pkg2"]
     get_os_type_mock.return_value = "suse"
 
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
     dict = {}
     hostInfo.register(dict, False, False)
     self.assertTrue(cit_mock.called)
@@ -290,15 +293,15 @@ class TestHostInfo(TestCase):
   @patch.object(PackagesAnalyzer, 'getInstalledPkgsByNames')
   @patch.object(PackagesAnalyzer, 'getInstalledPkgsByRepo')
   @patch.object(PackagesAnalyzer, 'getInstalledRepos')
-  @patch.object(HostInfo, 'checkUsers')
-  @patch.object(HostInfo, 'checkLiveServices')
-  @patch.object(HostInfo, 'javaProcs')
-  @patch.object(HostInfo, 'checkFolders')
-  @patch.object(HostInfo, 'etcAlternativesConf')
-  @patch.object(HostInfo, 'hadoopVarRunCount')
-  @patch.object(HostInfo, 'hadoopVarLogCount')
-  @patch.object(HostInfo, 'checkIptables')
-  @patch.object(HostInfo, 'getTransparentHugePage')
+  @patch.object(HostInfoLinux, 'checkUsers')
+  @patch.object(HostInfoLinux, 'checkLiveServices')
+  @patch.object(HostInfoLinux, 'javaProcs')
+  @patch.object(HostInfoLinux, 'checkFolders')
+  @patch.object(HostInfoLinux, 'etcAlternativesConf')
+  @patch.object(HostInfoLinux, 'hadoopVarRunCount')
+  @patch.object(HostInfoLinux, 'hadoopVarLogCount')
+  @patch.object(HostInfoLinux, 'checkIptables')
+  @patch.object(HostInfoLinux, 'getTransparentHugePage')
   def test_hostinfo_register(self, get_transparentHuge_page_mock, cit_mock, hvlc_mock, hvrc_mock, eac_mock, cf_mock, jp_mock,
                              cls_mock, cu_mock, gir_mock, gipbr_mock, gipbn_mock,
                              gpd_mock, aip_mock, aap_mock, whcf_mock, os_umask_mock, get_os_type_mock):
@@ -310,7 +313,7 @@ class TestHostInfo(TestCase):
     gpd_mock.return_value = ["pkg1", "pkg2"]
     get_os_type_mock.return_value = "redhat"
 
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
     dict = {}
     hostInfo.register(dict, True, True)
     self.verifyReturnedValues(dict)
@@ -323,7 +326,7 @@ class TestHostInfo(TestCase):
     self.assertTrue(os_umask_mock.call_count == 2)
 
     cit_mock.reset_mock()
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
     dict = {}
     hostInfo.register(dict, False, False)
     self.assertTrue(gir_mock.called)
@@ -339,7 +342,7 @@ class TestHostInfo(TestCase):
       self.assertTrue(existingPkg in args[1])
 
   def verifyReturnedValues(self, dict):
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
     self.assertEqual(dict['alternatives'], [])
     self.assertEqual(dict['stackFoldersAndFiles'], [])
     self.assertEqual(dict['existingUsers'], [])
@@ -352,7 +355,7 @@ class TestHostInfo(TestCase):
   @patch("os.path.isdir")
   @patch("os.path.isfile")
   def test_dirType(self, os_path_isfile_mock, os_path_isdir_mock, os_path_islink_mock, os_path_exists_mock):
-    host = HostInfo()
+    host = HostInfoLinux()
 
     os_path_exists_mock.return_value = False
     result = host.dirType("/home")
@@ -387,7 +390,7 @@ class TestHostInfo(TestCase):
   @patch("os.path.exists")
   @patch("glob.glob")
   def test_hadoopVarRunCount(self, glob_glob_mock, os_path_exists_mock):
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
 
     os_path_exists_mock.return_value = True
     glob_glob_mock.return_value = ['pid1','pid2','pid3']
@@ -402,7 +405,7 @@ class TestHostInfo(TestCase):
   @patch("os.path.exists")
   @patch("glob.glob")
   def test_hadoopVarLogCount(self, glob_glob_mock, os_path_exists_mock):
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
 
     os_path_exists_mock.return_value = True
     glob_glob_mock.return_value = ['log1','log2']
@@ -413,12 +416,12 @@ class TestHostInfo(TestCase):
     result = hostInfo.hadoopVarLogCount()
     self.assertEquals(result, 0)
 
-
+  @patch("ambari_commons.os_check.os_distribution", new=MagicMock(return_value=('redhat','11','Final')))
   @patch("os.listdir", create=True, autospec=True)
   @patch("__builtin__.open", create=True, autospec=True)
   @patch("pwd.getpwuid", create=True, autospec=True)
   def test_javaProcs(self, pwd_getpwuid_mock, buitin_open_mock, os_listdir_mock):
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
     openRead = MagicMock()
     openRead.read.return_value = '/java/;/hadoop/'
     buitin_open_mock.side_effect = [openRead, ['Uid: 22']]
@@ -438,7 +441,7 @@ class TestHostInfo(TestCase):
   @patch("subprocess.Popen")
   @patch.object(Hardware, 'extractMountInfo')
   def test_osdiskAvailableSpace(self, extract_mount_info_mock, subproc_popen_mock):
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
     p = MagicMock()
     p.communicate.return_value = ['some']
     subproc_popen_mock.return_value = p
@@ -456,7 +459,7 @@ class TestHostInfo(TestCase):
   @patch.object(OSCheck, "get_os_type")
   @patch("subprocess.Popen")
   def test_checkLiveServices(self, subproc_popen, get_os_type_method):
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
     p = MagicMock()
     p.returncode = 0
     p.communicate.return_value = ('', 'err')
@@ -496,13 +499,13 @@ class TestHostInfo(TestCase):
     self.assertEquals(result[0]['name'], 'service1')
     self.assertTrue(len(result[0]['desc']) > 0)
 
-
+  @patch("ambari_commons.os_check.os_distribution", new=MagicMock(return_value=('redhat','11','Final')))
   @patch("os.path.exists")
   @patch("os.listdir", create=True, autospec=True)
   @patch("os.path.islink")
   @patch("os.path.realpath")
   def test_etcAlternativesConf(self, os_path_realpath_mock, os_path_islink_mock, os_listdir_mock, os_path_exists_mock):
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
     os_path_exists_mock.return_value = False
     result = hostInfo.etcAlternativesConf('',[])
 
@@ -537,7 +540,7 @@ class TestHostInfo(TestCase):
     gethostbyname_mock.side_effect = ["123.123.123.123", "123.123.123.123"]
     getfqdn_mock.return_value = "test.example.com"
 
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
 
     self.assertTrue(hostInfo.checkReverseLookup())
     gethostbyname_mock.assert_any_call("test.example.com")
@@ -564,6 +567,7 @@ class TestHostInfo(TestCase):
     run_os_command_mock.return_value = 3, "", ""
     self.assertFalse(Firewall().getFirewallObject().check_iptables())
 
+  @patch("ambari_commons.os_check.os_distribution", new=MagicMock(return_value=('redhat','11','Final')))
   @patch("os.path.isfile")
   @patch('__builtin__.open')
   def test_transparent_huge_page(self, open_mock, os_path_isfile_mock):
@@ -577,7 +581,7 @@ class TestHostInfo(TestCase):
     setattr( context_manager_mock, '__enter__', enter_mock )
     setattr( context_manager_mock, '__exit__', exit_mock )
 
-    hostInfo = HostInfo()
+    hostInfo = HostInfoLinux()
 
     os_path_isfile_mock.return_value = True
     self.assertEqual("never", hostInfo.getTransparentHugePage())

+ 7 - 7
ambari-agent/src/test/python/ambari_agent/TestMain.py

@@ -37,7 +37,7 @@ with patch("platform.linux_distribution", return_value = ('Suse','11','Final')):
   from ambari_agent.PingPortListener import PingPortListener
   from ambari_agent.Controller import Controller
   from ambari_agent.DataCleaner import DataCleaner
-
+  import ambari_agent.HeartbeatHandlers as HeartbeatHandlers
 
 class TestMain(unittest.TestCase):
 
@@ -52,7 +52,7 @@ class TestMain(unittest.TestCase):
     sys.stdout = sys.__stdout__
 
 
-  @patch("ambari_agent.HeartbeatStopHandler_linux")
+  @patch("ambari_agent.HeartbeatHandlers.HeartbeatStopHandlersLinux")
   @patch("os._exit")
   @patch("os.getpid")
   @patch.object(ProcessHelper, "stopAgent")
@@ -60,13 +60,13 @@ class TestMain(unittest.TestCase):
     # testing exit of children
     main.agentPid = 4444
     os_getpid_mock.return_value = 5555
-    main.signal_handler("signum", "frame")
+    HeartbeatHandlers.signal_handler("signum", "frame")
     heartbeat_handler_mock.set_stop.assert_called()
     os_exit_mock.reset_mock()
 
     # testing exit of main process
     os_getpid_mock.return_value = main.agentPid
-    main.signal_handler("signum", "frame")
+    HeartbeatHandlers.signal_handler("signum", "frame")
     heartbeat_handler_mock.set_stop.assert_called()
 
 
@@ -123,10 +123,10 @@ class TestMain(unittest.TestCase):
   def test_bind_signal_handlers(self, signal_mock):
     main.bind_signal_handlers(os.getpid())
     # Check if on SIGINT/SIGTERM agent is configured to terminate
-    signal_mock.assert_any_call(signal.SIGINT, main.signal_handler)
-    signal_mock.assert_any_call(signal.SIGTERM, main.signal_handler)
+    signal_mock.assert_any_call(signal.SIGINT, HeartbeatHandlers.signal_handler)
+    signal_mock.assert_any_call(signal.SIGTERM, HeartbeatHandlers.signal_handler)
     # Check if on SIGUSR1 agent is configured to fall into debug
-    signal_mock.assert_any_call(signal.SIGUSR1, main.debug)
+    signal_mock.assert_any_call(signal.SIGUSR1, HeartbeatHandlers.debug)
 
 
   @patch("os.path.exists")

+ 0 - 1
ambari-agent/src/test/python/ambari_agent/TestRegistration.py

@@ -27,7 +27,6 @@ from mock.mock import MagicMock
 with patch("platform.linux_distribution", return_value = ('Suse','11','Final')):
   from ambari_agent.Register import Register
   from ambari_agent.AmbariConfig import AmbariConfig
-  from ambari_agent.HostInfo import HostInfo
   from ambari_commons import OSCheck, Firewall, FirewallChecks
 
 class TestRegistration(TestCase):

+ 84 - 108
ambari-common/src/main/python/ambari_commons/os_check.py

@@ -21,6 +21,37 @@ limitations under the License.
 import os
 import sys
 import platform
+import ctypes
+
+class _OSVERSIONINFOEXW(ctypes.Structure):
+  _fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),
+              ('dwMajorVersion', ctypes.c_ulong),
+              ('dwMinorVersion', ctypes.c_ulong),
+              ('dwBuildNumber', ctypes.c_ulong),
+              ('dwPlatformId', ctypes.c_ulong),
+              ('szCSDVersion', ctypes.c_wchar*128),
+              ('wServicePackMajor', ctypes.c_ushort),
+              ('wServicePackMinor', ctypes.c_ushort),
+              ('wSuiteMask', ctypes.c_ushort),
+              ('wProductType', ctypes.c_byte),
+              ('wReserved', ctypes.c_byte)]
+
+VER_NT_WORKSTATION = 1
+VER_NT_DOMAIN_CONTROLLER = 2
+VER_NT_SERVER = 3
+
+def _get_windows_version():
+  """
+  Get's the OS major and minor versions.  Returns a tuple of
+  (OS_MAJOR, OS_MINOR).
+  """
+  os_version = _OSVERSIONINFOEXW()
+  os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
+  retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))
+  if retcode != 0:
+    raise Exception("Failed to get OS version")
+
+  return os_version.dwMajorVersion, os_version.dwMinorVersion, os_version.dwBuildNumber, os_version.wProductType
 
 # path to resources dir
 RESOURCES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources")
@@ -30,45 +61,48 @@ OSFAMILY_JSON_RESOURCE = "os_family.json"
 JSON_OS_TYPE = "distro"
 JSON_OS_VERSION = "versions"
 
-
-def linux_distribution():
-  PYTHON_VER = sys.version_info[0] * 10 + sys.version_info[1]
-
-  if PYTHON_VER < 26:
-    (distname, version, id)  = platform.dist()
-  elif os.path.exists('/etc/redhat-release'):
-    (distname, version, id)  = platform.dist()
+# Windows family constants
+SYSTEM_WINDOWS = "Windows"
+REL_2008 = "win2008server"
+REL_2008R2 = "win2008serverr2"
+REL_2012 = "win2012server"
+REL_2012R2 = "win2012serverr2"
+
+def os_distribution():
+  if platform.system() == SYSTEM_WINDOWS:
+    # windows distribution
+    major, minor, build, code = _get_windows_version()
+    if code in (VER_NT_DOMAIN_CONTROLLER, VER_NT_SERVER):
+      # we are on server os
+      release = None
+      if major == 6:
+        if minor == 0:
+          release = REL_2008
+        elif minor == 1:
+          release = REL_2008R2
+        elif minor == 2:
+          release = REL_2012
+        elif minor == 3:
+          release = REL_2012R2
+      distribution = (release, "{0}.{1}".format(major, minor), "WindowsServer")
+    else:
+      # we are on unsupported desktop os
+      distribution = ("", "","")
   else:
-    (distname, version, id) = platform.linux_distribution()
-
-  return (platform.system(), os.name, distname, version, id)
+    # linux distribution
+    PYTHON_VER = sys.version_info[0] * 10 + sys.version_info[1]
 
-def windows_distribution():
-  from os_windows import get_windows_version
-
-  # Only support Windows Server 64 bit
-  (win_release, win_version, win_csd, win_ptype) = platform.win32_ver()
+    if PYTHON_VER < 26:
+      distribution = platform.dist()
+    elif os.path.exists('/etc/redhat-release'):
+      distribution = platform.dist()
+    else:
+      distribution = platform.linux_distribution()
 
-  if win_version.startswith("6.2."):
-    # win32_ver() doesn't work correctly for Windows Server 2012 R2 and Windows 8.1
-    (win_ver_major, win_ver_minor, win_ver_build) = get_windows_version()
-    if win_ver_major == 6 and win_ver_minor == 3:
-      win_release = "2012ServerR2"
-      win_version = "%d.%d.%d" % (win_ver_major, win_ver_minor, win_ver_build)
+  return distribution
 
-  #if win_version
-  return (platform.system(), os.name, "win" + win_release, win_version, win_ptype)
 
 class OS_CONST_TYPE(type):
-  # os platforms
-  LINUX_OS = 'linux'
-  WINDOWS_OS = 'windows'
-
-  # os families
-  REDHAT_FAMILY = 'redhat'
-  DEBIAN_FAMILY = 'debian'
-  SUSE_FAMILY = 'suse'
-  WINSRV_FAMILY = 'winsrv'
 
   # Declare here os type mapping
   OS_FAMILY_COLLECTION = []
@@ -81,8 +115,7 @@ class OS_CONST_TYPE(type):
       Initialize internal data structures from file
     """
     try:
-      fpath = os.path.join(RESOURCES_DIR, OSFAMILY_JSON_RESOURCE)
-      f = open(fpath)
+      f = open(os.path.join(RESOURCES_DIR, OSFAMILY_JSON_RESOURCE))
       json_data = eval(f.read())
       f.close()
       for family in json_data:
@@ -93,7 +126,7 @@ class OS_CONST_TYPE(type):
           'os_list': json_data[family][JSON_OS_TYPE]
         }]
     except:
-      raise Exception("Couldn't load '%s' file" % fpath)
+      raise Exception("Couldn't load '%s' file" % OSFAMILY_JSON_RESOURCE)
 
   def __init__(cls, name, bases, dct):
     cls.initialize_data()
@@ -113,18 +146,6 @@ class OS_CONST_TYPE(type):
       return name[:-7]
     raise Exception("Unknown class property '%s'" % name)
 
-def get_os_distribution():
-  if platform.system() == 'Windows':
-    dist = windows_distribution()
-  else:
-    if platform.system() == 'Mac':
-      raise Exception("MacOS not supported. Exiting...")
-    else:
-      # Linux
-      # Read content from /etc/*-release file
-      # Full release name
-      dist = linux_distribution()
-  return dist
 
 class OSConst:
   __metaclass__ = OS_CONST_TYPE
@@ -132,25 +153,10 @@ class OSConst:
 
 class OSCheck:
 
-  @staticmethod
-  def get_os_os():
-    """
-    Return values:
-    windows, linux
-
-    In case cannot detect - exit.
-    """
-    # Read content from /etc/*-release file
-    # Full release name
-    os_os = get_os_distribution()[0].lower()
-
-    return os_os
-
   @staticmethod
   def get_os_type():
     """
     Return values:
-    win2008server, win2012server,
     redhat, fedora, centos, oraclelinux, ascendos,
     amazon, xenserver, oel, ovs, cloudlinux, slc, scientific, psbm,
     ubuntu, debian, sles, sled, opensuse, suse ... and others
@@ -159,7 +165,8 @@ class OSCheck:
     """
     # Read content from /etc/*-release file
     # Full release name
-    operatingSystem  = get_os_distribution()[2].lower()
+    dist = os_distribution()
+    operatingSystem = dist[0].lower()
 
     # special cases
     if os.path.exists('/etc/oracle-release'):
@@ -197,7 +204,10 @@ class OSCheck:
 
     In case cannot detect raises exception.
     """
-    dist = get_os_distribution()[3]
+    # Read content from /etc/*-release file
+    # Full release name
+    dist = os_distribution()
+    dist = dist[1]
 
     if dist:
       return dist
@@ -220,7 +230,8 @@ class OSCheck:
 
     In case cannot detect raises exception.
     """
-    dist = get_os_distribution()[4].lower()
+    dist = os_distribution()
+    dist = dist[2].lower()
 
     if dist:
       return dist
@@ -272,64 +283,29 @@ class OSCheck:
     return False
 
   @staticmethod
-  def is_windows_family():
-    """
-     Return true if it is so or false if not
-
-     This is safe check for windows family, doesn't generate exception
-    """
-    try:
-      if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
-        return True
-    except Exception:
-      pass
-    return False
-
-  @staticmethod
-  def is_linux_os():
-    """
-     Return true if it is so or false if not
-
-     This is safe check for linux os, doesn't generate exception
-    """
-    try:
-      if OSCheck.get_os_os() == OSConst.LINUX_OS:
-        return True
-    except Exception:
-      pass
-    return False
-
-  @staticmethod
-  def is_windows_os():
+  def is_redhat7():
     """
      Return true if it is so or false if not
 
-     This is safe check for windows os, doesn't generate exception
+     This is safe check for redhat7 , doesn't generate exception
     """
     try:
-      if OSCheck.get_os_os() == OSConst.WINDOWS_OS:
+      ostemp=OSCheck.get_os_family()+OSCheck().get_os_major_version()
+      if ostemp == 'redhat7':
         return True
     except Exception:
       pass
     return False
 
   @staticmethod
-  def is_redhat7():
+  def is_windows_family():
     """
      Return true if it is so or false if not
 
-     This is safe check for redhat7 , doesn't generate exception
+     This is safe check for winsrv , doesn't generate exception
     """
     try:
-      ostemp=OSCheck.get_os_family()+OSCheck().get_os_major_version()
-      if ostemp == 'redhat7':
-        return True
+      return OSCheck.get_os_family() == OSConst.WINSRV_FAMILY
     except Exception:
       pass
     return False
-
-# OS info
-OS_VERSION = OSCheck().get_os_major_version()
-OS_TYPE = OSCheck.get_os_type()
-OS_FAMILY = OSCheck.get_os_family()
-OS_OS = OSCheck.get_os_os()
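
The consolidated surface, in one short sketch (example values shown; is_windows_family() now also covers what the removed is_windows_os() did):

from ambari_commons.os_check import OSCheck, OSConst, os_distribution

print os_distribution()            # e.g. ('redhat', '6.4', 'Final') on RHEL, or
                                   # ('win2012serverr2', '6.3', 'WindowsServer')
print OSCheck.get_os_family()      # 'redhat', 'ubuntu', 'suse' or OSConst.WINSRV_FAMILY
print OSCheck.is_windows_family()  # single safe check replacing is_windows_os()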

+ 64 - 0
ambari-common/src/main/python/ambari_commons/os_family_impl.py

@@ -0,0 +1,64 @@
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import types
+from os_check import OSCheck
+
+
+class OsFamilyImpl(object):
+  """
+  Base class for os depended factory. Usage::
+
+      class BaseFoo(object): pass
+      @Factory("windows")
+      class OsFoo(object):pass
+      print BaseFoo()# OsFoo
+
+  """
+
+  DEFAULT = "default"
+  """
+  Constant selecting the default (fallback) implementation.
+  """
+
+  def __init__(self, base_cls=None, os_family=None):
+    self.base_cls = base_cls
+    self.os_const = os_family
+
+
+  def __call__(self, cls):
+    if self.base_cls:
+      base_cls = self.base_cls
+    else:
+      base_cls = cls.__bases__[0]
+
+    if not hasattr(base_cls, "_impls"):
+      base_cls._impls = {}
+
+    base_cls._impls[self.os_const] = cls
+
+    def new(cls, *args, **kwargs):
+      if OSCheck.get_os_family() in cls._impls:
+        os_impl_cls = cls._impls[OSCheck.get_os_family()]
+      else:
+        os_impl_cls = cls._impls[OsFamilyImpl.DEFAULT]
+      return object.__new__(os_impl_cls)
+
+    base_cls.__new__ = types.MethodType(new, base_cls)
+
+    return cls
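
A hedged usage sketch of the decorator above (class names here are hypothetical; OSConst.WINSRV_FAMILY comes from os_check): each decorated subclass registers itself in the base class's _impls map, and the patched __new__ resolves instantiation of the base class to the implementation matching OSCheck.get_os_family(), falling back to the DEFAULT registration:

    from ambari_commons.os_check import OSConst
    from ambari_commons.os_family_impl import OsFamilyImpl

    class HostCheck(object):  # hypothetical base class
      pass

    @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
    class HostCheckLinux(HostCheck):
      def ping(self):
        return "linux ping"

    @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
    class HostCheckWindows(HostCheck):
      def ping(self):
        return "windows ping"

    # Yields "windows ping" on the winsrv family, "linux ping" elsewhere.
    print HostCheck().ping()

Note that a DEFAULT registration is effectively mandatory: the fallback branch in new() does an unguarded _impls[OsFamilyImpl.DEFAULT] lookup and raises KeyError without one.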

+ 2 - 2
ambari-common/src/main/python/ambari_commons/os_utils.py

@@ -23,7 +23,7 @@ import string
 
 from os_check import *
 
-if OSCheck.is_windows_os():
+if OSCheck.is_windows_family():
   from os_windows import *
 else:
   # MacOS not supported
@@ -99,4 +99,4 @@ def set_open_files_limit(maxOpenFiles):
   os_set_open_files_limit(maxOpenFiles)
 
 def get_password(prompt):
-  return os_getpass(prompt)
+  return os_getpass(prompt)

+ 1 - 1
ambari-common/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py

@@ -25,7 +25,7 @@ import datetime
 from resource_management.core import shell
 from ambari_commons import os_check
 def get_unique_id_and_date():
-  if os_check.OSCheck.is_windows_os():
+  if os_check.OSCheck.is_windows_family():
     from ambari_commons.os_windows import run_os_command
     code, out, err = run_os_command("cmd /c vol C:")
     for line in out.splitlines():
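
The hunk is cut off mid-loop; `vol C:` typically ends its output with a line like "Volume Serial Number is 1234-ABCD", which the loop (continuation not shown here) presumably scans for. A hypothetical, standalone sketch of such parsing -- not the committed loop body:

    out = " Volume in drive C is OS\n Volume Serial Number is 1234-ABCD\n"
    serial = None
    for line in out.splitlines():
      if "Volume Serial Number is" in line:
        serial = line.split()[-1]  # -> '1234-ABCD'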

+ 1 - 1
ambari-server/src/main/python/ambari-server.py

@@ -763,7 +763,7 @@ def check_reverse_lookup():
     host_fqdn = socket.getfqdn().lower()
     fqdn_ip = socket.gethostbyname(host_fqdn)
     return host_ip == fqdn_ip
-  except socket.herror:
+  except socket.error:
     pass
   return False
 

+ 2 - 2
ambari-server/src/main/python/ambari_server/dbConfiguration.py

@@ -92,7 +92,7 @@ class DBMSConfig(object):
   # dbId = additional information, that helps distinguish between various database connections
   #   (Ambari vs. Metrics is a prime example)
   def create(options, properties, dbId = "Ambari"):
-    #if OSCheck.is_windows_os():
+    #if OSCheck.is_windows_family():
     if dbId == "Ambari":
       return SQLServerAmbariDBConfig(options, properties)
     elif dbId == "Metrics":
@@ -207,7 +207,7 @@ class DBMSConfig(object):
   def ensure_dbms_is_running(self, options, properties, scmStatus=None):
     pass
 
-if OSCheck.is_windows_os():
+if OSCheck.is_windows_family():
   from ambari_server.dbConfiguration_windows import SQLServerAmbariDBConfig, SQLServerMetricsDBConfig
 #else:
 #  from ambari_server.dbConfiguration_linux import PostgreSQLConfig #and potentially MySQLConfig, OracleConfig
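
A minimal usage sketch of the create() factory above, with placeholders standing in for the real parsed command-line options and server properties:

    from ambari_server.dbConfiguration import DBMSConfig

    options, properties = object(), {}  # placeholders for the real inputs

    ambari_db = DBMSConfig.create(options, properties)              # -> SQLServerAmbariDBConfig
    metrics_db = DBMSConfig.create(options, properties, "Metrics")  # -> SQLServerMetricsDBConfig

On this Windows-only code path the is_windows_family() guard stays commented out, so both dbId values resolve straight to the SQL Server configs.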

+ 1 - 1
ambari-server/src/main/python/ambari_server/serverConfiguration.py

@@ -26,7 +26,7 @@ from ambari_commons.os_utils import *
 from ambari_commons.logging_utils import print_warning_msg, print_info_msg, print_error_msg
 from properties import Properties
 
-if OSCheck.is_windows_os():
+if OSCheck.is_windows_family():
   from serverConfiguration_windows import *
 else:
   # MacOS not supported

+ 6 - 3
ambari-server/src/main/python/ambari_server/serverSetup.py

@@ -30,12 +30,15 @@ from setupSecurity import adjust_directory_permissions, get_is_secure, store_pas
 from userInput import *
 from utils import *
 
-if OSCheck.is_windows_os():
+if OSCheck.is_windows_family():
   from serverSetup_windows import *
 else:
   # MacOS not supported
   from serverSetup_linux import *
 
+OS_VERSION = OSCheck.get_os_major_version()
+OS_TYPE = OSCheck.get_os_type()
+OS_FAMILY = OSCheck.get_os_family()
 
 JDK_INDEX = 0
 
@@ -179,7 +182,7 @@ def download_and_install_jdk(args):
       )
 
       java_bin = "java"
-      if OSCheck.is_windows_os():
+      if OSCheck.is_windows_family():
         java_bin = "java.exe"
 
       if jdk_num == str(custom_jdk_number):
@@ -326,7 +329,7 @@ def configure_os_settings():
   except (KeyError):
     print_error_msg("os_type is not set in the properties file. Setting it now.")
 
-  if OSCheck.is_windows_os():
+  if OSCheck.is_windows_family():
     master_os_type = OS_TYPE + OS_VERSION
   else:
     # MacOS not supported

+ 2 - 2
ambari-server/src/main/python/ambari_server/setupSecurity.py

@@ -42,7 +42,7 @@ GET_CRT_INFO_CMD = 'openssl x509 -dates -subject -in {0}'
 
 #keytool commands
 keytool_bin = "keytool"
-if OSCheck.is_windows_os():
+if OSCheck.is_windows_family():
   keytool_bin = "keytool.exe"
 
 KEYTOOL_IMPORT_CERT_CMD = "{0}" + os.sep + "bin" + os.sep + keytool_bin + " -import -alias '{1}' -storetype '{2}' -file '{3}' -storepass '{4}' -noprompt"
@@ -50,7 +50,7 @@ KEYTOOL_DELETE_CERT_CMD = "{0}" + os.sep + "bin" + os.sep + keytool_bin + " -del
 KEYTOOL_KEYSTORE = " -keystore '{0}'"
 
 java_bin = "java"
-if OSCheck.is_windows_os():
+if OSCheck.is_windows_family():
   java_bin = "java.exe"
 
 SECURITY_PROVIDER_GET_CMD = "{0}" + os.sep + "bin" + os.sep + java_bin + " -cp {1}" +\

+ 0 - 22
ambari-server/src/test/python/TestOSCheck.py

@@ -28,7 +28,6 @@ from unittest import TestCase
 from mock.mock import patch
 
 from ambari_commons import OSCheck
-from ambari_commons.os_check import get_os_distribution
 import os_check_type
 
 utils = __import__('ambari_server.utils').utils
@@ -46,13 +45,11 @@ class TestOSCheck(TestCase):
     # 1 - Any system
     mock_exists.return_value = False
     mock_linux_distribution.return_value = ('my_os', '', '')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_type()
     self.assertEquals(result, 'my_os')
 
     # 2 - Negative case
     mock_linux_distribution.return_value = ('', 'aaaa', 'bbbbb')
-    OSCheck._dist = get_os_distribution()
     try:
       result = OSCheck.get_os_type()
       self.fail("Should throw exception in OSCheck.get_os_type()")
@@ -64,14 +61,12 @@ class TestOSCheck(TestCase):
     # 3 - path exist: '/etc/oracle-release'
     mock_exists.return_value = True
     mock_linux_distribution.return_value = ('some_os', '', '')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_type()
     self.assertEquals(result, 'oraclelinux')
 
     # 4 - Common system
     mock_exists.return_value = False
     mock_linux_distribution.return_value = ('CenToS', '', '')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_type()
     self.assertEquals(result, 'centos')
 
@@ -79,19 +74,16 @@ class TestOSCheck(TestCase):
     mock_exists.return_value = False
     # Red Hat Enterprise Linux Server release 6.5 (Santiago)
     mock_linux_distribution.return_value = ('Red Hat Enterprise Linux Server', '6.5', 'Santiago')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_type()
     self.assertEquals(result, 'redhat')
 
     # Red Hat Enterprise Linux Workstation release 6.4 (Santiago)
     mock_linux_distribution.return_value = ('Red Hat Enterprise Linux Workstation', '6.4', 'Santiago')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_type()
     self.assertEquals(result, 'redhat')
 
     # Red Hat Enterprise Linux AS release 4 (Nahant Update 3)
     mock_linux_distribution.return_value = ('Red Hat Enterprise Linux AS', '4', 'Nahant Update 3')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_type()
     self.assertEquals(result, 'redhat')
 
@@ -102,21 +94,18 @@ class TestOSCheck(TestCase):
     # 1 - Any system
     mock_exists.return_value = False
     mock_linux_distribution.return_value = ('MY_os', '', '')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_family()
     self.assertEquals(result, 'my_os')
 
     # 2 - Redhat
     mock_exists.return_value = False
     mock_linux_distribution.return_value = ('Centos Linux', '', '')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_family()
     self.assertEquals(result, 'redhat')
 
     # 3 - Ubuntu
     mock_exists.return_value = False
     mock_linux_distribution.return_value = ('Ubuntu', '', '')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_family()
     self.assertEquals(result, 'ubuntu')
 
@@ -124,19 +113,16 @@ class TestOSCheck(TestCase):
     mock_exists.return_value = False
     mock_linux_distribution.return_value = (
     'suse linux enterprise server', '', '')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_family()
     self.assertEquals(result, 'suse')
 
     mock_exists.return_value = False
     mock_linux_distribution.return_value = ('SLED', '', '')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_family()
     self.assertEquals(result, 'suse')
 
     # 5 - Negative case
     mock_linux_distribution.return_value = ('', '111', '2222')
-    OSCheck._dist = get_os_distribution()
     try:
       result = OSCheck.get_os_family()
       self.fail("Should throw exception in OSCheck.get_os_family()")
@@ -150,13 +136,11 @@ class TestOSCheck(TestCase):
 
     # 1 - Any system
     mock_linux_distribution.return_value = ('', '123.45', '')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_version()
     self.assertEquals(result, '123.45')
 
     # 2 - Negative case
     mock_linux_distribution.return_value = ('ssss', '', 'ddddd')
-    OSCheck._dist = get_os_distribution()
     try:
       result = OSCheck.get_os_version()
       self.fail("Should throw exception in OSCheck.get_os_version()")
@@ -170,13 +154,11 @@ class TestOSCheck(TestCase):
 
     # 1
     mock_linux_distribution.return_value = ('', '123.45.67', '')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_major_version()
     self.assertEquals(result, '123')
 
     # 2
     mock_linux_distribution.return_value = ('Suse', '11', '')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_major_version()
     self.assertEquals(result, '11')
 
@@ -185,13 +167,11 @@ class TestOSCheck(TestCase):
 
     # 1 - Any system
     mock_linux_distribution.return_value = ('', '', 'MY_NEW_RELEASE')
-    OSCheck._dist = get_os_distribution()
     result = OSCheck.get_os_release_name()
     self.assertEquals(result, 'my_new_release')
 
     # 2 - Negative case
     mock_linux_distribution.return_value = ('aaaa', 'bbbb', '')
-    OSCheck._dist = get_os_distribution()
     try:
       result = OSCheck.get_os_release_name()
       self.fail("Should throw exception in OSCheck.get_os_release_name()")
@@ -253,7 +233,6 @@ class TestOSCheck(TestCase):
     mock_linux_distribution.return_value = ('aaa', '11', 'bb')
     base_args = ["os_check_type.py", "aaa11"]
     sys.argv = list(base_args)
-    OSCheck._dist = get_os_distribution()
 
     try:
       os_check_type.main()
@@ -265,7 +244,6 @@ class TestOSCheck(TestCase):
     mock_linux_distribution.return_value = ('ddd', '33', 'bb')
     base_args = ["os_check_type.py", "zzz_x77"]
     sys.argv = list(base_args)
-    OSCheck._dist = get_os_distribution()
 
     try:
       os_check_type.main()
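
Every line deleted in this test file is the same one-line cache refresh, needed only while OSCheck kept a class-level _dist snapshot; with the distribution now probed on each call, patching the probe alone is sufficient. A hypothetical sketch of the simplified pattern (exact patch targets assumed):

    from unittest import TestCase
    from mock.mock import patch
    from ambari_commons import OSCheck

    class TestOSCheckLazy(TestCase):
      @patch("os.path.exists")
      @patch("platform.linux_distribution")
      def test_get_os_type(self, mock_linux_distribution, mock_exists):
        mock_exists.return_value = False
        mock_linux_distribution.return_value = ('CenToS', '6.5', 'Final')
        # No OSCheck._dist refresh required between scenarios.
        self.assertEquals(OSCheck.get_os_type(), 'centos')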