
AMBARI-8269. Merge branch-windows-dev changes to trunk. (Jayush Luniya via yusaku)

Yusaku Sako, 10 years ago
parent commit 8de3425f3f
100 changed files with 7116 additions and 1389 deletions
  1. ambari-admin/pom.xml (+30 -4)
  2. ambari-agent/conf/windows/ambari-agent.cmd (+18 -0)
  3. ambari-agent/conf/windows/ambari-agent.ini (+54 -0)
  4. ambari-agent/conf/windows/ambari-agent.ps1 (+245 -0)
  5. ambari-agent/conf/windows/ambari-env.cmd (+22 -0)
  6. ambari-agent/conf/windows/createservice.ps1 (+195 -0)
  7. ambari-agent/conf/windows/service_wrapper.py (+227 -0)
  8. ambari-agent/pom.xml (+164 -9)
  9. ambari-agent/src/main/package/msi/ambari-agent.wxs (+38 -0)
  10. ambari-agent/src/main/python/ambari_agent/ActionQueue.py (+17 -18)
  11. ambari-agent/src/main/python/ambari_agent/AgentConfig_linux.py (+229 -0)
  12. ambari-agent/src/main/python/ambari_agent/AgentConfig_windows.py (+232 -0)
  13. ambari-agent/src/main/python/ambari_agent/AmbariAgent.py (+8 -2)
  14. ambari-agent/src/main/python/ambari_agent/AmbariConfig.py (+41 -199)
  15. ambari-agent/src/main/python/ambari_agent/Controller.py (+31 -13)
  16. ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py (+8 -6)
  17. ambari-agent/src/main/python/ambari_agent/Facter.py (+222 -107)
  18. ambari-agent/src/main/python/ambari_agent/FileCache.py (+2 -1)
  19. ambari-agent/src/main/python/ambari_agent/Hardware.py (+31 -2)
  20. ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers_windows.py (+58 -0)
  21. ambari-agent/src/main/python/ambari_agent/HeartbeatStopHandler_linux.py (+91 -0)
  22. ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py (+3 -2)
  23. ambari-agent/src/main/python/ambari_agent/HostInfo.py (+6 -389)
  24. ambari-agent/src/main/python/ambari_agent/HostInfo_linux.py (+411 -0)
  25. ambari-agent/src/main/python/ambari_agent/HostInfo_win.py (+231 -0)
  26. ambari-agent/src/main/python/ambari_agent/LiveStatus.py (+1 -1)
  27. ambari-agent/src/main/python/ambari_agent/NetUtil.py (+27 -3)
  28. ambari-agent/src/main/python/ambari_agent/PackagesAnalyzer.py (+7 -7)
  29. ambari-agent/src/main/python/ambari_agent/PythonExecutor.py (+19 -18)
  30. ambari-agent/src/main/python/ambari_agent/StatusCheck.py (+3 -3)
  31. ambari-agent/src/main/python/ambari_agent/hostname.py (+4 -4)
  32. ambari-agent/src/main/python/ambari_agent/main.py (+50 -57)
  33. ambari-agent/src/main/python/ambari_agent/security.py (+16 -12)
  34. ambari-agent/src/main/python/ambari_agent/shell.py (+93 -20)
  35. ambari-agent/src/packages/windows.xml (+82 -0)
  36. ambari-agent/src/test/python/ambari_agent/TestAlerts.py (+17 -17)
  37. ambari-agent/src/test/python/ambari_agent/TestCertGeneration.py (+0 -1)
  38. ambari-agent/src/test/python/ambari_agent/TestController.py (+3 -3)
  39. ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py (+21 -20)
  40. ambari-agent/src/test/python/ambari_agent/TestHostname.py (+1 -1)
  41. ambari-agent/src/test/python/ambari_agent/TestMain.py (+13 -10)
  42. ambari-agent/src/test/python/ambari_agent/TestNetUtil.py (+8 -5)
  43. ambari-agent/src/test/python/ambari_agent/TestStatusCheck.py (+8 -8)
  44. ambari-agent/src/test/python/resource_management/TestContentSources.py (+1 -1)
  45. ambari-agent/src/test/python/resource_management/TestDirectoryResource.py (+1 -1)
  46. ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py (+2 -0)
  47. ambari-agent/src/test/python/resource_management/TestExecuteResource.py (+1 -0)
  48. ambari-agent/src/test/python/resource_management/TestMonitorWebserverResource.py (+1 -0)
  49. ambari-agent/src/test/python/resource_management/TestSubstituteVars.py (+2 -0)
  50. ambari-agent/src/test/python/unitTests.py (+4 -2)
  51. ambari-client/groovy-client/pom.xml (+1 -0)
  52. ambari-client/python-client/pom.xml (+3 -3)
  53. ambari-common/src/main/python/ambari_commons/ambari_service.py (+79 -0)
  54. ambari-common/src/main/python/ambari_commons/exceptions.py (+35 -0)
  55. ambari-common/src/main/python/ambari_commons/inet_utils.py (+148 -0)
  56. ambari-common/src/main/python/ambari_commons/logging_utils.py (+49 -0)
  57. ambari-common/src/main/python/ambari_commons/os_check.py (+111 -15)
  58. ambari-common/src/main/python/ambari_commons/os_linux.py (+81 -0)
  59. ambari-common/src/main/python/ambari_commons/os_utils.py (+102 -0)
  60. ambari-common/src/main/python/ambari_commons/os_windows.py (+563 -0)
  61. ambari-common/src/main/python/ambari_commons/resources/os_family.json (+11 -0)
  62. ambari-common/src/main/python/ambari_commons/str_utils.py (+30 -0)
  63. ambari-common/src/main/python/resource_management/core/logger.py (+22 -19)
  64. ambari-common/src/main/python/resource_management/core/providers/__init__.py (+6 -0)
  65. ambari-common/src/main/python/resource_management/core/providers/windows/__init__.py (+20 -0)
  66. ambari-common/src/main/python/resource_management/core/providers/windows/service.py (+65 -0)
  67. ambari-common/src/main/python/resource_management/core/providers/windows/system.py (+382 -0)
  68. ambari-common/src/main/python/resource_management/libraries/functions/__init__.py (+9 -0)
  69. ambari-common/src/main/python/resource_management/libraries/functions/default.py (+6 -2)
  70. ambari-common/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py (+11 -4)
  71. ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py (+182 -0)
  72. ambari-common/src/main/python/resource_management/libraries/functions/reload_windows_env.py (+48 -0)
  73. ambari-common/src/main/python/resource_management/libraries/functions/tar_archive.py (+30 -0)
  74. ambari-common/src/main/python/resource_management/libraries/functions/windows_service_utils.py (+42 -0)
  75. ambari-common/src/main/python/resource_management/libraries/functions/zip_archive.py (+40 -0)
  76. ambari-common/src/main/python/resource_management/libraries/providers/__init__.py (+3 -0)
  77. ambari-common/src/main/python/resource_management/libraries/providers/xml_config.py (+7 -6)
  78. ambari-common/src/main/python/resource_management/libraries/script/script.py (+115 -68)
  79. ambari-server/conf/unix/ambari.properties (+1 -0)
  80. ambari-server/conf/windows/ambari-env.cmd (+19 -0)
  81. ambari-server/conf/windows/ambari.properties (+82 -0)
  82. ambari-server/conf/windows/ca.config (+29 -0)
  83. ambari-server/conf/windows/install-helper.cmd (+61 -0)
  84. ambari-server/conf/windows/krb5JAASLogin.conf (+12 -0)
  85. ambari-server/conf/windows/log4j.properties (+68 -0)
  86. ambari-server/docs/api/v1/clusters-cluster.md (+186 -186)
  87. ambari-server/pom.xml (+188 -6)
  88. ambari-server/src/main/assemblies/server-windows.xml (+183 -0)
  89. ambari-server/src/main/java/org/apache/ambari/server/DBConnectionVerification.java (+7 -1)
  90. ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java (+3 -3)
  91. ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRunner.java (+9 -4)
  92. ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java (+52 -1)
  93. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java (+12 -0)
  94. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java (+227 -122)
  95. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java (+3 -2)
  96. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java (+1 -1)
  97. ambari-server/src/main/java/org/apache/ambari/server/controller/sql/HostInfoProvider.java (+63 -0)
  98. ambari-server/src/main/java/org/apache/ambari/server/controller/sql/SQLPropertyProvider.java (+572 -0)
  99. ambari-server/src/main/java/org/apache/ambari/server/controller/sql/SinkConnectionFactory.java (+132 -0)
  100. ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java (+6 -0)

+ 30 - 4
ambari-admin/pom.xml

@@ -99,9 +99,9 @@
             </goals>
             <configuration>
               <workingDirectory>${basedir}/src/main/resources/ui/admin-web</workingDirectory>
-              <executable>node/node</executable>
+              <executable>${basedir}/src/main/resources/ui/admin-web/node/${executable.node}</executable>
               <arguments>
-                <argument>node_modules/bower/bin/bower</argument>
+                <argument>${basedir}/src/main/resources/ui/admin-web/node_modules/bower/bin/bower</argument>
                 <argument>install</argument>
                 <argument>--allow-root</argument>
               </arguments>
@@ -115,9 +115,9 @@
             </goals>
             <configuration>
               <workingDirectory>${basedir}/src/main/resources/ui/admin-web</workingDirectory>
-              <executable>node/node</executable>
+              <executable>${basedir}/src/main/resources/ui/admin-web/node/${executable.node}</executable>
               <arguments>
-                <argument>node_modules/gulp/bin/gulp</argument>
+                <argument>${basedir}/src/main/resources/ui/admin-web/node_modules/gulp/bin/gulp</argument>
                 <argument>build</argument>
               </arguments>
             </configuration>
@@ -215,4 +215,30 @@
       </resource>
     </resources>
   </build>
+    <profiles>
+    <profile>
+        <id>windows</id>
+        <activation>
+            <os>
+                <family>win</family>
+            </os>
+        </activation>
+        <properties>
+            <envClassifier>win</envClassifier>
+            <executable.node>node.exe</executable.node>
+        </properties>
+    </profile>
+    <profile>
+        <id>linux</id>
+        <activation>
+            <os>
+                <family>unix</family>
+            </os>
+        </activation>
+        <properties>
+            <envClassifier>linux</envClassifier>
+            <executable.node>node</executable.node>
+        </properties>
+    </profile>
+  </profiles>
 </project>

+ 18 - 0
ambari-agent/conf/windows/ambari-agent.cmd

@@ -0,0 +1,18 @@
+@echo off
+REM Licensed to the Apache Software Foundation (ASF) under one or more
+REM contributor license agreements.  See the NOTICE file distributed with
+REM this work for additional information regarding copyright ownership.
+REM The ASF licenses this file to You under the Apache License, Version 2.0
+REM (the "License"); you may not use this file except in compliance with
+REM the License.  You may obtain a copy of the License at
+REM
+REM     http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+call ambari-env.cmd
+powershell -ExecutionPolicy unrestricted -File ambari-agent.ps1 %*

+ 54 - 0
ambari-agent/conf/windows/ambari-agent.ini

@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under the License.
+
+[server]
+hostname=localhost
+url_port=8440
+secured_url_port=8441
+
+[agent]
+prefix=data
+tmp_dir=\\var\\lib\\ambari-agent\\data\\tmp
+;loglevel=(DEBUG/INFO)
+loglevel=INFO
+data_cleanup_interval=86400
+data_cleanup_max_age=2592000
+data_cleanup_max_size_MB = 100
+ping_port=8670
+cache_dir=cache
+tolerate_download_failures=true
+
+[command]
+maxretries=2
+sleepBetweenRetries=1
+
+[security]
+keysdir=keys
+server_crt=ca.crt
+passphrase_env_var_name=AMBARI_PASSPHRASE
+
+[services]
+pidLookupPath=\\var\\run\\ambari-agent
+
+[heartbeat]
+state_interval=6
+dirs=/etc/hadoop,/etc/hadoop/conf,/etc/hbase,/etc/hcatalog,/etc/hive,/etc/oozie,
+  /etc/sqoop,/etc/ganglia,/etc/nagios,
+  /var/run/hadoop,/var/run/zookeeper,/var/run/hbase,/var/run/templeton,/var/run/oozie,
+  /var/log/hadoop,/var/log/zookeeper,/var/log/hbase,/var/run/templeton,/var/log/hive,
+  /var/log/nagios
+rpms=nagios,ganglia,
+  hadoop,hadoop-lzo,hbase,oozie,sqoop,pig,zookeeper,hive,libconfuse,ambari-log4j
+; 0 - unlimited
+log_lines_count=300

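The agent's Python code consumes this file through the standard-library ConfigParser, whose syntax matches what is written above: a line starting with ";" is a comment, and indented lines continue the previous value (as in the [heartbeat] dirs entry). A minimal sketch of that parsing, assuming Python 2 and the file name used in this commit; the snippet itself is not part of the commit:

    import ConfigParser  # Python 2 standard library

    config = ConfigParser.RawConfigParser()
    config.read("ambari-agent.ini")

    loglevel = config.get("agent", "loglevel")  # "INFO"
    # Continuation lines are folded into the value with embedded newlines;
    # split on commas and strip whitespace to recover the directory list.
    dirs = [d.strip() for d in config.get("heartbeat", "dirs").split(",")]
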
+ 245 - 0
ambari-agent/conf/windows/ambari-agent.ps1

@@ -0,0 +1,245 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+# description: ambari-agent service
+# processname: ambari-agent
+
+$VERSION="1.3.0-SNAPSHOT"
+$HASH="testhash"
+
+switch ($($args[0])){
+  "--version" {
+    echo "$VERSION"
+    exit 0
+  }
+  "--hash" {
+    echo "$HASH"
+    exit 0
+  }
+}
+
+$AMBARI_AGENT="ambari-agent"
+$AMBARI_SVC_NAME = "Ambari Agent"
+$current_directory = (Get-Item -Path ".\" -Verbose).FullName
+# Environment variables used by the Python code; check whether they exist,
+# otherwise set them to $current_directory and pass them to the child Python process.
+$Env:PYTHONPATH="$current_directory\sbin;$($Env:PYTHONPATH)"
+$Env:PYTHON = "python.exe"
+
+$AMBARI_LOG_DIR="\var\log\ambari-agent"
+$OUTFILE_STDOUT=Join-Path -path $AMBARI_LOG_DIR -childpath "ambari-agent.stdout"
+$OUTFILE_STDERR=Join-Path -path $AMBARI_LOG_DIR -childpath "ambari-agent.stderr"
+$LOGFILE=Join-Path -path $AMBARI_LOG_DIR -childpath "ambari-agent.log"
+$AMBARI_AGENT_PY_SCRIPT=Join-Path -path $PSScriptRoot -childpath "sbin\service_wrapper.py"
+if($AMBARI_AGENT_PY_SCRIPT.Contains(' '))
+{
+  $AMBARI_AGENT_PY_SCRIPT = """" + $AMBARI_AGENT_PY_SCRIPT + """"
+}
+
+$OK=1
+$NOTOK=0
+
+$retcode=0
+
+function _exit($code)
+{
+  $host.SetShouldExit($code)
+  exit $code
+}
+
+function _detect_python()
+{
+  if(![boolean]$(Get-Command $Env:PYTHON -ErrorAction SilentlyContinue))
+  {
+    echo "ERROR: Cannot find python.exe in PATH. Add the Python executable to PATH and try again."
+    _exit(1)
+  }
+}
+
+function _echo([switch]$off)
+{
+  if($off)
+  {
+    try
+    {
+      stop-transcript|out-null
+    }
+    catch [System.InvalidOperationException]
+    {}
+  }
+  else
+  {
+    try
+    {
+      start-transcript|out-null
+    }
+    catch [System.InvalidOperationException]
+    {}
+  }
+}
+
+Function _pstart_brief($cmd_args)
+{
+  #start python with -u to make stdout and stderr unbuffered
+  $arguments = @("-u",$AMBARI_AGENT_PY_SCRIPT) + $cmd_args
+
+  $psi = New-Object System.Diagnostics.ProcessStartInfo
+
+  $psi.RedirectStandardError = $True
+  $psi.RedirectStandardOutput = $True
+
+  $psi.UseShellExecute = $False
+
+  $psi.FileName = $Env:PYTHON
+  $psi.Arguments = $arguments
+  #$psi.WindowStyle = WindowStyle.Hidden
+
+  $process = [Diagnostics.Process]::Start($psi)
+
+  $process.WaitForExit()
+
+  Write-Output $process.StandardOutput.ReadToEnd()
+}
+
+Function _start($cmd_args)
+{
+  echo "Starting $AMBARI_SVC_NAME..."
+  _echo -off
+
+  _pstart_brief($cmd_args)
+
+  $cnt = 0
+  do
+  {
+    Start-Sleep -Milliseconds 250
+    $svc = Get-Service -Name $AMBARI_SVC_NAME
+    $cnt += 1
+    if ($cnt -eq 120)
+    {
+      echo "$AMBARI_SVC_NAME still starting..."
+      return
+    }
+  }
+  until($svc.status -eq "Running")
+
+  echo "$AMBARI_SVC_NAME is running"
+}
+
+Function _pstart($cmd_args)
+{
+  New-Item -ItemType Directory -Force -Path $AMBARI_LOG_DIR | Out-Null
+  $arguments = @($AMBARI_AGENT_PY_SCRIPT) + $cmd_args
+  $p = New-Object System.Diagnostics.Process
+  $p.StartInfo.UseShellExecute = $false
+  $p.StartInfo.FileName = $Env:PYTHON
+  $p.StartInfo.Arguments = $arguments
+  [void]$p.Start();
+
+  echo "Verifying $AMBARI_AGENT process status..."
+  if (!$p){
+    echo "ERROR: $AMBARI_AGENT start failed"
+    $host.SetShouldExit(-1)
+    exit
+  }
+  echo "Agent log at: $LOGFILE"
+
+  $p.WaitForExit()
+}
+
+Function _pstart_ioredir($cmd_args)
+{
+  New-Item -ItemType Directory -Force -Path $AMBARI_LOG_DIR | Out-Null
+
+  #start python with -u to make stdout and stderr unbuffered
+  $arguments = @("-u",$AMBARI_AGENT_PY_SCRIPT) + $cmd_args
+  $process = Start-Process -FilePath $Env:PYTHON -ArgumentList $arguments -WindowStyle Hidden -RedirectStandardError $OUTFILE_STDERR -RedirectStandardOutput $OUTFILE_STDOUT -PassThru
+  echo "Verifying $AMBARI_AGENT process status..."
+  if (!$process){
+    echo "ERROR: $AMBARI_AGENT start failed"
+    $host.SetShouldExit(-1)
+    exit
+  }
+  echo "Agent stdout at: $OUTFILE_STDOUT"
+  echo "Agent stderr at: $OUTFILE_STDERR"
+  echo "Agent log at: $LOGFILE"
+
+  $process.WaitForExit()
+}
+
+Function _stop($cmd_args){
+  echo "Stopping $AMBARI_SVC_NAME..."
+
+  _pstart_brief($cmd_args)
+
+  $cnt = 0
+  do
+  {
+    Start-Sleep -Milliseconds 250
+    $svc = Get-Service -Name $AMBARI_SVC_NAME
+    $cnt += 1
+    if ($cnt -eq 40)
+    {
+      echo "$AMBARI_SVC_NAME still stopping..."
+      return
+    }
+  }
+  until($svc.status -eq "Stopped")
+
+  echo "$AMBARI_SVC_NAME is stopped"
+}
+
+Function _status($cmd_args){
+  echo "Getting $AMBARI_SVC_NAME status..."
+
+  _pstart_brief($cmd_args)
+}
+
+# check for python before any action
+_detect_python
+switch ($($args[0])){
+  "start"
+  {
+    _start $args
+  }
+  "debug"
+  {
+    echo "Starting ambari-agent"
+    _pstart_ioredir $args
+    echo "Ambari Agent finished"
+  }
+  "stop" {_stop $args}
+  "restart"
+  {
+    _stop @("stop")
+    _start @("start")
+  }
+  "status" {_status $args}
+  "setup"
+  {
+    echo "Installing ambari-agent"
+    _pstart $args
+    echo "Ambari Agent installation finished"
+  }
+  default
+  {
+    echo "Usage: ambari-agent {start|stop|restart|setup|status} [options]"
+    echo "Use ambari-agent <action> --help to get details on options available."
+    echo "Or, simply invoke ambari-agent.py --help to print the options."
+    $retcode=1
+  }
+}
+
+$host.SetShouldExit($retcode)
+exit

+ 22 - 0
ambari-agent/conf/windows/ambari-env.cmd

@@ -0,0 +1,22 @@
+@echo off
+REM Licensed to the Apache Software Foundation (ASF) under one or more
+REM contributor license agreements.  See the NOTICE file distributed with
+REM this work for additional information regarding copyright ownership.
+REM The ASF licenses this file to You under the Apache License, Version 2.0
+REM (the "License"); you may not use this file except in compliance with
+REM the License.  You may obtain a copy of the License at
+REM
+REM     http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+set SERVICE_NAME=Ambari Agent
+REM set AMBARI_AGENT_CONF_DIR=C:\work\test\agent
+REM set AMBARI_AGENT_LOG_DIR=logs
+set AGENT_SERVICE_WRAPPER=sbin\service_wrapper.py
+REM Python exe that will be used for command execution (must have access to pywin32 and the agent Python code)
+set PYTHON_EXE=C:\Python27\python.exe

+ 195 - 0
ambari-agent/conf/windows/createservice.ps1

@@ -0,0 +1,195 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+param(
+  [String]
+  [Parameter(Mandatory=$true )]
+  $username,
+  [String]
+  [Parameter(Mandatory=$true )]
+  $password,
+  [String]
+  [Parameter(Mandatory=$true )]
+  $servicename,
+  [String]
+  [Parameter(Mandatory=$true )]
+  $hdpResourcesDir,
+  [String]
+  [Parameter(Mandatory=$true )]
+  $servicecmdpath
+  )
+
+function Invoke-Cmd ($command)
+{
+  Write-Output $command
+  $out = cmd.exe /C "$command" 2>&1
+  Write-Output $out
+  return $out
+}
+
+function Invoke-CmdChk ($command)
+{
+  Write-Output $command
+  $out = cmd.exe /C "$command" 2>&1
+  Write-Output $out
+  if (-not ($LastExitCode  -eq 0))
+  {
+    throw "Command `"$command`" failed with exit code $LastExitCode"
+  }
+  return $out
+}
+
+### Stops and deletes the Hadoop service.
+function StopAndDeleteHadoopService(
+  [String]
+  [Parameter( Position=0, Mandatory=$true )]
+  $service
+)
+{
+  Write-Output "Stopping $service"
+  $s = Get-Service $service -ErrorAction SilentlyContinue
+
+  if( $s -ne $null )
+  {
+    Stop-Service $service
+    $cmd = "sc.exe delete $service"
+    Invoke-Cmd $cmd
+  }
+}
+
+# Convenience method for building a PSCredential from command-line arguments.
+# Assumes $credentialsHash is a hash with string keys "username" and "password";
+# the username is qualified with the local computer name when no domain is given.
+function Get-HadoopUserCredentials($credentialsHash)
+{
+  Write-Output "Using provided credentials for username $($credentialsHash["username"])" | Out-Null
+  $username = $credentialsHash["username"]
+  if($username -notlike "*\*")
+  {
+    $username = "$ENV:COMPUTERNAME\$username"
+  }
+  $securePassword = $credentialsHash["password"] | ConvertTo-SecureString -AsPlainText -Force
+  $creds = New-Object System.Management.Automation.PSCredential $username, $securePassword
+  return $creds
+}
+
+### Creates and configures the service.
+function CreateAndConfigureHadoopService(
+  [String]
+  [Parameter( Position=0, Mandatory=$true )]
+  $service,
+  [String]
+  [Parameter( Position=1, Mandatory=$true )]
+  $hdpResourcesDir,
+  [String]
+  [Parameter( Position=2, Mandatory=$true )]
+  $serviceBinDir,
+  [String]
+  [Parameter( Position=3, Mandatory=$true )]
+  $servicecmdpath,
+  [System.Management.Automation.PSCredential]
+  [Parameter( Position=4, Mandatory=$true )]
+  $serviceCredential
+)
+{
+  if ( -not ( Get-Service "$service" -ErrorAction SilentlyContinue ) )
+  {
+    Write-Output "Creating service `"$service`" as $serviceBinDir\$service.exe"
+    $xcopyServiceHost_cmd = "copy /Y `"$hdpResourcesDir\namenode.exe`" `"$serviceBinDir\$service.exe`""
+    Invoke-CmdChk $xcopyServiceHost_cmd
+
+    #HadoopServiceHost.exe will write to this log but does not create it
+    #Creating the event log needs to be done from an elevated process, so we do it here
+    if( -not ([Diagnostics.EventLog]::SourceExists( "$service" )))
+    {
+      [Diagnostics.EventLog]::CreateEventSource( "$service", "" )
+    }
+    Write-Output "Adding service $service"
+    if ($serviceCredential.Password.get_Length() -ne 0)
+    {
+      $s = New-Service -Name "$service" -BinaryPathName "$serviceBinDir\$service.exe" -Credential $serviceCredential -DisplayName "Apache Hadoop $service"
+      if ( $s -eq $null )
+      {
+        throw "CreateAndConfigureHadoopService: Service `"$service`" creation failed"
+      }
+    }
+    else
+    {
+      # Separately handle case when password is not provided
+      # this path is used for creating services that run under (AD) Managed Service Account
+      # for them password is not provided and in that case service cannot be created using New-Service commandlet
+      $serviceUserName = $serviceCredential.UserName
+      $cred = $serviceCredential.UserName.Split("\")
+
+      # Throw exception if domain is not specified
+      if (($cred.Length -lt 2) -or ($cred[0] -eq "."))
+      {
+        throw "Environment is not AD or domain is not specified"
+      }
+
+      $cmd="$ENV:WINDIR\system32\sc.exe create `"$service`" binPath= `"$serviceBinDir\$service.exe`" obj= $serviceUserName DisplayName= `"Apache Hadoop $service`" "
+      try
+      {
+        Invoke-CmdChk $cmd
+      }
+      catch
+      {
+        throw "CreateAndConfigureHadoopService: Service `"$service`" creation failed"
+      }
+    }
+
+    $cmd="$ENV:WINDIR\system32\sc.exe failure $service reset= 30 actions= restart/5000"
+    Invoke-CmdChk $cmd
+
+    $cmd="$ENV:WINDIR\system32\sc.exe config $service start= demand"
+    Invoke-CmdChk $cmd
+
+
+    Write-Output "Creating service config ${serviceBinDir}\$service.xml"
+    $cmd = "$servicecmdpath --service $service > `"$serviceBinDir\$service.xml`""
+    Invoke-CmdChk $cmd
+  }
+  else
+  {
+    Write-Output "Service `"$service`" already exists. Removing and recreating `"$service`""
+    StopAndDeleteHadoopService $service
+    CreateAndConfigureHadoopService $service $hdpResourcesDir $serviceBinDir $servicecmdpath $serviceCredential
+  }
+}
+
+
+try
+{
+  Write-Output "Creating credential object"
+  ###
+  ### Create the Credential object from the given username and password or the provided credentials file
+  ###
+  $serviceCredential = Get-HadoopUserCredentials -credentialsHash @{"username" = $username; "password" = $password}
+  $username = $serviceCredential.UserName
+  Write-Output "Username: $username"
+
+  Write-Output "Creating service $servicename"
+  ###
+  ### Create Service
+  ###
+  CreateAndConfigureHadoopService $servicename $hdpResourcesDir $hdpResourcesDir $servicecmdpath $serviceCredential
+  Write-Output "Done"
+}
+catch
+{
+  Write-Output "Failure: $_"
+  exit 1
+}

+ 227 - 0
ambari-agent/conf/windows/service_wrapper.py

@@ -0,0 +1,227 @@
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import os
+import optparse
+import sys
+
+import win32serviceutil
+import win32api
+import win32event
+import win32service
+
+from ambari_commons.ambari_service import AmbariService
+from ambari_commons.exceptions import *
+from ambari_commons.logging_utils import *
+from ambari_commons.os_windows import WinServiceController
+from ambari_agent.AmbariConfig import *
+from ambari_agent.HeartbeatHandlers_windows import HeartbeatStopHandler
+
+AMBARI_VERSION_VAR = "AMBARI_VERSION_VAR"
+
+ENV_PYTHONPATH = "PYTHONPATH"
+
+
+def parse_options():
+  # parse env cmd
+  with open(os.path.join(os.getcwd(), "ambari-env.cmd"), "r") as env_cmd:
+    content = env_cmd.readlines()
+  for line in content:
+    if line.startswith("set"):
+      name, value = line[4:].split("=", 1)
+      os.environ[name] = value.rstrip()
+  # check env variables, and fall back to the working dir if none was found
+  if not os.environ.has_key("AMBARI_AGENT_CONF_DIR"):
+    os.environ["AMBARI_AGENT_CONF_DIR"] = os.getcwd()
+  if not os.environ.has_key("AMBARI_AGENT_LOG_DIR"):
+    os.environ["AMBARI_AGENT_LOG_DIR"] = os.path.join("\\", "var", "log", "ambari-agent")
+  if not os.path.exists(os.environ["AMBARI_AGENT_LOG_DIR"]):
+    os.makedirs(os.environ["AMBARI_AGENT_LOG_DIR"])
+
+
+class AmbariAgentService(AmbariService):
+  AmbariService._svc_name_ = "Ambari Agent"
+  AmbariService._svc_display_name_ = "Ambari Agent"
+  AmbariService._svc_description_ = "Ambari Agent"
+
+  AmbariService._AdjustServiceVersion()
+
+  heartbeat_stop_handler = None
+
+  def SvcDoRun(self):
+    parse_options()
+    self.redirect_output_streams()
+
+    # Soft dependency on the Windows Time service
+    ensure_time_service_is_started()
+
+    self.heartbeat_stop_handler = HeartbeatStopHandler(self._heventSvcStop)
+
+    self.ReportServiceStatus(win32service.SERVICE_RUNNING)
+
+    from ambari_agent import main
+
+    main.main(self.heartbeat_stop_handler)
+
+  def _InitOptionsParser(self):
+    return init_options_parser()
+
+  def redirect_output_streams(self):
+    self._RedirectOutputStreamsToFile(AmbariConfig.getOutFile())
+    pass
+
+
+def ensure_time_service_is_started():
+  ret = WinServiceController.EnsureServiceIsStarted("W32Time")
+  if 0 != ret:
+    raise FatalException(-1, "Error starting Windows Time service: " + str(ret))
+  pass
+
+
+def ctrlHandler(ctrlType):
+  AmbariAgentService.DefCtrlCHandler()
+  return True
+
+
+def svcsetup():
+  AmbariAgentService.set_ctrl_c_handler(ctrlHandler)
+  AmbariAgentService.Install()
+  pass
+
+
+#
+# Starts the Ambari Agent in normal mode, as a Windows service. If the Ambari
+# Agent is not registered as a service, the function fails. By default, only
+# one instance of the service can run.
+#
+def svcstart(options):
+  if 0 != AmbariAgentService.Start(15):
+    options.exit_message = None
+  pass
+
+
+#
+# Stops the Ambari Agent.
+#
+def svcstop(options):
+  if 0 != AmbariAgentService.Stop():
+    options.exit_message = None
+
+
+#
+# Reports the Ambari Agent service status.
+#
+def svcstatus(options):
+  options.exit_message = None
+
+  statusStr = AmbariAgentService.QueryStatus()
+  print "Ambari Agent is " + statusStr
+
+
+def svcdebug(options):
+  sys.frozen = 'windows_exe'  # Fake py2exe so we can debug
+
+  AmbariAgentService.set_ctrl_c_handler(ctrlHandler)
+  win32serviceutil.HandleCommandLine(AmbariAgentService, options)
+
+
+def init_options_parser():
+  parser = optparse.OptionParser(usage="usage: %prog action [options]", )
+  parser.add_option('-r', '--hostname', dest="host_name", default="localhost",
+    help="Use specified Ambari server host for registration.")
+  parser.add_option('-j', '--java-home', dest="java_home", default=None,
+    help="Use specified java_home.  Must be valid on all hosts")
+  parser.add_option("-v", "--verbose",
+    action="store_true", dest="verbose", default=False,
+    help="Print verbose status messages")
+  parser.add_option("-s", "--silent",
+    action="store_true", dest="silent", default=False,
+    help="Silently accepts default prompt values")
+  parser.add_option('--jdbc-driver', default=None,
+    help="Specifies the path to the JDBC driver JAR file for the " \
+         "database type specified with the --jdbc-db option. Used only with --jdbc-db option.",
+    dest="jdbc_driver")
+  return parser
+
+
+#
+# Main.
+#
+def agent_main():
+  parser = init_options_parser()
+  (options, args) = parser.parse_args()
+
+  options.warnings = []
+
+  if len(args) == 0:
+    parser.print_help()
+    parser.error("No action entered")
+
+  action = args[0]
+  possible_args_numbers = [1]
+
+  matches = 0
+  for args_number_required in possible_args_numbers:
+    matches += int(len(args) == args_number_required)
+
+  if matches == 0:
+    parser.print_help()
+    possible_args = ' or '.join(str(x) for x in possible_args_numbers)
+    parser.error("Invalid number of arguments. Entered: " + str(len(args)) + ", required: " + possible_args)
+
+  options.exit_message = "Ambari Agent '%s' completed successfully." % action
+  try:
+    if action == SETUP_ACTION:
+      # TODO: Insert setup(options) here when needed
+      svcsetup()
+    elif action == START_ACTION:
+      svcstart(options)
+    elif action == DEBUG_ACTION:
+      svcdebug(options)
+    elif action == STOP_ACTION:
+      svcstop(options)
+    elif action == STATUS_ACTION:
+      svcstatus(options)
+    else:
+      parser.error("Invalid action")
+
+    if options.warnings:
+      for warning in options.warnings:
+        print_warning_msg(warning)
+        pass
+      options.exit_message = "Ambari Agent '%s' completed with warnings." % action
+      pass
+  except FatalException as e:
+    if e.reason is not None:
+      print_error_msg("Exiting with exit code {0}. \nREASON: {1}".format(e.code, e.reason))
+    sys.exit(e.code)
+  except NonFatalException as e:
+    options.exit_message = "Ambari Agent '%s' completed with warnings." % action
+    if e.reason is not None:
+      print_warning_msg(e.reason)
+
+  if options.exit_message is not None:
+    print options.exit_message
+
+
+if __name__ == '__main__':
+  try:
+    agent_main()
+  except (KeyboardInterrupt, EOFError):
+    print("\nAborting ... Keyboard Interrupt.")
+    sys.exit(1)

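parse_options() above seeds the service's environment from ambari-env.cmd by treating every "set NAME=value" line as an environment variable for the agent process. A self-contained illustration of that contract (the value shown is hypothetical, and splitting is on the first "=" only, per the code above):

    import os

    line = "set PYTHON_EXE=C:\\Python27\\python.exe\n"
    if line.startswith("set"):
        # Split on the first '=' only, so values may themselves contain '='.
        name, value = line[4:].split("=", 1)
        os.environ[name] = value.rstrip()

    assert os.environ["PYTHON_EXE"] == "C:\\Python27\\python.exe"
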
+ 164 - 9
ambari-agent/pom.xml

@@ -102,6 +102,13 @@
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <skip>${skipSurefireTests}</skip>
+        </configuration>
+      </plugin>
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>exec-maven-plugin</artifactId>
@@ -109,13 +116,13 @@
         <executions>
           <execution>
             <configuration>
-              <executable>${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap</executable>
+              <executable>${executable.python}</executable>
               <workingDirectory>src/test/python</workingDirectory>
               <arguments>
                 <argument>unitTests.py</argument>
               </arguments>
               <environmentVariables>
-                <PYTHONPATH>${project.basedir}/../ambari-common/src/main/python/ambari_jinja2:${project.basedir}/../ambari-common/src/main/python/ambari_commons:${project.basedir}/../ambari-common/src/main/python/resource_management:${project.basedir}/../ambari-common/src/test/python:${project.basedir}/../ambari-common/src/main/python:${project.basedir}/src/main/python/ambari_agent:${project.basedir}/src/test/python/ambari_agent:${project.basedir}/src/test/python/resource_management:${project.basedir}/src/main/python:${project.basedir}/../ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files:${project.basedir}/../ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files:$PYTHONPATH</PYTHONPATH>
+                <PYTHONPATH>${path.python.1}${pathsep}$PYTHONPATH</PYTHONPATH>
               </environmentVariables>
               <skip>${skipTests}</skip>
             </configuration>
@@ -128,15 +135,15 @@
           <execution>
             <!-- TODO: Looks like section is unused, maybe remove? -->
             <configuration>
-              <executable>${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap</executable>
-              <workingDirectory>target/ambari-agent-${project.version}</workingDirectory>
+              <executable>${executable.python}</executable>
+              <workingDirectory>target${dirsep}ambari-agent-${project.version}</workingDirectory>
               <arguments>
-                <argument>${project.basedir}/src/main/python/setup.py</argument>
+                <argument>${project.basedir}${dirsep}src${dirsep}main${dirsep}python${dirsep}setup.py</argument>
                 <argument>clean</argument>
                 <argument>bdist_dumb</argument>
               </arguments>
               <environmentVariables>
-                <PYTHONPATH>target/ambari-agent-${project.version}:$PYTHONPATH</PYTHONPATH>
+                <PYTHONPATH>target${dirsep}ambari-agent-${project.version}${pathsep}$PYTHONPATH</PYTHONPATH>
               </environmentVariables>
             </configuration>
             <id>python-package</id>
@@ -147,14 +154,14 @@
           </execution>
           <execution>
             <configuration>
-              <executable>${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap</executable>
+              <executable>${executable.python}</executable>
               <workingDirectory>${basedir}</workingDirectory>
               <arguments>
                 <argument>${resource.keeper.script}</argument>
                 <argument>${target.cache.dir}</argument>
               </arguments>
               <environmentVariables>
-                <PYTHONPATH>target/ambari-agent-${project.version}:$PYTHONPATH</PYTHONPATH>
+                <PYTHONPATH>target${dirsep}ambari-agent-${project.version}${pathsep}$PYTHONPATH</PYTHONPATH>
               </environmentVariables>
             </configuration>
             <id>generate-hash-files</id>
@@ -213,7 +220,7 @@
               <groupname>root</groupname>
               <sources>
                 <source>
-                  <location>${project.build.directory}/${project.artifactId}-${project.version}/ambari_agent</location>
+                  <location>${project.build.directory}${dirsep}${project.artifactId}-${project.version}${dirsep}ambari_agent</location>
                 </source>
               </sources>
             </mapping>
@@ -637,6 +644,7 @@
             <exclude>**/*.erb</exclude>
             <exclude>**/*.json</exclude>
             <exclude>**/*.pydevproject</exclude>
+            <exclude>**/*.wxs</exclude>
           </excludes>
         </configuration>
         <executions>
@@ -673,6 +681,153 @@
     </extensions>
   </build>
   <profiles>
+    <profile>
+     <id>windows</id>
+     <activation>
+      <os>
+        <family>win</family>
+      </os>
+     </activation>
+     <properties>
+      <envClassifier>win</envClassifier>
+      <dirsep>\</dirsep>
+      <pathsep>;</pathsep>
+      <stack.distribution>HDPWIN</stack.distribution>
+      <executable.python>python</executable.python>
+      <executable.shell>cmd</executable.shell>
+      <fileextension.shell>cmd</fileextension.shell>
+      <fileextension.dot.shell-default>.cmd</fileextension.dot.shell-default>
+      <path.python.1>${project.basedir}\..\ambari-common\src\main\python;${project.basedir}\..\ambari-agent\src\main\python;${project.basedir}\..\ambari-common\src\main\python\ambari_jinja2;${project.basedir}\..\ambari-common\src\main\python\ambari_commons;${project.basedir}\..\ambari-common\src\test\python;${project.basedir}\src\main\python;${project.basedir}\src\main\python\ambari_agent;${project.basedir}\src\main\python\resource_management;${project.basedir}\src\test\python;${project.basedir}\src\test\python\ambari_agent;${project.basedir}\src\test\python\resource_management;${project.basedir}\..\ambari-server\src\test\python;${project.basedir}\..\ambari-server\src\main\resources\stacks\HDP\2.0.6\services\HDFS\package\files;${project.basedir}\..\ambari-server\src\main\resources\stacks\HDP\1.3.2\services\HDFS\package\files</path.python.1>
+     </properties>
+    </profile>
+    <profile>
+     <id>linux</id>
+     <activation>
+      <os>
+        <family>unix</family>
+      </os>
+     </activation>
+     <properties>
+      <envClassifier>linux</envClassifier>
+      <dirsep>/</dirsep>
+      <pathsep>:</pathsep>
+      <stack.distribution>HDP</stack.distribution>
+      <executable.python>${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap</executable.python>
+      <executable.shell>sh</executable.shell>
+      <fileextension.shell>sh</fileextension.shell>
+      <fileextension.dot.shell-default></fileextension.dot.shell-default>
+      <path.python.1>${project.basedir}/../ambari-common/src/main/python:${project.basedir}/../ambari-agent/src/main/python:${project.basedir}/../ambari-common/src/main/python/ambari_jinja2:${project.basedir}/../ambari-common/src/main/python/ambari_commons:${project.basedir}/../ambari-common/src/test/python:${project.basedir}/src/main/python:${project.basedir}/src/main/python/ambari_agent:${project.basedir}/src/main/python/resource_management:${project.basedir}/src/test/python:${project.basedir}/src/test/python/ambari_agent:${project.basedir}/src/test/python/resource_management:${project.basedir}/../ambari-server/src/test/python:${project.basedir}/../ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files:${project.basedir}/../ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files</path.python.1>
+     </properties>
+    </profile>
+    <profile>
+      <id>windows-distro</id>
+      <activation>
+        <os>
+          <family>Windows</family>
+        </os>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-assembly-plugin</artifactId>
+            <configuration>
+              <tarLongFileMode>gnu</tarLongFileMode>
+              <descriptors>
+                <descriptor>src/packages/tarball/all.xml</descriptor>
+                <descriptor>src/packages/windows.xml</descriptor>
+              </descriptors>
+            </configuration>
+            <executions>
+              <execution>
+                <id>build-windows-zip</id>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>single</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+          <!-- msi creation -->
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
+            <version>1.2</version>
+            <executions>
+              <execution>
+                <id>run-heat</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>exec</goal>
+                </goals>
+                <configuration>
+                  <executable>heat.exe</executable>
+                  <arguments>
+                    <argument>dir</argument>
+                    <argument>"."</argument>
+                    <argument>-dr</argument>
+                    <argument>"AMBARI_AGENT_1.3.0_SNAPSHOT"</argument>
+                    <argument>-platform</argument>
+                    <argument>Win64</argument>
+                    <argument>-cg</argument>
+                    <argument>"AmbariAgentGroup"</argument>
+                    <argument>-gg</argument>
+                    <argument>-ke</argument>
+                    <argument>-srd</argument>
+                    <argument>-o</argument>
+                    <argument>".\..\ambari-agent-files.wxs"</argument>
+                  </arguments>
+                  <workingDirectory>${basedir}/target/${final.name}-windows-dist</workingDirectory>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.npanday.plugins</groupId>
+            <artifactId>wix-maven-plugin</artifactId>
+            <version>1.4.0-incubating</version>
+            <extensions>true</extensions>
+            <configuration>
+              <sourceFiles>
+                <sourceFile>src/main/package/msi/ambari-agent.wxs</sourceFile>
+                <sourceFile>target/ambari-agent-files.wxs</sourceFile>
+              </sourceFiles>
+              <outputDirectory>target</outputDirectory>
+              <objectFiles>
+                <objectFile>target/ambari-agent.wixobj</objectFile>
+                <objectFile>target/ambari-agent-files.wixobj</objectFile>
+              </objectFiles>
+              <outputFile>target/ambari-agent-${project.version}.msi</outputFile>
+              <extensions>
+                <extension>WixUIExtension</extension>
+              </extensions>
+            </configuration>
+            <executions>
+              <execution>
+                <id>wix-candle</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>candle</goal>
+                </goals>
+                <configuration>
+                  <arguments>-arch x64</arguments>
+                </configuration>
+              </execution>
+              <execution>
+                <id>wix-light</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>light</goal>
+                </goals>
+                <configuration>
+                  <arguments>-b ${basedir}/target/${final.name}-windows-dist</arguments>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+            <!-- end msi creation -->
+        </plugins>
+      </build>
+    </profile>
     <profile>
       <id>suse11</id>
       <properties>

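The dirsep/pathsep properties that the windows and linux profiles above define mirror the separators Python itself exposes, which is what keeps the PYTHONPATH strings assembled here valid on either OS. A quick standard-library cross-check (illustrative only, not part of the commit):

    import os

    print os.sep      # '\\' on Windows, '/' on Linux  (cf. ${dirsep})
    print os.pathsep  # ';' on Windows, ':' on Linux   (cf. ${pathsep})
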
+ 38 - 0
ambari-agent/src/main/package/msi/ambari-agent.wxs

@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
+  <Product Id="BA555A14-081D-4521-9B35-BC37C50CF5A9" Name="Ambari Agent 1.3.0-SNAPSHOT" Language="1033"
+           Version="1.3.0.0" Manufacturer="Apache Software Foundation"
+           UpgradeCode="6A5C01ED-C9B3-45C0-8A69-4512AC9F65F7">
+    <Package Description="Ambari Agent for Windows" Comments="Ambari Agent for Windows" InstallerVersion="200"
+             Compressed="yes" Platform="x64"/>
+    <Media Id="1" Cabinet="simple.cab" EmbedCab="yes"/>
+    <Directory Id="TARGETDIR" Name="SourceDir">
+      <Directory Id="ProgramFiles64Folder">
+        <Directory Id="AMBARI_AGENT_1.3.0_SNAPSHOT" Name="ambari-agent-1.3.0-SNAPSHOT">
+        </Directory>
+      </Directory>
+    </Directory>
+    <Feature Id="DefaultFeature" Title="Main Feature" Level="1">
+      <ComponentGroupRef Id="AmbariAgentGroup"/>
+    </Feature>
+    <Property Id="WIXUI_INSTALLDIR" Value="AMBARI_AGENT_1.3.0_SNAPSHOT"/>
+    <UI/>
+    <UIRef Id="WixUI_InstallDir"/>
+  </Product>
+</Wix>

+ 17 - 18
ambari-agent/src/main/python/ambari_agent/ActionQueue.py

@@ -153,11 +153,11 @@ class ActionQueue(threading.Thread):
     while not self.backgroundCommandQueue.empty():
       try:
         command = self.backgroundCommandQueue.get(False)
-        if(command.has_key('__handle') and command['__handle'].status == None): 
+        if(command.has_key('__handle') and command['__handle'].status == None):
           self.process_command(command)
       except (Queue.Empty):
         pass
-  
+
   def processStatusCommandQueueSafeEmpty(self):
     while not self.statusCommandQueue.empty():
       try:
@@ -215,17 +215,17 @@ class ActionQueue(threading.Thread):
       'status': self.IN_PROGRESS_STATUS
     })
     self.commandStatuses.put_command_status(command, in_progress_status)
-    
+
     # running command
     commandresult = self.customServiceOrchestrator.runCommand(command,
       in_progress_status['tmpout'], in_progress_status['tmperr'])
-   
-    
+
+
     # dumping results
     if isCommandBackground:
       return
     else:
-      status = self.COMPLETED_STATUS if commandresult['exitcode'] == 0 else self.FAILED_STATUS  
+      status = self.COMPLETED_STATUS if commandresult['exitcode'] == 0 else self.FAILED_STATUS
     roleResult = self.commandStatuses.generate_report_template(command)
     roleResult.update({
       'stdout': commandresult['stdout'],
@@ -250,18 +250,18 @@ class ActionQueue(threading.Thread):
     # let ambari know that configuration tags were applied
     if status == self.COMPLETED_STATUS:
       configHandler = ActualConfigHandler(self.config, self.configTags)
-      #update 
+      #update
       if command.has_key('forceRefreshConfigTags') and len(command['forceRefreshConfigTags']) > 0  :
-        
+
         forceRefreshConfigTags = command['forceRefreshConfigTags']
         logger.info("Got refresh additional component tags command")
-        
+
         for configTag in forceRefreshConfigTags :
           configHandler.update_component_tag(command['role'], configTag, command['configurationTags'][configTag])
-        
+
         roleResult['customCommand'] = self.CUSTOM_COMMAND_RESTART # force restart for component to evict stale_config on server side
         command['configurationTags'] = configHandler.read_actual_component(command['role'])
-        
+
       if command.has_key('configurationTags'):
         configHandler.write_actual(command['configurationTags'])
         roleResult['configurationTags'] = command['configurationTags']
@@ -288,17 +288,17 @@ class ActionQueue(threading.Thread):
     logger.debug('Start callback: %s' % process_condenced_result)
     logger.debug('The handle is: %s' % handle)
     status = self.COMPLETED_STATUS if handle.exitCode == 0 else self.FAILED_STATUS
-    
+
     aborted_postfix = self.customServiceOrchestrator.command_canceled_reason(handle.command['taskId'])
     if aborted_postfix:
       status = self.FAILED_STATUS
       logger.debug('Set status to: %s , reason = %s' % (status, aborted_postfix))
     else:
       aborted_postfix = ''
-      
-    
+
+
     roleResult = self.commandStatuses.generate_report_template(handle.command)
-    
+
     roleResult.update({
       'stdout': process_condenced_result['stdout'] + aborted_postfix,
       'stderr': process_condenced_result['stderr'] + aborted_postfix,
@@ -306,7 +306,7 @@ class ActionQueue(threading.Thread):
       'structuredOut': str(json.dumps(process_condenced_result['structuredOut'])) if 'structuredOut' in process_condenced_result else '',
       'status': status,
     })
-    
+
     self.commandStatuses.put_command_status(handle.command, roleResult)
 
   def execute_status_command(self, command):
@@ -371,11 +371,10 @@ class ActionQueue(threading.Thread):
     """
     Actions that are executed every time when command status changes
     """
-    self.controller.heartbeat_wait_event.set()
+    self.controller.trigger_heartbeat()
 
   # Removes all commands from the queue
   def reset(self):
     queue = self.commandQueue
     with queue.mutex:
       queue.queue.clear()
-

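Aside from whitespace cleanup, the behavioral change in this hunk is that the status-change hook now calls self.controller.trigger_heartbeat() instead of setting the controller's event directly. The Controller diff is not shown in this excerpt; a plausible sketch of the new method, assuming it simply encapsulates the event the old code touched:

    import threading

    class Controller(object):
        def __init__(self):
            # The heartbeat thread waits on this event between beats.
            self.heartbeat_wait_event = threading.Event()

        def trigger_heartbeat(self):
            # Wake the heartbeat thread immediately (e.g. when a command's
            # status changes) instead of waiting out the full interval.
            self.heartbeat_wait_event.set()
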
+ 229 - 0
ambari-agent/src/main/python/ambari_agent/AgentConfig_linux.py

@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+
+content = """
+
+[server]
+hostname=localhost
+url_port=8440
+secured_url_port=8441
+
+[agent]
+prefix=/tmp/ambari-agent
+tmp_dir=/tmp/ambari-agent/tmp
+data_cleanup_interval=86400
+data_cleanup_max_age=2592000
+data_cleanup_max_size_MB = 100
+ping_port=8670
+cache_dir=/var/lib/ambari-agent/cache
+
+[services]
+
+[python]
+custom_actions_dir = /var/lib/ambari-agent/resources/custom_actions
+
+[command]
+maxretries=2
+sleepBetweenRetries=1
+
+[security]
+keysdir=/tmp/ambari-agent
+server_crt=ca.crt
+passphrase_env_var_name=AMBARI_PASSPHRASE
+
+[heartbeat]
+state_interval = 6
+dirs=/etc/hadoop,/etc/hadoop/conf,/var/run/hadoop,/var/log/hadoop
+log_lines_count=300
+
+"""
+
+imports = [
+  "hdp/manifests/*.pp",
+  "hdp-hadoop/manifests/*.pp",
+  "hdp-hbase/manifests/*.pp",
+  "hdp-zookeeper/manifests/*.pp",
+  "hdp-oozie/manifests/*.pp",
+  "hdp-pig/manifests/*.pp",
+  "hdp-sqoop/manifests/*.pp",
+  "hdp-templeton/manifests/*.pp",
+  "hdp-hive/manifests/*.pp",
+  "hdp-hcat/manifests/*.pp",
+  "hdp-mysql/manifests/*.pp",
+  "hdp-monitor-webserver/manifests/*.pp",
+  "hdp-repos/manifests/*.pp"
+]
+
+rolesToClass = {
+  'GLUSTERFS': 'hdp-hadoop::glusterfs',
+  'GLUSTERFS_CLIENT': 'hdp-hadoop::glusterfs_client',
+  'GLUSTERFS_SERVICE_CHECK': 'hdp-hadoop::glusterfs_service_check',
+  'NAMENODE': 'hdp-hadoop::namenode',
+  'DATANODE': 'hdp-hadoop::datanode',
+  'SECONDARY_NAMENODE': 'hdp-hadoop::snamenode',
+  'JOBTRACKER': 'hdp-hadoop::jobtracker',
+  'TASKTRACKER': 'hdp-hadoop::tasktracker',
+  'RESOURCEMANAGER': 'hdp-yarn::resourcemanager',
+  'NODEMANAGER': 'hdp-yarn::nodemanager',
+  'HISTORYSERVER': 'hdp-yarn::historyserver',
+  'YARN_CLIENT': 'hdp-yarn::yarn_client',
+  'HDFS_CLIENT': 'hdp-hadoop::client',
+  'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
+  'MAPREDUCE2_CLIENT': 'hdp-yarn::mapreducev2_client',
+  'ZOOKEEPER_SERVER': 'hdp-zookeeper',
+  'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
+  'HBASE_MASTER': 'hdp-hbase::master',
+  'HBASE_REGIONSERVER': 'hdp-hbase::regionserver',
+  'HBASE_CLIENT': 'hdp-hbase::client',
+  'PIG': 'hdp-pig',
+  'SQOOP': 'hdp-sqoop',
+  'OOZIE_SERVER': 'hdp-oozie::server',
+  'OOZIE_CLIENT': 'hdp-oozie::client',
+  'HIVE_CLIENT': 'hdp-hive::client',
+  'HCAT': 'hdp-hcat',
+  'HIVE_SERVER': 'hdp-hive::server',
+  'HIVE_METASTORE': 'hdp-hive::metastore',
+  'MYSQL_SERVER': 'hdp-mysql::server',
+  'WEBHCAT_SERVER': 'hdp-templeton::server',
+  'DASHBOARD': 'hdp-dashboard',
+  'GANGLIA_SERVER': 'hdp-ganglia::server',
+  'GANGLIA_MONITOR': 'hdp-ganglia::monitor',
+  'HTTPD': 'hdp-monitor-webserver',
+  'HUE_SERVER': 'hdp-hue::server',
+  'HDFS_SERVICE_CHECK': 'hdp-hadoop::hdfs::service_check',
+  'MAPREDUCE_SERVICE_CHECK': 'hdp-hadoop::mapred::service_check',
+  'MAPREDUCE2_SERVICE_CHECK': 'hdp-yarn::mapred2::service_check',
+  'ZOOKEEPER_SERVICE_CHECK': 'hdp-zookeeper::zookeeper::service_check',
+  'ZOOKEEPER_QUORUM_SERVICE_CHECK': 'hdp-zookeeper::quorum::service_check',
+  'HBASE_SERVICE_CHECK': 'hdp-hbase::hbase::service_check',
+  'HIVE_SERVICE_CHECK': 'hdp-hive::hive::service_check',
+  'HCAT_SERVICE_CHECK': 'hdp-hcat::hcat::service_check',
+  'OOZIE_SERVICE_CHECK': 'hdp-oozie::oozie::service_check',
+  'PIG_SERVICE_CHECK': 'hdp-pig::pig::service_check',
+  'SQOOP_SERVICE_CHECK': 'hdp-sqoop::sqoop::service_check',
+  'WEBHCAT_SERVICE_CHECK': 'hdp-templeton::templeton::service_check',
+  'DASHBOARD_SERVICE_CHECK': 'hdp-dashboard::dashboard::service_check',
+  'DECOMMISSION_DATANODE': 'hdp-hadoop::hdfs::decommission',
+  'HUE_SERVICE_CHECK': 'hdp-hue::service_check',
+  'RESOURCEMANAGER_SERVICE_CHECK': 'hdp-yarn::resourcemanager::service_check',
+  'HISTORYSERVER_SERVICE_CHECK': 'hdp-yarn::historyserver::service_check',
+  'TEZ_CLIENT': 'hdp-tez::tez_client',
+  'YARN_SERVICE_CHECK': 'hdp-yarn::yarn::service_check',
+  'FLUME_SERVER': 'hdp-flume',
+  'JOURNALNODE': 'hdp-hadoop::journalnode',
+  'ZKFC': 'hdp-hadoop::zkfc'
+}
+
+serviceStates = {
+  'START': 'running',
+  'INSTALL': 'installed_and_configured',
+  'STOP': 'stopped'
+}
+
+servicesToPidNames = {
+  'GLUSTERFS' : 'glusterd.pid$',
+  'NAMENODE': 'hadoop-{USER}-namenode.pid$',
+  'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
+  'DATANODE': 'hadoop-{USER}-datanode.pid$',
+  'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
+  'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
+  'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
+  'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
+  'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
+  'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
+  'ZKFC': 'hadoop-{USER}-zkfc.pid$',
+  'OOZIE_SERVER': 'oozie.pid',
+  'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
+  'FLUME_SERVER': 'flume-node.pid',
+  'TEMPLETON_SERVER': 'templeton.pid',
+  'GANGLIA_SERVER': 'gmetad.pid',
+  'GANGLIA_MONITOR': 'gmond.pid',
+  'HBASE_MASTER': 'hbase-{USER}-master.pid',
+  'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
+  'HCATALOG_SERVER': 'webhcat.pid',
+  'KERBEROS_SERVER': 'kadmind.pid',
+  'HIVE_SERVER': 'hive-server.pid',
+  'HIVE_METASTORE': 'hive.pid',
+  'MYSQL_SERVER': 'mysqld.pid',
+  'HUE_SERVER': '/var/run/hue/supervisor.pid',
+  'WEBHCAT_SERVER': 'webhcat.pid',
+}
+
+# Each service whose pid depends on the user should provide a user mapping
+servicesToLinuxUser = {
+  'NAMENODE': 'hdfs_user',
+  'SECONDARY_NAMENODE': 'hdfs_user',
+  'DATANODE': 'hdfs_user',
+  'JOURNALNODE': 'hdfs_user',
+  'ZKFC': 'hdfs_user',
+  'JOBTRACKER': 'mapred_user',
+  'TASKTRACKER': 'mapred_user',
+  'RESOURCEMANAGER': 'yarn_user',
+  'NODEMANAGER': 'yarn_user',
+  'HISTORYSERVER': 'mapred_user',
+  'HBASE_MASTER': 'hbase_user',
+  'HBASE_REGIONSERVER': 'hbase_user',
+}
+
+pidPathVars = [
+  {'var' : 'glusterfs_pid_dir_prefix',
+    'defaultValue' : '/var/run'},
+  {'var' : 'hadoop_pid_dir_prefix',
+    'defaultValue' : '/var/run/hadoop'},
+  {'var' : 'hadoop_pid_dir_prefix',
+    'defaultValue' : '/var/run/hadoop'},
+  {'var' : 'ganglia_runtime_dir',
+    'defaultValue' : '/var/run/ganglia/hdp'},
+  {'var' : 'hbase_pid_dir',
+    'defaultValue' : '/var/run/hbase'},
+  {'var' : 'zk_pid_dir',
+    'defaultValue' : '/var/run/zookeeper'},
+  {'var' : 'oozie_pid_dir',
+    'defaultValue' : '/var/run/oozie'},
+  {'var' : 'hcat_pid_dir',
+    'defaultValue' : '/var/run/webhcat'},
+  {'var' : 'hive_pid_dir',
+    'defaultValue' : '/var/run/hive'},
+  {'var' : 'mysqld_pid_dir',
+    'defaultValue' : '/var/run/mysqld'},
+  {'var' : 'hcat_pid_dir',
+    'defaultValue' : '/var/run/webhcat'},
+  {'var' : 'yarn_pid_dir_prefix',
+    'defaultValue' : '/var/run/hadoop-yarn'},
+  {'var' : 'mapred_pid_dir_prefix',
+    'defaultValue' : '/var/run/hadoop-mapreduce'},
+]
+
+if 'AMBARI_AGENT_CONF_DIR' in os.environ:
+  configFile = os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
+else:
+  configFile = "/etc/ambari-agent/conf/ambari-agent.ini"
+
+if 'AMBARI_AGENT_LOG_DIR' in os.environ:
+  logfile = os.path.join(os.environ['AMBARI_AGENT_LOG_DIR'], "ambari-agent.log")
+else:
+  logfile = "/var/log/ambari-agent/ambari-agent.log"
+
+if 'AMBARI_AGENT_OUT_DIR' in os.environ:
+  outfile = os.path.join(os.environ['AMBARI_AGENT_OUT_DIR'], "ambari-agent.out")
+else:
+  outfile = "/var/log/ambari-agent/ambari-agent.out"

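The module above is pure data; AmbariConfig.py (further down in this commit) turns the content template into a live configuration by feeding it to ConfigParser. A minimal sketch of that parsing step, assuming AgentConfig_linux is importable:

import ConfigParser
import StringIO
from AgentConfig_linux import content

config = ConfigParser.RawConfigParser()
config.readfp(StringIO.StringIO(content))
print config.get('agent', 'cache_dir')  # -> /var/lib/ambari-agent/cache
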
+ 232 - 0
ambari-agent/src/main/python/ambari_agent/AgentConfig_windows.py

@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+
+content = """
+
+[server]
+hostname=localhost
+url_port=8440
+secured_url_port=8441
+
+[agent]
+prefix=\\tmp\\ambari-agent
+data_cleanup_interval=86400
+data_cleanup_max_age=2592000
+ping_port=8670
+cache_dir=\\var\\lib\\ambari-agent\\cache
+
+[services]
+
+[python]
+custom_actions_dir = \\var\\lib\\ambari-agent\\resources\\custom_actions
+
+[command]
+maxretries=2
+sleepBetweenRetries=1
+
+[security]
+keysdir=\\tmp\\ambari-agent
+server_crt=ca.crt
+passphrase_env_var_name=AMBARI_PASSPHRASE
+
+[heartbeat]
+state_interval = 6
+dirs=\\etc\\hadoop,\\etc\\hadoop\\conf,\\var\\run\\hadoop,\\var\\log\\hadoop
+rpms=glusterfs,openssl,wget,net-snmp,ntpd,ganglia,nagios
+log_lines_count=300
+
+"""
+
+imports = [
+  "hdp\\manifests\\*.pp",
+  "hdp-hadoop\\manifests\\*.pp",
+  "hdp-hbase\\manifests\\*.pp",
+  "hdp-zookeeper\\manifests\\*.pp",
+  "hdp-oozie\\manifests\\*.pp",
+  "hdp-pig\\manifests\\*.pp",
+  "hdp-sqoop\\manifests\\*.pp",
+  "hdp-templeton\\manifests\\*.pp",
+  "hdp-hive\\manifests\\*.pp",
+  "hdp-hcat\\manifests\\*.pp",
+  "hdp-mysql\\manifests\\*.pp",
+  "hdp-monitor-webserver\\manifests\\*.pp",
+  "hdp-repos\\manifests\\*.pp"
+]
+
+rolesToClass = {
+  'GLUSTERFS': 'hdp-hadoop::glusterfs',
+  'GLUSTERFS_CLIENT': 'hdp-hadoop::glusterfs_client',
+  'GLUSTERFS_SERVICE_CHECK': 'hdp-hadoop::glusterfs_service_check',
+  'NAMENODE': 'hdp-hadoop::namenode',
+  'DATANODE': 'hdp-hadoop::datanode',
+  'SECONDARY_NAMENODE': 'hdp-hadoop::snamenode',
+  'JOBTRACKER': 'hdp-hadoop::jobtracker',
+  'TASKTRACKER': 'hdp-hadoop::tasktracker',
+  'RESOURCEMANAGER': 'hdp-yarn::resourcemanager',
+  'NODEMANAGER': 'hdp-yarn::nodemanager',
+  'HISTORYSERVER': 'hdp-yarn::historyserver',
+  'YARN_CLIENT': 'hdp-yarn::yarn_client',
+  'HDFS_CLIENT': 'hdp-hadoop::client',
+  'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
+  'MAPREDUCE2_CLIENT': 'hdp-yarn::mapreducev2_client',
+  'ZOOKEEPER_SERVER': 'hdp-zookeeper',
+  'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
+  'HBASE_MASTER': 'hdp-hbase::master',
+  'HBASE_REGIONSERVER': 'hdp-hbase::regionserver',
+  'HBASE_CLIENT': 'hdp-hbase::client',
+  'PIG': 'hdp-pig',
+  'SQOOP': 'hdp-sqoop',
+  'OOZIE_SERVER': 'hdp-oozie::server',
+  'OOZIE_CLIENT': 'hdp-oozie::client',
+  'HIVE_CLIENT': 'hdp-hive::client',
+  'HCAT': 'hdp-hcat',
+  'HIVE_SERVER': 'hdp-hive::server',
+  'HIVE_METASTORE': 'hdp-hive::metastore',
+  'MYSQL_SERVER': 'hdp-mysql::server',
+  'WEBHCAT_SERVER': 'hdp-templeton::server',
+  'DASHBOARD': 'hdp-dashboard',
+  'NAGIOS_SERVER': 'hdp-nagios::server',
+  'GANGLIA_SERVER': 'hdp-ganglia::server',
+  'GANGLIA_MONITOR': 'hdp-ganglia::monitor',
+  'HTTPD': 'hdp-monitor-webserver',
+  'HUE_SERVER': 'hdp-hue::server',
+  'HDFS_SERVICE_CHECK': 'hdp-hadoop::hdfs::service_check',
+  'MAPREDUCE_SERVICE_CHECK': 'hdp-hadoop::mapred::service_check',
+  'MAPREDUCE2_SERVICE_CHECK': 'hdp-yarn::mapred2::service_check',
+  'ZOOKEEPER_SERVICE_CHECK': 'hdp-zookeeper::zookeeper::service_check',
+  'ZOOKEEPER_QUORUM_SERVICE_CHECK': 'hdp-zookeeper::quorum::service_check',
+  'HBASE_SERVICE_CHECK': 'hdp-hbase::hbase::service_check',
+  'HIVE_SERVICE_CHECK': 'hdp-hive::hive::service_check',
+  'HCAT_SERVICE_CHECK': 'hdp-hcat::hcat::service_check',
+  'OOZIE_SERVICE_CHECK': 'hdp-oozie::oozie::service_check',
+  'PIG_SERVICE_CHECK': 'hdp-pig::pig::service_check',
+  'SQOOP_SERVICE_CHECK': 'hdp-sqoop::sqoop::service_check',
+  'WEBHCAT_SERVICE_CHECK': 'hdp-templeton::templeton::service_check',
+  'DASHBOARD_SERVICE_CHECK': 'hdp-dashboard::dashboard::service_check',
+  'DECOMMISSION_DATANODE': 'hdp-hadoop::hdfs::decommission',
+  'HUE_SERVICE_CHECK': 'hdp-hue::service_check',
+  'RESOURCEMANAGER_SERVICE_CHECK': 'hdp-yarn::resourcemanager::service_check',
+  'HISTORYSERVER_SERVICE_CHECK': 'hdp-yarn::historyserver::service_check',
+  'TEZ_CLIENT': 'hdp-tez::tez_client',
+  'YARN_SERVICE_CHECK': 'hdp-yarn::yarn::service_check',
+  'FLUME_SERVER': 'hdp-flume',
+  'JOURNALNODE': 'hdp-hadoop::journalnode',
+  'ZKFC': 'hdp-hadoop::zkfc'
+}
+
+serviceStates = {
+  'START': 'running',
+  'INSTALL': 'installed_and_configured',
+  'STOP': 'stopped'
+}
+
+servicesToPidNames = {
+  'GLUSTERFS' : 'glusterd.pid$',
+  'NAMENODE': 'hadoop-{USER}-namenode.pid$',
+  'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
+  'DATANODE': 'hadoop-{USER}-datanode.pid$',
+  'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
+  'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
+  'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
+  'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
+  'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
+  'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
+  'ZKFC': 'hadoop-{USER}-zkfc.pid$',
+  'OOZIE_SERVER': 'oozie.pid',
+  'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
+  'FLUME_SERVER': 'flume-node.pid',
+  'TEMPLETON_SERVER': 'templeton.pid',
+  'NAGIOS_SERVER': 'nagios.pid',
+  'GANGLIA_SERVER': 'gmetad.pid',
+  'GANGLIA_MONITOR': 'gmond.pid',
+  'HBASE_MASTER': 'hbase-{USER}-master.pid',
+  'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
+  'HCATALOG_SERVER': 'webhcat.pid',
+  'KERBEROS_SERVER': 'kadmind.pid',
+  'HIVE_SERVER': 'hive-server.pid',
+  'HIVE_METASTORE': 'hive.pid',
+  'MYSQL_SERVER': 'mysqld.pid',
+  'HUE_SERVER': '\\var\\run\\hue\\supervisor.pid',
+  'WEBHCAT_SERVER': 'webhcat.pid',
+}
+
+# Each service whose pid depends on the user should provide a user mapping
+servicesToLinuxUser = {
+  'NAMENODE': 'hdfs_user',
+  'SECONDARY_NAMENODE': 'hdfs_user',
+  'DATANODE': 'hdfs_user',
+  'JOURNALNODE': 'hdfs_user',
+  'ZKFC': 'hdfs_user',
+  'JOBTRACKER': 'mapred_user',
+  'TASKTRACKER': 'mapred_user',
+  'RESOURCEMANAGER': 'yarn_user',
+  'NODEMANAGER': 'yarn_user',
+  'HISTORYSERVER': 'mapred_user',
+  'HBASE_MASTER': 'hbase_user',
+  'HBASE_REGIONSERVER': 'hbase_user',
+}
+
+pidPathVars = [
+  {'var' : 'glusterfs_pid_dir_prefix',
+   'defaultValue' : '\\var\\run'},
+  {'var' : 'hadoop_pid_dir_prefix',
+   'defaultValue' : '\\var\\run\\hadoop'},
+  {'var' : 'hadoop_pid_dir_prefix',
+   'defaultValue' : '\\var\\run\\hadoop'},
+  {'var' : 'ganglia_runtime_dir',
+   'defaultValue' : '\\var\\run\\ganglia\\hdp'},
+  {'var' : 'hbase_pid_dir',
+   'defaultValue' : '\\var\\run\\hbase'},
+  {'var' : '',
+   'defaultValue' : '\\var\\run\\nagios'},
+  {'var' : 'zk_pid_dir',
+   'defaultValue' : '\\var\\run\\zookeeper'},
+  {'var' : 'oozie_pid_dir',
+   'defaultValue' : '\\var\\run\\oozie'},
+  {'var' : 'hcat_pid_dir',
+   'defaultValue' : '\\var\\run\\webhcat'},
+  {'var' : 'hive_pid_dir',
+   'defaultValue' : '\\var\\run\\hive'},
+  {'var' : 'mysqld_pid_dir',
+   'defaultValue' : '\\var\\run\\mysqld'},
+  {'var' : 'hcat_pid_dir',
+   'defaultValue' : '\\var\\run\\webhcat'},
+  {'var' : 'yarn_pid_dir_prefix',
+   'defaultValue' : '\\var\\run\\hadoop-yarn'},
+  {'var' : 'mapred_pid_dir_prefix',
+   'defaultValue' : '\\var\\run\\hadoop-mapreduce'},
+]
+
+if 'AMBARI_AGENT_CONF_DIR' in os.environ:
+  configFile = os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
+else:
+  configFile = "ambari-agent.ini"
+
+if 'AMBARI_AGENT_LOG_DIR' in os.environ:
+  logfile = os.path.join(os.environ['AMBARI_AGENT_LOG_DIR'], "ambari-agent.log")
+else:
+  logfile = "\\var\\log\\ambari-agent-1.3.0-SNAPSHOT\\ambari-agent.log"
+
+if 'AMBARI_AGENT_OUT_DIR' in os.environ:
+  outfile = os.path.join(os.environ['AMBARI_AGENT_OUT_DIR'], "ambari-agent.out")
+else:
+  outfile = "\\var\\log\\ambari-agent-1.3.0-SNAPSHOT\\ambari-agent.out"

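Both AgentConfig modules honor the same environment-variable overrides; only the fallbacks differ (absolute Linux paths versus paths relative to the agent's working directory on Windows). A sketch of overriding the config location before import (the directory value is hypothetical):

import os
os.environ['AMBARI_AGENT_CONF_DIR'] = r'C:\ambari-agent\conf'  # hypothetical install dir
import AgentConfig_windows  # module-level code picks up the override at import time
print AgentConfig_windows.configFile  # -> C:\ambari-agent\conf\ambari-agent.ini
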
+ 8 - 2
ambari-agent/src/main/python/ambari_agent/AmbariAgent.py

@@ -23,8 +23,14 @@ import sys
 import subprocess
 from Controller import AGENT_AUTO_RESTART_EXIT_CODE
 
-AGENT_SCRIPT = "/usr/lib/python2.6/site-packages/ambari_agent/main.py"
-AGENT_PID_FILE = "/var/run/ambari-agent/ambari-agent.pid"
+if os.environ.has_key("PYTHON_BIN"):
+  AGENT_SCRIPT = os.path.join(os.environ["PYTHON_BIN"],"site-packages/ambari_agent/main.py")
+else:
+  AGENT_SCRIPT = "/usr/lib/python2.6/site-packages/ambari_agent/main.py"
+if os.environ.has_key("AMBARI_PID_DIR"):
+  AGENT_PID_FILE = os.path.join(os.environ["AMBARI_PID_DIR"],"ambari-agent.pid")
+else:
+  AGENT_PID_FILE = "/var/run/ambari-agent/ambari-agent.pid"
 # AGENT_AUTO_RESTART_EXIT_CODE = 77 is the exit code we return when restart_agent() is called
 status = AGENT_AUTO_RESTART_EXIT_CODE
 

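AGENT_AUTO_RESTART_EXIT_CODE = 77 is the contract between main.py and this launcher: the launcher keeps relaunching the agent for as long as it exits with 77. A schematic sketch of that loop (the real loop body lives in the unshown remainder of AmbariAgent.py, so this is an assumption about its shape):

import subprocess
import sys

status = AGENT_AUTO_RESTART_EXIT_CODE
while status == AGENT_AUTO_RESTART_EXIT_CODE:
  agent = subprocess.Popen([sys.executable, AGENT_SCRIPT])  # relaunch main.py
  agent.communicate()
  status = agent.returncode  # 77 means restart_agent() was called; go around again
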
+ 41 - 199
ambari-agent/src/main/python/ambari_agent/AmbariConfig.py

@@ -18,204 +18,31 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
+import platform
+
 import ConfigParser
 import StringIO
 import json
 from NetUtil import NetUtil
 
-content = """
-
-[server]
-hostname=localhost
-url_port=8440
-secured_url_port=8441
-
-[agent]
-prefix=/tmp/ambari-agent
-tmp_dir=/tmp/ambari-agent/tmp
-data_cleanup_interval=86400
-data_cleanup_max_age=2592000
-data_cleanup_max_size_MB = 100
-ping_port=8670
-cache_dir=/var/lib/ambari-agent/cache
-
-[services]
-
-[python]
-custom_actions_dir = /var/lib/ambari-agent/resources/custom_actions
-
-[command]
-maxretries=2
-sleepBetweenRetries=1
-
-[security]
-keysdir=/tmp/ambari-agent
-server_crt=ca.crt
-passphrase_env_var_name=AMBARI_PASSPHRASE
-
-[heartbeat]
-state_interval = 6
-dirs=/etc/hadoop,/etc/hadoop/conf,/var/run/hadoop,/var/log/hadoop
-log_lines_count=300
-
-"""
-
-imports = [
-  "hdp/manifests/*.pp",
-  "hdp-hadoop/manifests/*.pp",
-  "hdp-hbase/manifests/*.pp",
-  "hdp-zookeeper/manifests/*.pp",
-  "hdp-oozie/manifests/*.pp",
-  "hdp-pig/manifests/*.pp",
-  "hdp-sqoop/manifests/*.pp",
-  "hdp-templeton/manifests/*.pp",
-  "hdp-hive/manifests/*.pp",
-  "hdp-hcat/manifests/*.pp",
-  "hdp-mysql/manifests/*.pp",
-  "hdp-monitor-webserver/manifests/*.pp",
-  "hdp-repos/manifests/*.pp"
-]
-
-rolesToClass = {
-  'GLUSTERFS': 'hdp-hadoop::glusterfs',
-  'GLUSTERFS_CLIENT': 'hdp-hadoop::glusterfs_client',
-  'GLUSTERFS_SERVICE_CHECK': 'hdp-hadoop::glusterfs_service_check',
-  'NAMENODE': 'hdp-hadoop::namenode',
-  'DATANODE': 'hdp-hadoop::datanode',
-  'SECONDARY_NAMENODE': 'hdp-hadoop::snamenode',
-  'JOBTRACKER': 'hdp-hadoop::jobtracker',
-  'TASKTRACKER': 'hdp-hadoop::tasktracker',
-  'RESOURCEMANAGER': 'hdp-yarn::resourcemanager',
-  'NODEMANAGER': 'hdp-yarn::nodemanager',
-  'HISTORYSERVER': 'hdp-yarn::historyserver',
-  'YARN_CLIENT': 'hdp-yarn::yarn_client',
-  'HDFS_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCE2_CLIENT': 'hdp-yarn::mapreducev2_client',
-  'ZOOKEEPER_SERVER': 'hdp-zookeeper',
-  'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
-  'HBASE_MASTER': 'hdp-hbase::master',
-  'HBASE_REGIONSERVER': 'hdp-hbase::regionserver',
-  'HBASE_CLIENT': 'hdp-hbase::client',
-  'PIG': 'hdp-pig',
-  'SQOOP': 'hdp-sqoop',
-  'OOZIE_SERVER': 'hdp-oozie::server',
-  'OOZIE_CLIENT': 'hdp-oozie::client',
-  'HIVE_CLIENT': 'hdp-hive::client',
-  'HCAT': 'hdp-hcat',
-  'HIVE_SERVER': 'hdp-hive::server',
-  'HIVE_METASTORE': 'hdp-hive::metastore',
-  'MYSQL_SERVER': 'hdp-mysql::server',
-  'WEBHCAT_SERVER': 'hdp-templeton::server',
-  'DASHBOARD': 'hdp-dashboard',
-  'GANGLIA_SERVER': 'hdp-ganglia::server',
-  'GANGLIA_MONITOR': 'hdp-ganglia::monitor',
-  'HTTPD': 'hdp-monitor-webserver',
-  'HUE_SERVER': 'hdp-hue::server',
-  'HDFS_SERVICE_CHECK': 'hdp-hadoop::hdfs::service_check',
-  'MAPREDUCE_SERVICE_CHECK': 'hdp-hadoop::mapred::service_check',
-  'MAPREDUCE2_SERVICE_CHECK': 'hdp-yarn::mapred2::service_check',
-  'ZOOKEEPER_SERVICE_CHECK': 'hdp-zookeeper::zookeeper::service_check',
-  'ZOOKEEPER_QUORUM_SERVICE_CHECK': 'hdp-zookeeper::quorum::service_check',
-  'HBASE_SERVICE_CHECK': 'hdp-hbase::hbase::service_check',
-  'HIVE_SERVICE_CHECK': 'hdp-hive::hive::service_check',
-  'HCAT_SERVICE_CHECK': 'hdp-hcat::hcat::service_check',
-  'OOZIE_SERVICE_CHECK': 'hdp-oozie::oozie::service_check',
-  'PIG_SERVICE_CHECK': 'hdp-pig::pig::service_check',
-  'SQOOP_SERVICE_CHECK': 'hdp-sqoop::sqoop::service_check',
-  'WEBHCAT_SERVICE_CHECK': 'hdp-templeton::templeton::service_check',
-  'DASHBOARD_SERVICE_CHECK': 'hdp-dashboard::dashboard::service_check',
-  'DECOMMISSION_DATANODE': 'hdp-hadoop::hdfs::decommission',
-  'HUE_SERVICE_CHECK': 'hdp-hue::service_check',
-  'RESOURCEMANAGER_SERVICE_CHECK': 'hdp-yarn::resourcemanager::service_check',
-  'HISTORYSERVER_SERVICE_CHECK': 'hdp-yarn::historyserver::service_check',
-  'TEZ_CLIENT': 'hdp-tez::tez_client',
-  'YARN_SERVICE_CHECK': 'hdp-yarn::yarn::service_check',
-  'FLUME_SERVER': 'hdp-flume',
-  'JOURNALNODE': 'hdp-hadoop::journalnode',
-  'ZKFC': 'hdp-hadoop::zkfc'
-}
-
-serviceStates = {
-  'START': 'running',
-  'INSTALL': 'installed_and_configured',
-  'STOP': 'stopped'
-}
-
-servicesToPidNames = {
-  'GLUSTERFS' : 'glusterd.pid$',
-  'NAMENODE': 'hadoop-{USER}-namenode.pid$',
-  'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
-  'DATANODE': 'hadoop-{USER}-datanode.pid$',
-  'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
-  'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
-  'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
-  'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
-  'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
-  'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
-  'ZKFC': 'hadoop-{USER}-zkfc.pid$',
-  'OOZIE_SERVER': 'oozie.pid',
-  'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
-  'FLUME_SERVER': 'flume-node.pid',
-  'TEMPLETON_SERVER': 'templeton.pid',
-  'GANGLIA_SERVER': 'gmetad.pid',
-  'GANGLIA_MONITOR': 'gmond.pid',
-  'HBASE_MASTER': 'hbase-{USER}-master.pid',
-  'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
-  'HCATALOG_SERVER': 'webhcat.pid',
-  'KERBEROS_SERVER': 'kadmind.pid',
-  'HIVE_SERVER': 'hive-server.pid',
-  'HIVE_METASTORE': 'hive.pid',
-  'MYSQL_SERVER': 'mysqld.pid',
-  'HUE_SERVER': '/var/run/hue/supervisor.pid',
-  'WEBHCAT_SERVER': 'webhcat.pid',
-}
-
-#Each service, which's pid depends on user should provide user mapping
-servicesToLinuxUser = {
-  'NAMENODE': 'hdfs_user',
-  'SECONDARY_NAMENODE': 'hdfs_user',
-  'DATANODE': 'hdfs_user',
-  'JOURNALNODE': 'hdfs_user',
-  'ZKFC': 'hdfs_user',
-  'JOBTRACKER': 'mapred_user',
-  'TASKTRACKER': 'mapred_user',
-  'RESOURCEMANAGER': 'yarn_user',
-  'NODEMANAGER': 'yarn_user',
-  'HISTORYSERVER': 'mapred_user',
-  'HBASE_MASTER': 'hbase_user',
-  'HBASE_REGIONSERVER': 'hbase_user',
-}
-
-pidPathesVars = [
-  {'var' : 'glusterfs_pid_dir_prefix',
-   'defaultValue' : '/var/run'},
-  {'var' : 'hadoop_pid_dir_prefix',
-   'defaultValue' : '/var/run/hadoop'},
-  {'var' : 'hadoop_pid_dir_prefix',
-   'defaultValue' : '/var/run/hadoop'},
-  {'var' : 'ganglia_runtime_dir',
-   'defaultValue' : '/var/run/ganglia/hdp'},
-  {'var' : 'hbase_pid_dir',
-   'defaultValue' : '/var/run/hbase'},
-  {'var' : 'zk_pid_dir',
-   'defaultValue' : '/var/run/zookeeper'},
-  {'var' : 'oozie_pid_dir',
-   'defaultValue' : '/var/run/oozie'},
-  {'var' : 'hcat_pid_dir',
-   'defaultValue' : '/var/run/webhcat'},
-  {'var' : 'hive_pid_dir',
-   'defaultValue' : '/var/run/hive'},
-  {'var' : 'mysqld_pid_dir',
-   'defaultValue' : '/var/run/mysqld'},
-  {'var' : 'hcat_pid_dir',
-   'defaultValue' : '/var/run/webhcat'},
-  {'var' : 'yarn_pid_dir_prefix',
-   'defaultValue' : '/var/run/hadoop-yarn'},
-  {'var' : 'mapred_pid_dir_prefix',
-   'defaultValue' : '/var/run/hadoop-mapreduce'},
-]
+SETUP_ACTION = "setup"
+START_ACTION = "start"
+STOP_ACTION = "stop"
+RESET_ACTION = "reset"
+STATUS_ACTION = "status"
+DEBUG_ACTION = "debug"
+
+IS_WINDOWS = platform.system() == "Windows"
+
+if not IS_WINDOWS:
+  from AgentConfig_linux import *
+else:
+  from AgentConfig_windows import *
+
+config = ConfigParser.RawConfigParser()
 
+s = StringIO.StringIO(content)
+config.readfp(s)
 
 class AmbariConfig:
   TWO_WAY_SSL_PROPERTY = "security.server.two_way_ssl"
@@ -246,12 +73,31 @@ class AmbariConfig:
   def add_section(self, section):
     self.config.add_section(section)
 
+  @staticmethod
+  def getConfigFile():
+    global configFile
+    return configFile
+
+  @staticmethod
+  def getLogFile():
+    global logfile
+    return logfile
+
+  @staticmethod
+  def getOutFile():
+    global outfile
+    return outfile
+
   def setConfig(self, customConfig):
     self.config = customConfig
 
   def getConfig(self):
     return self.config
 
+  def getImports(self):
+    global imports
+    return imports
+
   def getRolesToClass(self):
     global rolesToClass
     return rolesToClass
@@ -264,13 +110,9 @@ class AmbariConfig:
     global servicesToPidNames
     return servicesToPidNames
 
-  def getImports(self):
-    global imports
-    return imports
-
-  def getPidPathesVars(self):
-    global pidPathesVars
-    return pidPathesVars
+  def pidPathVars(self):
+    global pidPathVars
+    return pidPathVars
 
   def has_option(self, section, option):
     return self.config.has_option(section, option)

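After the platform dispatch, callers see a single AmbariConfig surface on both OSes; the new static getters expose whichever per-platform defaults were star-imported. A quick sketch:

from AmbariConfig import AmbariConfig

print AmbariConfig.getConfigFile()
# -> /etc/ambari-agent/conf/ambari-agent.ini on Linux,
#    ambari-agent.ini on Windows (absent the AMBARI_AGENT_CONF_DIR override)
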
+ 31 - 13
ambari-agent/src/main/python/ambari_agent/Controller.py

@@ -22,6 +22,7 @@ import logging
 import signal
 import json
 import sys
+import platform
 import os
 import socket
 import time
@@ -46,11 +47,21 @@ logger = logging.getLogger()
 
 AGENT_AUTO_RESTART_EXIT_CODE = 77
 
+IS_WINDOWS = platform.system() == "Windows"
+
 class Controller(threading.Thread):
 
-  def __init__(self, config, range=30):
+  def __init__(self, config, heartbeat_stop_callback = None, range=30):
     threading.Thread.__init__(self)
     logger.debug('Initializing Controller RPC thread.')
+
+    if heartbeat_stop_callback is None:
+      if IS_WINDOWS:
+        from HeartbeatHandlers_windows import HeartbeatStopHandler
+      else:
+        from HeartbeatStopHandler_linux import HeartbeatStopHandler
+      heartbeat_stop_callback = HeartbeatStopHandler()
+
     self.lock = threading.Lock()
     self.safeMode = True
     self.credential = None
@@ -62,7 +73,7 @@ class Controller(threading.Thread):
     self.registerUrl = server_secured_url + '/agent/v1/register/' + self.hostname
     self.heartbeatUrl = server_secured_url + '/agent/v1/heartbeat/' + self.hostname
     self.componentsUrl = server_secured_url + '/agent/v1/components/'
-    self.netutil = NetUtil()
+    self.netutil = NetUtil(heartbeat_stop_callback)
     self.responseId = -1
     self.repeatRegistration = False
     self.isRegistered = False
@@ -71,10 +82,10 @@ class Controller(threading.Thread):
     self.hasMappedComponents = True
     # Event is used for synchronizing heartbeat iterations (to make possible
     # manual wait() interruption between heartbeats )
-    self.heartbeat_wait_event = threading.Event()
+    self.heartbeat_stop_callback = heartbeat_stop_callback
     # List of callbacks that are called at agent registration
     self.registration_listeners = []
-    
+
     # pull config directory out of config
     cache_dir = config.get('agent', 'cache_dir')
     if cache_dir is None:
@@ -197,6 +208,9 @@ class Controller(threading.Thread):
   DEBUG_SUCCESSFULL_HEARTBEATS = 0
   DEBUG_STOP_HEARTBEATING = False
 
+  def trigger_heartbeat(self):
+    self.heartbeat_stop_callback.set_heartbeat()
+
   def heartbeatWithServer(self):
     self.DEBUG_HEARTBEAT_RETRIES = 0
     self.DEBUG_SUCCESSFULL_HEARTBEATS = 0
@@ -261,14 +275,14 @@ class Controller(threading.Thread):
         if 'statusCommands' in response.keys():
           self.addToStatusQueue(response['statusCommands'])
           pass
-          
+
         if 'alertDefinitionCommands' in response.keys():
           self.alert_scheduler_handler.update_definitions(response['alertDefinitionCommands'], True)
           pass
         
         if 'alertExecutionCommands' in response.keys():
           self.alert_scheduler_handler.execute_alert(response['alertExecutionCommands'])
-          pass        
+          pass
 
         if "true" == response['restartAgent']:
           logger.error("Received the restartAgent command")
@@ -284,7 +298,7 @@ class Controller(threading.Thread):
         certVerifFailed = False
         self.DEBUG_SUCCESSFULL_HEARTBEATS += 1
         self.DEBUG_HEARTBEAT_RETRIES = 0
-        self.heartbeat_wait_event.clear()
+        self.heartbeat_stop_callback.reset_heartbeat()
       except ssl.SSLError:
         self.repeatRegistration=False
         self.isRegistered = False
@@ -319,10 +333,10 @@ class Controller(threading.Thread):
       # Sleep for some time
       timeout = self.netutil.HEARTBEAT_IDDLE_INTERVAL_SEC \
                 - self.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS
-      self.heartbeat_wait_event.wait(timeout=timeout)
-      # Sleep a bit more to allow STATUS_COMMAND results to be collected
-      # and sent in one heartbeat. Also avoid server overload with heartbeats
-      time.sleep(self.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS)
+      if 0 == self.heartbeat_stop_callback.wait(timeout, self.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS):
+        # Stop loop when stop event received
+        logger.info("Stop event received")
+        self.DEBUG_STOP_HEARTBEATING=True
     pass
 
   def run(self):
@@ -405,7 +419,10 @@ class Controller(threading.Thread):
 
 def main(argv=None):
   # Allow Ctrl-C
-  signal.signal(signal.SIGINT, signal.SIG_DFL)
+  if IS_WINDOWS:
+    from HeartbeatHandlers_windows import bind_signal_handlers
+  else:
+    from HeartbeatStopHandler_linux import bind_signal_handlers
 
   logger.setLevel(logging.INFO)
   formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - \
@@ -417,7 +434,8 @@ def main(argv=None):
   logger.info('Starting Server RPC Thread: %s' % ' '.join(sys.argv))
 
   config = AmbariConfig.config
-  collector = Controller(config)
+  heartbeat_stop_callback = bind_signal_handlers(os.getpid())
+  collector = Controller(config, heartbeat_stop_callback)
   collector.start()
   collector.run()
 

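With the stop handler injected, other agent components can force an out-of-band heartbeat instead of waiting out the idle interval. A usage sketch, assuming a configured agent:

import AmbariConfig
from Controller import Controller

controller = Controller(AmbariConfig.config)  # picks the platform's HeartbeatStopHandler
controller.start()
controller.trigger_heartbeat()  # wakes heartbeatWithServer() before the idle interval expires
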
+ 8 - 6
ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py

@@ -64,7 +64,7 @@ class CustomServiceOrchestrator():
     self.public_fqdn = hostname.public_hostname(config)
     # cache reset will be called on every agent registration
     controller.registration_listeners.append(self.file_cache.reset)
-    
+
     # Clean up old status command files if any
     try:
       os.unlink(self.status_commands_stdout)
@@ -88,7 +88,7 @@ class CustomServiceOrchestrator():
                     "reason - {reason} . Killing process {pid}"
         .format(tid = str(task_id), reason = reason, pid = pid))
         shell.kill_process_with_children(pid)
-      else: 
+      else:
         logger.warn("Unable to find pid by taskId = %s"%task_id)
 
   def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name = None,
@@ -101,7 +101,7 @@ class CustomServiceOrchestrator():
       script_type = command['commandParams']['script_type']
       script = command['commandParams']['script']
       timeout = int(command['commandParams']['command_timeout'])
-      
+
       if 'hostLevelParams' in command and 'jdk_location' in command['hostLevelParams']:
         server_url_prefix = command['hostLevelParams']['jdk_location']
       else:
@@ -149,7 +149,7 @@ class CustomServiceOrchestrator():
         handle = command['__handle']
         handle.on_background_command_started = self.map_task_to_process
         del command['__handle']
-      
+
       json_path = self.dump_command_to_json(command)
       pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
           self.PRE_HOOK_PREFIX, command_name, script_type)
@@ -187,7 +187,7 @@ class CustomServiceOrchestrator():
         if cancel_reason:
           ret['stdout'] += cancel_reason
           ret['stderr'] += cancel_reason
-  
+
           with open(tmpoutfile, "a") as f:
             f.write(cancel_reason)
           with open(tmperrfile, "a") as f:
@@ -213,7 +213,7 @@ class CustomServiceOrchestrator():
         if not isinstance(pid, int):
           return '\nCommand aborted. ' + pid
     return None
-        
+
   def requestComponentStatus(self, command):
     """
      Component status is determined by exit code, returned by runCommand().
@@ -262,6 +262,8 @@ class CustomServiceOrchestrator():
     # Perform few modifications to stay compatible with the way in which
     public_fqdn = self.public_fqdn
     command['public_hostname'] = public_fqdn
+    # Add cache dir to make it visible for commands
+    command["hostLevelParams"]["agentCacheDir"] = self.config.get('agent', 'cache_dir')
     # Now, dump the json file
     command_type = command['commandType']
     from ActionQueue import ActionQueue  # To avoid cyclic dependency

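The agentCacheDir addition means every command JSON the agent dumps now carries the cache location, so command scripts can resolve files under the agent cache. An illustrative shape of the dumped command (hostnames and URLs are hypothetical):

command = {
  'commandType': 'EXECUTION_COMMAND',
  'public_hostname': 'agent1.example.com',                    # hypothetical
  'hostLevelParams': {
    'jdk_location': 'http://ambari.example.com/resources',    # hypothetical
    'agentCacheDir': '/var/lib/ambari-agent/cache'
  }
}
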
+ 222 - 107
ambari-agent/src/main/python/ambari_agent/Facter.py

@@ -27,55 +27,29 @@ import shlex
 import socket
 import multiprocessing
 import subprocess
-
+import platform
+from shell import shellRunner
 import time
 import uuid
 from ambari_commons import OSCheck
 
 log = logging.getLogger()
 
-# selinux command
-GET_SE_LINUX_ST_CMD = "/usr/sbin/sestatus"
-GET_IFCONFIG_CMD = "ifconfig"
-GET_UPTIME_CMD = "cat /proc/uptime"
-GET_MEMINFO_CMD = "cat /proc/meminfo"
-
-class Facter():
-  def __init__(self):
-
-    self.DATA_IFCONFIG_OUTPUT = Facter.setDataIfConfigOutput()
-    self.DATA_UPTIME_OUTPUT = Facter.setDataUpTimeOutput()
-    self.DATA_MEMINFO_OUTPUT = Facter.setMemInfoOutput()
-
-  @staticmethod
-  def setDataIfConfigOutput():
-
-    try:
-      result = os.popen(GET_IFCONFIG_CMD).read()
-      return result
-    except OSError:
-      log.warn("Can't execute {0}".format(GET_IFCONFIG_CMD))
-    return ""
-
-  @staticmethod
-  def setDataUpTimeOutput():
 
-    try:
-      result = os.popen(GET_UPTIME_CMD).read()
-      return result
-    except OSError:
-      log.warn("Can't execute {0}".format(GET_UPTIME_CMD))
-    return ""
+def run_os_command(cmd):
+  if type(cmd) == str:
+    cmd = shlex.split(cmd)
+  process = subprocess.Popen(cmd,
+                             stdout=subprocess.PIPE,
+                             stdin=subprocess.PIPE,
+                             stderr=subprocess.PIPE
+  )
+  (stdoutdata, stderrdata) = process.communicate()
+  return process.returncode, stdoutdata, stderrdata
 
-  @staticmethod
-  def setMemInfoOutput():
 
-    try:
-      result = os.popen(GET_MEMINFO_CMD).read()
-      return result
-    except OSError:
-      log.warn("Can't execute {0}".format(GET_MEMINFO_CMD))
-    return ""
+class FacterBase():
+  def __init__(self):
+    pass
 
   # Returns the currently running user id
   def getId(self):
@@ -87,7 +61,7 @@ class Facter():
 
   # Returns the FQDN of the host
   def getFqdn(self):
-    return socket.getfqdn()
+    return socket.getfqdn().lower()
 
   # Returns the host's primary DNS domain name
   def getDomain(self):
@@ -153,15 +127,211 @@ class Facter():
   def getOsFamily(self):
     return OSCheck.get_os_family()
 
+  # Return uptime hours
+  def getUptimeHours(self):
+    return self.getUptimeSeconds() / (60 * 60)
+
+  # Return uptime days
+  def getUptimeDays(self):
+    return self.getUptimeSeconds() / (60 * 60 * 24)
+
+  def facterInfo(self):
+    facterInfo = {}
+    facterInfo['id'] = self.getId()
+    facterInfo['kernel'] = self.getKernel()
+    facterInfo['domain'] = self.getDomain()
+    facterInfo['fqdn'] = self.getFqdn()
+    facterInfo['hostname'] = self.getHostname()
+    facterInfo['macaddress'] = self.getMacAddress()
+    facterInfo['architecture'] = self.getArchitecture()
+    facterInfo['operatingsystem'] = self.getOperatingSystem()
+    facterInfo['operatingsystemrelease'] = self.getOperatingSystemRelease()
+    facterInfo['physicalprocessorcount'] = self.getProcessorcount()
+    facterInfo['processorcount'] = self.getProcessorcount()
+    facterInfo['timezone'] = self.getTimeZone()
+    facterInfo['hardwareisa'] = self.getArchitecture()
+    facterInfo['hardwaremodel'] = self.getArchitecture()
+    facterInfo['kernelrelease'] = self.getKernelRelease()
+    facterInfo['kernelversion'] = self.getKernelVersion()
+    facterInfo['osfamily'] = self.getOsFamily()
+    facterInfo['kernelmajversion'] = self.getKernelMajVersion()
+
+    facterInfo['ipaddress'] = self.getIpAddress()
+    facterInfo['netmask'] = self.getNetmask()
+    facterInfo['interfaces'] = self.getInterfaces()
+
+    facterInfo['uptime_seconds'] = str(self.getUptimeSeconds())
+    facterInfo['uptime_hours'] = str(self.getUptimeHours())
+    facterInfo['uptime_days'] = str(self.getUptimeDays())
+
+    facterInfo['memorysize'] = self.getMemorySize()
+    facterInfo['memoryfree'] = self.getMemoryFree()
+    facterInfo['memorytotal'] = self.getMemoryTotal()
+
+    return facterInfo
+
+  #Convert kB to GB
+  @staticmethod
+  def convertSizeKbToGb(size):
+    return "%0.2f GB" % round(float(size) / (1024.0 * 1024.0), 2)
+
+  #Convert MB to GB
+  @staticmethod
+  def convertSizeMbToGb(size):
+    return "%0.2f GB" % round(float(size) / (1024.0), 2)
+
+
+class FacterWindows(FacterBase):
+  GET_SYSTEM_INFO_CMD = "systeminfo"
+  GET_MEMORY_CMD = '$mem =(Get-WMIObject Win32_OperatingSystem -ComputerName "LocalHost" ); echo "$($mem.FreePhysicalMemory) $($mem.TotalVisibleMemorySize)"'
+  GET_PAGE_FILE_INFO = '$pgo=(Get-WmiObject Win32_PageFileUsage); echo "$($pgo.AllocatedBaseSize) $($pgo.AllocatedBaseSize-$pgo.CurrentUsage)"'
+  GET_UPTIME_CMD = 'echo $([int]((get-date)-[system.management.managementdatetimeconverter]::todatetime((get-wmiobject -class win32_operatingsystem).Lastbootuptime)).TotalSeconds)'
+
+  # Return first ip address
+  def getIpAddress(self):
+    #TODO check if we need ipconfig
+    return socket.gethostbyname(socket.gethostname().lower())
+
+  # Return netmask
+  def getNetmask(self):
+    #TODO return correct netmask
+    return 'OS NOT SUPPORTED'
+
+  # Return interfaces
+  def getInterfaces(self):
+    #TODO return correct interfaces
+    return 'OS NOT SUPPORTED'
+
+  # Return uptime seconds
+  def getUptimeSeconds(self):
+    try:
+      runner = shellRunner()
+      result = runner.runPowershell(script_block=FacterWindows.GET_UPTIME_CMD).output.replace('\n', '').replace('\r', '')
+      return int(result)
+    except:
+      log.warn("Can not get uptime")
+    return 0
+
+  # Return memoryfree
+  def getMemoryFree(self):
+    try:
+      runner = shellRunner()
+      result = runner.runPowershell(script_block=FacterWindows.GET_MEMORY_CMD).output.split(" ")[0].replace('\n', '').replace('\r', '')
+      return result
+    except:
+      log.warn("Can not get MemoryFree")
+    return 0
+
+  # Return memorytotal
+  def getMemoryTotal(self):
+    try:
+      runner = shellRunner()
+      result = runner.runPowershell(script_block=FacterWindows.GET_MEMORY_CMD).output.split(" ")[-1].replace('\n', '').replace('\r', '')
+      return result
+    except:
+      log.warn("Can not get MemoryTotal")
+    return 0
+
+  # Return swapfree
+  def getSwapFree(self):
+    try:
+      runner = shellRunner()
+      result = runner.runPowershell(script_block=FacterWindows.GET_PAGE_FILE_INFO).output.split(" ")[-1].replace('\n', '').replace('\r', '')
+      return result
+    except:
+      log.warn("Can not get SwapFree")
+    return 0
+
+  # Return swapsize
+  def getSwapSize(self):
+    try:
+      runner = shellRunner()
+      result = runner.runPowershell(script_block=FacterWindows.GET_PAGE_FILE_INFO).output.split(" ")[0].replace('\n', '').replace('\r', '')
+      return result
+    except:
+      log.warn("Can not get SwapSize")
+    return 0
+
+  # Return memorysize
+  def getMemorySize(self):
+    try:
+      runner = shellRunner()
+      result = runner.runPowershell(script_block=FacterWindows.GET_MEMORY_CMD).output.split(" ")[-1].replace('\n', '').replace('\r', '')
+      return result
+    except:
+      log.warn("Can not get MemorySize")
+    return 0
+
+  def facterInfo(self):
+    facterInfo = FacterBase.facterInfo(self)
+    facterInfo['swapsize'] = FacterBase.convertSizeMbToGb(self.getSwapSize())
+    facterInfo['swapfree'] = FacterBase.convertSizeMbToGb(self.getSwapFree())
+    return facterInfo
+
+
+class FacterLinux(FacterBase):
+  # selinux command
+  GET_SE_LINUX_ST_CMD = "/usr/sbin/sestatus"
+  GET_IFCONFIG_CMD = "ifconfig"
+  GET_UPTIME_CMD = "cat /proc/uptime"
+  GET_MEMINFO_CMD = "cat /proc/meminfo"
+
+  def __init__(self):
+
+    self.DATA_IFCONFIG_OUTPUT = FacterLinux.setDataIfConfigOutput()
+    self.DATA_UPTIME_OUTPUT = FacterLinux.setDataUpTimeOutput()
+    self.DATA_MEMINFO_OUTPUT = FacterLinux.setMemInfoOutput()
+
+  @staticmethod
+  def setDataIfConfigOutput():
+
+    try:
+      result = os.popen(FacterLinux.GET_IFCONFIG_CMD).read()
+      return result
+    except OSError:
+      log.warn("Can't execute {0}".format(FacterLinux.GET_IFCONFIG_CMD))
+    return ""
+
+  @staticmethod
+  def setDataUpTimeOutput():
+
+    try:
+      result = os.popen(FacterLinux.GET_UPTIME_CMD).read()
+      return result
+    except OSError:
+      log.warn("Can't execute {0}".format(FacterLinux.GET_UPTIME_CMD))
+    return ""
+
+  @staticmethod
+  def setMemInfoOutput():
+
+    try:
+      result = os.popen(FacterLinux.GET_MEMINFO_CMD).read()
+      return result
+    except OSError:
+      log.warn("Can't execute {0}".format(FacterLinux.GET_MEMINFO_CMD))
+    return ""
+
   def isSeLinux(self):
 
     try:
-      retcode, out, err = run_os_command(GET_SE_LINUX_ST_CMD)
+      retcode, out, err = run_os_command(FacterLinux.GET_SE_LINUX_ST_CMD)
       se_status = re.search('(enforcing|permissive|enabled)', out)
       if se_status:
         return True
     except OSError:
-      log.warn("Could not run {0}: OK".format(GET_SE_LINUX_ST_CMD))
+      log.warn("Could not run {0}: OK".format(FacterLinux.GET_SE_LINUX_ST_CMD))
     return False
 
   # Function that returns list of values that matches
@@ -183,10 +353,6 @@ class Facter():
 
     return result
 
-  #Convert kB to GB
-  def convertSizeKbToGb(self, size):
-    return "%0.2f GB" % round(float(size) / (1024.0 * 1024.0), 2)
-
   # Return first ip address
   def getIpAddress(self):
     ip_pattern="(?: inet addr:)(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
@@ -195,7 +361,7 @@ class Facter():
     result = self.data_return_first(ip_pattern,self.DATA_IFCONFIG_OUTPUT)
     if result == '':
       log.warn("Can't get an ip address from {0}".format(self.DATA_IFCONFIG_OUTPUT))
-      return socket.gethostbyname(socket.gethostname())
+      return socket.gethostbyname(socket.gethostname().lower())
     else:
       return result
 
@@ -231,15 +397,6 @@ class Facter():
       log.warn("Can't get an uptime value from {0}".format(self.DATA_UPTIME_OUTPUT))
       return 0
 
-
-  # Return uptime hours
-  def getUptimeHours(self):
-    return self.getUptimeSeconds() / (60 * 60)
-
-  # Return uptime days
-  def getUptimeDays(self):
-    return self.getUptimeSeconds() / (60 * 60 * 24)
-
   # Return memoryfree
   def getMemoryFree(self):
     #:memoryfree_mb => "MemFree",
@@ -284,55 +441,18 @@ class Facter():
       log.warn("Can't get memory size from {0}".format(self.DATA_MEMINFO_OUTPUT))
       return 0
 
-
   def facterInfo(self):
-    facterInfo = {}
-    facterInfo['id'] = self.getId()
-    facterInfo['kernel'] = self.getKernel()
-    facterInfo['domain'] = self.getDomain()
-    facterInfo['fqdn'] = self.getFqdn()
-    facterInfo['hostname'] = self.getHostname()
-    facterInfo['macaddress'] = self.getMacAddress()
-    facterInfo['architecture'] = self.getArchitecture()
-    facterInfo['operatingsystem'] = self.getOperatingSystem()
-    facterInfo['operatingsystemrelease'] = self.getOperatingSystemRelease()
-    facterInfo['physicalprocessorcount'] = self.getProcessorcount()
-    facterInfo['processorcount'] = self.getProcessorcount()
-    facterInfo['timezone'] = self.getTimeZone()
-    facterInfo['hardwareisa'] = self.getArchitecture()
-    facterInfo['hardwaremodel'] = self.getArchitecture()
-    facterInfo['kernelrelease'] = self.getKernelRelease()
-    facterInfo['kernelversion'] = self.getKernelVersion()
-    facterInfo['osfamily'] = self.getOsFamily()
+    facterInfo = FacterBase.facterInfo(self)
     facterInfo['selinux'] = self.isSeLinux()
-    facterInfo['kernelmajversion'] = self.getKernelMajVersion()
-
-    facterInfo['ipaddress'] = self.getIpAddress()
-    facterInfo['netmask'] = self.getNetmask()
-    facterInfo['interfaces'] = self.getInterfaces()
-
-    facterInfo['uptime_seconds'] = str(self.getUptimeSeconds())
-    facterInfo['uptime_hours'] = str(self.getUptimeHours())
-    facterInfo['uptime_days'] = str(self.getUptimeDays())
-
-    facterInfo['memorysize'] = self.getMemorySize()
-    facterInfo['memoryfree'] = self.getMemoryFree()
-    facterInfo['swapsize'] = self.convertSizeKbToGb(self.getSwapSize())
-    facterInfo['swapfree'] = self.convertSizeKbToGb(self.getSwapFree())
-    facterInfo['memorytotal'] = self.getMemoryTotal()
-
+    facterInfo['swapsize'] = FacterBase.convertSizeKbToGb(self.getSwapSize())
+    facterInfo['swapfree'] = FacterBase.convertSizeKbToGb(self.getSwapFree())
     return facterInfo
 
-def run_os_command(cmd):
-  if type(cmd) == str:
-    cmd = shlex.split(cmd)
-  process = subprocess.Popen(cmd,
-                             stdout=subprocess.PIPE,
-                             stdin=subprocess.PIPE,
-                             stderr=subprocess.PIPE
-  )
-  (stdoutdata, stderrdata) = process.communicate()
-  return process.returncode, stdoutdata, stderrdata
+
+if platform.system() == "Windows":
+  Facter = FacterWindows
+else:
+  Facter = FacterLinux
 
 
 def main(argv=None):
@@ -341,8 +461,3 @@ def main(argv=None):
 
 if __name__ == '__main__':
   main()
-
-
-
-
-

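The Facter alias keeps callers platform-agnostic: Hardware.py (later in this commit) simply does "from Facter import Facter" and gets FacterWindows or FacterLinux depending on the host. A quick usage sketch:

from Facter import Facter

facts = Facter().facterInfo()
print facts['fqdn'], facts['memorytotal'], facts['uptime_hours']
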
+ 2 - 1
ambari-agent/src/main/python/ambari_agent/FileCache.py

@@ -24,6 +24,7 @@ import os
 import shutil
 import zipfile
 import urllib2
+import urllib
 
 logger = logging.getLogger()
 
@@ -155,7 +156,7 @@ class FileCache():
     filename - file inside directory we are trying to fetch
     """
     return "{0}/{1}/{2}".format(server_url_prefix,
-                                    directory, filename)
+                                urllib.pathname2url(directory), filename)
 
 
   def fetch_url(self, url):

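urllib.pathname2url matters for Windows agents, where the directory component can arrive with backslashes: on Windows it rewrites them into URL path segments, while on Linux it percent-quotes unsafe characters. Illustrative behavior (paths hypothetical):

import urllib
urllib.pathname2url('stacks\\HDP\\services')  # -> 'stacks/HDP/services' on a Windows agent
urllib.pathname2url('custom actions')        # -> 'custom%20actions' on Linux
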
+ 31 - 2
ambari-agent/src/main/python/ambari_agent/Hardware.py

@@ -21,12 +21,15 @@ limitations under the License.
 import os.path
 import logging
 import subprocess
+import platform
+from shell import shellRunner
 from Facter import Facter
 
 logger = logging.getLogger()
 
 class Hardware:
   SSH_KEY_PATTERN = 'ssh.*key'
+  WINDOWS_GET_DRIVES_CMD = "foreach ($drive in [System.IO.DriveInfo]::getdrives()){$available = $drive.TotalFreeSpace;$used = $drive.TotalSize-$drive.TotalFreeSpace;$percent = ($used*100)/$drive.TotalSize;$size = $drive.TotalSize;$type = $drive.DriveFormat;$mountpoint = $drive.RootDirectory.FullName;echo \"$available $used $percent% $size $type $mountpoint\"}"
 
   def __init__(self):
     self.hardware = {}
@@ -59,8 +62,15 @@ class Hardware:
 
   @staticmethod
   def osdisks():
-    """ Run df to find out the disks on the host. Only works on linux 
-    platforms. Note that this parser ignores any filesystems with spaces 
+    if platform.system() == "Windows":
+      return Hardware._osdisks_win()
+    else:
+      return Hardware._osdisks_linux()
+
+  @staticmethod
+  def _osdisks_linux():
+    """ Run df to find out the disks on the host. Only works on linux
+    platforms. Note that this parser ignores any filesystems with spaces
     and any mounts with spaces. """
     mounts = []
     df = subprocess.Popen(["df", "-kPT"], stdout=subprocess.PIPE)
@@ -74,6 +84,25 @@ class Hardware:
     pass
     return mounts
 
+  @staticmethod
+  def _osdisks_win():
+    mounts = []
+    runner = shellRunner()
+    command_result = runner.runPowershell(script_block=Hardware.WINDOWS_GET_DRIVES_CMD)
+    if command_result.exitCode != 0:
+      return mounts
+    else:
+      for drive in [line for line in command_result.output.split(os.linesep) if line != '']:
+        available, used, percent, size, type, mountpoint = drive.split(" ")
+        mounts.append({"available": available,
+                       "used": used,
+                       "percent": percent,
+                       "size": size,
+                       "type": type,
+                       "mountpoint": mountpoint})
+
+    return mounts
+
   def get(self):
     return self.hardware
 

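The PowerShell one-liner in WINDOWS_GET_DRIVES_CMD emits one space-separated record per drive, which _osdisks_win() unpacks positionally. An illustrative record and its parsed form (values hypothetical):

# "104857600 52428800 33.3% 157286400 NTFS C:\" parses into:
{'available': '104857600', 'used': '52428800', 'percent': '33.3%',
 'size': '157286400', 'type': 'NTFS', 'mountpoint': 'C:\\'}
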
+ 58 - 0
ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers_windows.py

@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import win32event
+
+from ambari_commons.exceptions import FatalException
+
+
+def bind_signal_handlers(agentPid):
+  return HeartbeatStopHandler()
+
+
+class HeartbeatStopHandler:
+  def __init__(self, stopEvent = None):
+    # Event is used for synchronizing heartbeat iterations (to make possible
+    # manual wait() interruption between heartbeats )
+    self._heventHeartbeat = win32event.CreateEvent(None, 0, 0, None)
+
+    # Event is used to stop the Agent process
+    if stopEvent is None:
+      #Allow standalone testing
+      self._heventStop = win32event.CreateEvent(None, 0, 0, None)
+    else:
+      #Allow one unique event per process
+      self._heventStop = stopEvent
+
+  def set_heartbeat(self):
+    win32event.SetEvent(self._heventHeartbeat)
+
+  def reset_heartbeat(self):
+    win32event.ResetEvent(self._heventHeartbeat)
+
+  def wait(self, timeout1, timeout2 = 0):
+    timeout = int(timeout1 + timeout2) * 1000
+
+    result = win32event.WaitForMultipleObjects([self._heventStop, self._heventHeartbeat], False, timeout)
+    if(win32event.WAIT_OBJECT_0 != result and win32event.WAIT_OBJECT_0 + 1 != result and win32event.WAIT_TIMEOUT != result):
+      raise FatalException(-1, "Error waiting for stop/heartbeat events: " + str(result))
+    if(win32event.WAIT_TIMEOUT == result):
+      return -1
+    return result - win32event.WAIT_OBJECT_0

+ 91 - 0
ambari-agent/src/main/python/ambari_agent/HeartbeatStopHandler_linux.py

@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import logging
+import signal
+import threading
+import traceback
+
+
+logger = logging.getLogger()
+
+_handler = None
+
+def signal_handler(signum, frame):
+  _handler.set_stop()
+
+def bind_signal_handlers(agentPid):
+  if os.getpid() == agentPid:
+    signal.signal(signal.SIGINT, signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+    signal.signal(signal.SIGUSR1, debug)
+
+  global _handler
+  _handler = HeartbeatStopHandler()
+
+  return _handler
+
+def debug(sig, frame):
+  """Interrupt running process, and provide a python prompt for
+  interactive debugging."""
+  d={'_frame':frame}         # Allow access to frame object.
+  d.update(frame.f_globals)  # Unless shadowed by global
+  d.update(frame.f_locals)
+
+  message  = "Signal received : entering python shell.\nTraceback:\n"
+  message += ''.join(traceback.format_stack(frame))
+  logger.info(message)
+
+class HeartbeatStopHandler:
+  def __init__(self, stopEvent = None):
+    # Event is used for synchronizing heartbeat iterations (to make possible
+    # manual wait() interruption between heartbeats )
+    self.heartbeat_wait_event = threading.Event()
+
+    # Event is used to stop the Agent process
+    if stopEvent is None:
+      #Allow standalone testing
+      self.stop_event = threading.Event()
+    else:
+      #Allow one unique event per process
+      self.stop_event = stopEvent
+
+  def set_heartbeat(self):
+    self.heartbeat_wait_event.set()
+
+  def reset_heartbeat(self):
+    self.heartbeat_wait_event.clear()
+
+  def set_stop(self):
+    self.stop_event.set()
+
+  def wait(self, timeout1, timeout2 = 0):
+    if self.heartbeat_wait_event.wait(timeout = timeout1):
+      # Heartbeat event signaled; resume heartbeating immediately
+      return 1
+    # Otherwise sleep a bit more to allow STATUS_COMMAND results to be collected
+    # and sent in one heartbeat. Also avoid server overload with heartbeats.
+    # Return 0 on stop, matching the Windows handler, where the stop event
+    # holds index 0 in the WaitForMultipleObjects list.
+    if self.stop_event.wait(timeout = timeout2):
+      logger.info("Stop event received")
+      return 0
+    #Timeout
+    return -1

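Both stop handlers expose the same wait() contract: 0 means the stop event fired, 1 means a heartbeat was triggered via set_heartbeat(), and -1 means a plain timeout. A usage sketch:

import os
from HeartbeatStopHandler_linux import bind_signal_handlers

handler = bind_signal_handlers(os.getpid())  # installs SIGINT/SIGTERM -> set_stop()
rc = handler.wait(10, 2)
# rc == 0: stop requested; rc == 1: heartbeat triggered; rc == -1: timeout
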
+ 3 - 2
ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py

@@ -31,12 +31,13 @@ class HostCheckReportFileHandler:
   HOST_CHECK_FILE = "hostcheck.result"
 
   def __init__(self, config):
-    if config != None:
+    self.hostCheckFilePath = None
+    if config is not None:
       hostCheckFileDir = config.get('agent', 'prefix')
       self.hostCheckFilePath = os.path.join(hostCheckFileDir, self.HOST_CHECK_FILE)
 
   def writeHostCheckFile(self, hostInfo):
-    if self.hostCheckFilePath == None:
+    if self.hostCheckFilePath is None:
       return
 
     try:

+ 6 - 389
ambari-agent/src/main/python/ambari_agent/HostInfo.py

@@ -18,394 +18,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
-import os
-import glob
-import logging
-import pwd
-import re
-import time
-import subprocess
-import threading
-import shlex
 import platform
-import hostname
-from PackagesAnalyzer import PackagesAnalyzer
-from HostCheckReportFileHandler import HostCheckReportFileHandler
-from Hardware import Hardware
-from ambari_commons import OSCheck, OSConst, Firewall
-import socket
 
-logger = logging.getLogger()
-
-# service cmd
-SERVICE_CMD = "service"
-
-
-class HostInfo:
-  # List of project names to be used to find alternatives folders etc.
-  DEFAULT_PROJECT_NAMES = [
-    "hadoop*", "hadoop", "hbase", "hcatalog", "hive", "ganglia",
-    "oozie", "sqoop", "hue", "zookeeper", "mapred", "hdfs", "flume",
-    "storm", "hive-hcatalog", "tez", "falcon", "ambari_qa", "hadoop_deploy",
-    "rrdcached", "hcat", "ambari-qa", "sqoop-ambari-qa", "sqoop-ambari_qa",
-    "webhcat", "hadoop-hdfs", "hadoop-yarn", "hadoop-mapreduce"
-  ]
-
-  # List of live services checked for on the host, takes a map of plan strings
-  DEFAULT_LIVE_SERVICES = [
-    {OSConst.REDHAT_FAMILY: "ntpd", OSConst.SUSE_FAMILY: "ntp", OSConst.UBUNTU_FAMILY: "ntp"}
-  ]
-
-  # Set of default users (need to be replaced with the configured user names)
-  DEFAULT_USERS = [
-    "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
-    "hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
-    "hue", "yarn", "tez", "storm", "falcon", "kafka","knox"
-  ]
-
-  # Filters used to identify processed
-  PROC_FILTER = [
-    "hadoop", "zookeeper"
-  ]
-
-  # Additional path patterns to find existing directory
-  DIRNAME_PATTERNS = [
-    "/tmp/hadoop-", "/tmp/hsperfdata_"
-  ]
-
-  # Default set of directories that are checked for existence of files and folders
-  DEFAULT_DIRS = [
-    "/etc", "/var/run", "/var/log", "/usr/lib", "/var/lib", "/var/tmp", "/tmp", "/var", "/hadoop"
-  ]
-
-  # Packages that are used to find repos (then repos are used to find other packages)
-  PACKAGES = [
-    "hadoop_2_2_*","hadoop-2-2-.*","zookeeper_2_2_*","zookeeper-2-2-.*",
-    "hadoop", "zookeeper", "webhcat", "*-manager-server-db", "*-manager-daemons"
-  ]
-
-  # Additional packages to look for (search packages that start with these)
-  ADDITIONAL_PACKAGES = [
-    "rrdtool", "rrdtool-python", "ganglia", "gmond", "gweb", "libconfuse", 
-    "ambari-log4j", "hadoop", "zookeeper", "oozie", "webhcat"
-  ]
-
-  # ignore packages from repos whose names start with these strings
-  IGNORE_PACKAGES_FROM_REPOS = [
-    "ambari", "installed"
-  ]
-
-  # ignore required packages
-  IGNORE_PACKAGES = [
-    "epel-release"
-  ]
-
-  # ignore repos from the list of repos to be cleaned
-  IGNORE_REPOS = [
-    "ambari", "HDP-UTILS"
-  ]
-
-  # default timeout for async invoked processes
-  TIMEOUT_SECONDS = 60
-  RESULT_UNAVAILABLE = "unable_to_determine"
-
-  DEFAULT_SERVICE_NAME = "ntpd"
-  SERVICE_STATUS_CMD = "%s %s status" % (SERVICE_CMD, DEFAULT_SERVICE_NAME)
-
-  THP_FILE = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
-
-  event = threading.Event()
-
-  current_umask = -1
-
-  def __init__(self, config=None):
-    self.packages = PackagesAnalyzer()
-    self.config = config
-    self.reportFileHandler = HostCheckReportFileHandler(config)
-
-  def dirType(self, path):
-    if not os.path.exists(path):
-      return 'not_exist'
-    elif os.path.islink(path):
-      return 'sym_link'
-    elif os.path.isdir(path):
-      return 'directory'
-    elif os.path.isfile(path):
-      return 'file'
-    return 'unknown'
-
-  def hadoopVarRunCount(self):
-    if not os.path.exists('/var/run/hadoop'):
-      return 0
-    pids = glob.glob('/var/run/hadoop/*/*.pid')
-    return len(pids)
-
-  def hadoopVarLogCount(self):
-    if not os.path.exists('/var/log/hadoop'):
-      return 0
-    logs = glob.glob('/var/log/hadoop/*/*.log')
-    return len(logs)
-
-  def etcAlternativesConf(self, projects, etcResults):
-    if not os.path.exists('/etc/alternatives'):
-      return []
-    projectRegex = "'" + '|'.join(projects) + "'"
-    files = [f for f in os.listdir('/etc/alternatives') if re.match(projectRegex, f)]
-    for conf in files:
-      result = {}
-      filePath = os.path.join('/etc/alternatives', conf)
-      if os.path.islink(filePath):
-        realConf = os.path.realpath(filePath)
-        result['name'] = conf
-        result['target'] = realConf
-        etcResults.append(result)
-
-  def checkLiveServices(self, services, result):
-    osType = OSCheck.get_os_family()
-    for service in services:
-      svcCheckResult = {}
-      if isinstance(service, dict):
-        serviceName = service[osType]
-      else:
-        serviceName = service
-
-      service_check_live = shlex.split(self.SERVICE_STATUS_CMD)
-      service_check_live[1] = serviceName
-
-      svcCheckResult['name'] = serviceName
-      svcCheckResult['status'] = "UNKNOWN"
-      svcCheckResult['desc'] = ""
-      try:
-        osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
-                                  stderr=subprocess.PIPE)
-        out, err = osStat.communicate()
-        if 0 != osStat.returncode:
-          svcCheckResult['status'] = "Unhealthy"
-          svcCheckResult['desc'] = out
-          if len(out) == 0:
-            svcCheckResult['desc'] = err
-        else:
-          svcCheckResult['status'] = "Healthy"
-      except Exception, e:
-        svcCheckResult['status'] = "Unhealthy"
-        svcCheckResult['desc'] = repr(e)
-      result.append(svcCheckResult)
-
-  def checkUsers(self, users, results):
-    f = open('/etc/passwd', 'r')
-    for userLine in f:
-      fields = userLine.split(":")
-      if fields[0] in users:
-        result = {}
-        homeDir = fields[5]
-        result['name'] = fields[0]
-        result['homeDir'] = fields[5]
-        result['status'] = "Available"
-        if not os.path.exists(homeDir):
-          result['status'] = "Invalid home directory"
-        results.append(result)
-
-  def osdiskAvailableSpace(self, path):
-    diskInfo = {}
-    try:
-      df = subprocess.Popen(["df", "-kPT", path], stdout=subprocess.PIPE)
-      dfdata = df.communicate()[0]
-      return Hardware.extractMountInfo(dfdata.splitlines()[-1])
-    except:
-      pass
-    return diskInfo
-
-  def createAlerts(self, alerts):
-    existingUsers = []
-    self.checkUsers(self.DEFAULT_USERS, existingUsers)
-    dirs = []
-    self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
-    alert = {
-      'name': 'host_alert',
-      'instance': None,
-      'service': 'AMBARI',
-      'component': 'host',
-      'host': hostname.hostname(self.config),
-      'state': 'OK',
-      'label': 'Disk space',
-      'text': 'Used disk space less than 80%'}
-    message = ""
-    mountinfoSet = []
-    for dir in dirs:
-      if dir["type"] == 'directory':
-        mountinfo = self.osdiskAvailableSpace(dir['name'])
-        if int(mountinfo["percent"].strip('%')) >= 80:
-          if not mountinfo in mountinfoSet:
-            mountinfoSet.append(mountinfo)
-          message += str(dir['name']) + ";\n"
-
-    if message != "":
-      message = "These discs have low space:\n" + str(mountinfoSet) + "\n They include following critical directories:\n" + message
-      alert['state'] = 'WARNING'
-      alert['text'] = message
-    alerts.append(alert)
-    return alerts
-
-  def checkFolders(self, basePaths, projectNames, existingUsers, dirs):
-    foldersToIgnore = []
-    for user in existingUsers:
-      foldersToIgnore.append(user['homeDir'])
-    try:
-      for dirName in basePaths:
-        for project in projectNames:
-          path = os.path.join(dirName.strip(), project.strip())
-          if not path in foldersToIgnore and os.path.exists(path):
-            obj = {}
-            obj['type'] = self.dirType(path)
-            obj['name'] = path
-            dirs.append(obj)
-    except:
-      pass
-
-  def javaProcs(self, list):
-    try:
-      pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
-      for pid in pids:
-        cmd = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
-        cmd = cmd.replace('\0', ' ')
-        if not 'AmbariServer' in cmd:
-          if 'java' in cmd:
-            dict = {}
-            dict['pid'] = int(pid)
-            dict['hadoop'] = False
-            for filter in self.PROC_FILTER:
-              if filter in cmd:
-                dict['hadoop'] = True
-            dict['command'] = cmd.strip()
-            for line in open(os.path.join('/proc', pid, 'status')):
-              if line.startswith('Uid:'):
-                uid = int(line.split()[1])
-                dict['user'] = pwd.getpwuid(uid).pw_name
-            list.append(dict)
-    except:
-      pass
-    pass
-
-  def getReposToRemove(self, repos, ignoreList):
-    reposToRemove = []
-    for repo in repos:
-      addToRemoveList = True
-      for ignoreRepo in ignoreList:
-        if self.packages.nameMatch(ignoreRepo, repo):
-          addToRemoveList = False
-          continue
-      if addToRemoveList:
-        reposToRemove.append(repo)
-    return reposToRemove
-
-  def getUMask(self):
-    if (self.current_umask == -1):
-     self.current_umask = os.umask(self.current_umask)
-     os.umask(self.current_umask)
-     return self.current_umask
-    else:
-     return self.current_umask
-
-  def getTransparentHugePage(self):
-    # This file exists only on Red Hat 6
-    thp_regex = "\[(.+)\]"
-    if os.path.isfile(self.THP_FILE):
-      with open(self.THP_FILE) as f:
-        file_content = f.read()
-        return re.search(thp_regex, file_content).groups()[0]
-    else:
-      return ""
-
-  def checkIptables(self):
-    return Firewall().getFirewallObject().check_iptables()
-
-  """ Return various details about the host
-  componentsMapped: indicates if any components are mapped to this host
-  commandsInProgress: indicates if any commands are in progress
-  """
-  def register(self, dict, componentsMapped=True, commandsInProgress=True):
-    dict['hostHealth'] = {}
-
-    java = []
-    self.javaProcs(java)
-    dict['hostHealth']['activeJavaProcs'] = java
-
-    liveSvcs = []
-    self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
-    dict['hostHealth']['liveServices'] = liveSvcs
-
-    dict['umask'] = str(self.getUMask())
-
-    dict['transparentHugePage'] = self.getTransparentHugePage()
-    dict['iptablesIsRunning'] = self.checkIptables()
-    dict['reverseLookup'] = self.checkReverseLookup()
-    # If commands are in progress or components are already mapped to this host
-    # Then do not perform certain expensive host checks
-    if componentsMapped or commandsInProgress:
-      dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
-      dict['installedPackages'] = []
-      dict['alternatives'] = []
-      dict['stackFoldersAndFiles'] = []
-      dict['existingUsers'] = []
-
-    else:
-      etcs = []
-      self.etcAlternativesConf(self.DEFAULT_PROJECT_NAMES, etcs)
-      dict['alternatives'] = etcs
-
-      existingUsers = []
-      self.checkUsers(self.DEFAULT_USERS, existingUsers)
-      dict['existingUsers'] = existingUsers
-
-      dirs = []
-      self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
-      dict['stackFoldersAndFiles'] = dirs
-
-      installedPackages = []
-      availablePackages = []
-      self.packages.allInstalledPackages(installedPackages)
-      self.packages.allAvailablePackages(availablePackages)
-
-      repos = []
-      self.packages.getInstalledRepos(self.PACKAGES, installedPackages + availablePackages,
-                                      self.IGNORE_PACKAGES_FROM_REPOS, repos)
-      packagesInstalled = self.packages.getInstalledPkgsByRepo(repos, self.IGNORE_PACKAGES, installedPackages)
-      additionalPkgsInstalled = self.packages.getInstalledPkgsByNames(
-        self.ADDITIONAL_PACKAGES, installedPackages)
-      allPackages = list(set(packagesInstalled + additionalPkgsInstalled))
-      dict['installedPackages'] = self.packages.getPackageDetails(installedPackages, allPackages)
-
-      repos = self.getReposToRemove(repos, self.IGNORE_REPOS)
-      dict['existingRepos'] = repos
-
-      self.reportFileHandler.writeHostCheckFile(dict)
-      pass
-
-    # The time stamp must be recorded at the end
-    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
-
-    pass
-
-  def checkReverseLookup(self):
-    """
-    Check if host fqdn resolves to current host ip
-    """
-    try:
-      host_name = socket.gethostname()
-      host_ip = socket.gethostbyname(host_name)
-      host_fqdn = socket.getfqdn()
-      fqdn_ip = socket.gethostbyname(host_fqdn)
-      return host_ip == fqdn_ip
-    except socket.error:
-      pass
-    return False
-
-def main(argv=None):
-  h = HostInfo()
-  struct = {}
-  h.register(struct)
-  print struct
-
-
-if __name__ == '__main__':
-  main()
+if platform.system() == "Windows":
+  import HostInfo_win
+  HostInfo = HostInfo_win.HostInfo
+else:
+  import HostInfo_linux
+  HostInfo = HostInfo_linux.HostInfo

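Editor's note: HostInfo.py is now just a facade that binds the name once, at import time, to the OS-specific implementation, so existing call sites need no changes. A minimal usage sketch (illustrative only; assumes the ambari_agent package is on sys.path):

    # Callers are unaffected by the split: the platform check above has
    # already bound HostInfo to HostInfo_linux.HostInfo or
    # HostInfo_win.HostInfo before this import returns.
    from HostInfo import HostInfo

    host_info = HostInfo(config=None)
    registration = {}
    host_info.register(registration)  # populates host health details in place
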
+ 411 - 0
ambari-agent/src/main/python/ambari_agent/HostInfo_linux.py

@@ -0,0 +1,411 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import glob
+import logging
+import pwd
+import re
+import time
+import subprocess
+import threading
+import shlex
+import platform
+import hostname
+from PackagesAnalyzer import PackagesAnalyzer
+from HostCheckReportFileHandler import HostCheckReportFileHandler
+from Hardware import Hardware
+from ambari_commons import OSCheck, OSConst, Firewall
+import socket
+
+logger = logging.getLogger()
+
+# service cmd
+SERVICE_CMD = "service"
+
+
+class HostInfo:
+  # List of project names to be used to find alternatives folders etc.
+  DEFAULT_PROJECT_NAMES = [
+    "hadoop*", "hadoop", "hbase", "hcatalog", "hive", "ganglia",
+    "oozie", "sqoop", "hue", "zookeeper", "mapred", "hdfs", "flume",
+    "storm", "hive-hcatalog", "tez", "falcon", "ambari_qa", "hadoop_deploy",
+    "rrdcached", "hcat", "ambari-qa", "sqoop-ambari-qa", "sqoop-ambari_qa",
+    "webhcat", "hadoop-hdfs", "hadoop-yarn", "hadoop-mapreduce"
+  ]
+
+  # List of live services checked for on the host; each entry is either a plain string or a map keyed by OS family
+  DEFAULT_LIVE_SERVICES = [
+    {OSConst.REDHAT_FAMILY: "ntpd", OSConst.SUSE_FAMILY: "ntp", OSConst.UBUNTU_FAMILY: "ntp"}
+  ]
+
+  # Set of default users (need to be replaced with the configured user names)
+  DEFAULT_USERS = [
+    "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
+    "hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
+    "hue", "yarn", "tez", "storm", "falcon", "kafka","knox"
+  ]
+
+  # Filters used to identify Hadoop-related processes
+  PROC_FILTER = [
+    "hadoop", "zookeeper"
+  ]
+
+  # Additional path patterns used to find existing directories
+  DIRNAME_PATTERNS = [
+    "/tmp/hadoop-", "/tmp/hsperfdata_"
+  ]
+
+  # Default set of directories that are checked for existence of files and folders
+  DEFAULT_DIRS = [
+    "/etc", "/var/run", "/var/log", "/usr/lib", "/var/lib", "/var/tmp", "/tmp", "/var", "/hadoop"
+  ]
+
+  # Packages that are used to find repos (then repos are used to find other packages)
+  PACKAGES = [
+    "hadoop_2_2_*","hadoop-2-2-.*","zookeeper_2_2_*","zookeeper-2-2-.*",
+    "hadoop", "zookeeper", "webhcat", "*-manager-server-db", "*-manager-daemons"
+  ]
+
+  # Additional packages to look for (search packages that start with these)
+  ADDITIONAL_PACKAGES = [
+    "rrdtool", "rrdtool-python", "ganglia", "gmond", "gweb", "libconfuse",
+    "ambari-log4j", "hadoop", "zookeeper", "oozie", "webhcat"
+  ]
+
+  # ignore packages from repos whose names start with these strings
+  IGNORE_PACKAGES_FROM_REPOS = [
+    "ambari", "installed"
+  ]
+
+  # ignore required packages
+  IGNORE_PACKAGES = [
+    "epel-release"
+  ]
+
+  # ignore repos from the list of repos to be cleaned
+  IGNORE_REPOS = [
+    "ambari", "HDP-UTILS"
+  ]
+
+  # default timeout for async invoked processes
+  TIMEOUT_SECONDS = 60
+  RESULT_UNAVAILABLE = "unable_to_determine"
+
+  DEFAULT_SERVICE_NAME = "ntpd"
+  SERVICE_STATUS_CMD = "%s %s status" % (SERVICE_CMD, DEFAULT_SERVICE_NAME)
+
+  THP_FILE = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
+
+  event = threading.Event()
+
+  current_umask = -1
+
+  def __init__(self, config=None):
+    self.packages = PackagesAnalyzer()
+    self.config = config
+    self.reportFileHandler = HostCheckReportFileHandler(config)
+
+  def dirType(self, path):
+    if not os.path.exists(path):
+      return 'not_exist'
+    elif os.path.islink(path):
+      return 'sym_link'
+    elif os.path.isdir(path):
+      return 'directory'
+    elif os.path.isfile(path):
+      return 'file'
+    return 'unknown'
+
+  def hadoopVarRunCount(self):
+    if not os.path.exists('/var/run/hadoop'):
+      return 0
+    pids = glob.glob('/var/run/hadoop/*/*.pid')
+    return len(pids)
+
+  def hadoopVarLogCount(self):
+    if not os.path.exists('/var/log/hadoop'):
+      return 0
+    logs = glob.glob('/var/log/hadoop/*/*.log')
+    return len(logs)
+
+  def etcAlternativesConf(self, projects, etcResults):
+    if not os.path.exists('/etc/alternatives'):
+      return []
+    projectRegex = "'" + '|'.join(projects) + "'"
+    files = [f for f in os.listdir('/etc/alternatives') if re.match(projectRegex, f)]
+    for conf in files:
+      result = {}
+      filePath = os.path.join('/etc/alternatives', conf)
+      if os.path.islink(filePath):
+        realConf = os.path.realpath(filePath)
+        result['name'] = conf
+        result['target'] = realConf
+        etcResults.append(result)
+
+  def checkLiveServices(self, services, result):
+    osType = OSCheck.get_os_family()
+    for service in services:
+      svcCheckResult = {}
+      if isinstance(service, dict):
+        serviceName = service[osType]
+      else:
+        serviceName = service
+
+      service_check_live = shlex.split(self.SERVICE_STATUS_CMD)
+      service_check_live[1] = serviceName
+
+      svcCheckResult['name'] = serviceName
+      svcCheckResult['status'] = "UNKNOWN"
+      svcCheckResult['desc'] = ""
+      try:
+        osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
+          stderr=subprocess.PIPE)
+        out, err = osStat.communicate()
+        if 0 != osStat.returncode:
+          svcCheckResult['status'] = "Unhealthy"
+          svcCheckResult['desc'] = out
+          if len(out) == 0:
+            svcCheckResult['desc'] = err
+        else:
+          svcCheckResult['status'] = "Healthy"
+      except Exception, e:
+        svcCheckResult['status'] = "Unhealthy"
+        svcCheckResult['desc'] = repr(e)
+      result.append(svcCheckResult)
+
+  def checkUsers(self, users, results):
+    f = open('/etc/passwd', 'r')
+    for userLine in f:
+      fields = userLine.split(":")
+      if fields[0] in users:
+        result = {}
+        homeDir = fields[5]
+        result['name'] = fields[0]
+        result['homeDir'] = fields[5]
+        result['status'] = "Available"
+        if not os.path.exists(homeDir):
+          result['status'] = "Invalid home directory"
+        results.append(result)
+
+  def osdiskAvailableSpace(self, path):
+    diskInfo = {}
+    try:
+      df = subprocess.Popen(["df", "-kPT", path], stdout=subprocess.PIPE)
+      dfdata = df.communicate()[0]
+      return Hardware.extractMountInfo(dfdata.splitlines()[-1])
+    except:
+      pass
+    return diskInfo
+
+  def createAlerts(self, alerts):
+    existingUsers = []
+    self.checkUsers(self.DEFAULT_USERS, existingUsers)
+    dirs = []
+    self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
+    alert = {
+      'name': 'host_alert',
+      'instance': None,
+      'service': 'AMBARI',
+      'component': 'host',
+      'host': hostname.hostname(self.config),
+      'state': 'OK',
+      'label': 'Disk space',
+      'text': 'Used disk space less than 80%'}
+    message = ""
+    mountinfoSet = []
+    for dir in dirs:
+      if dir["type"] == 'directory':
+        mountinfo = self.osdiskAvailableSpace(dir['name'])
+        if int(mountinfo["percent"].strip('%')) >= 80:
+          if not mountinfo in mountinfoSet:
+            mountinfoSet.append(mountinfo)
+          message += str(dir['name']) + ";\n"
+
+    if message != "":
+      message = "These discs have low space:\n" + str(mountinfoSet) + "\n They include following critical directories:\n" + message
+      alert['state'] = 'WARNING'
+      alert['text'] = message
+    alerts.append(alert)
+    return alerts
+
+  def checkFolders(self, basePaths, projectNames, existingUsers, dirs):
+    foldersToIgnore = []
+    for user in existingUsers:
+      foldersToIgnore.append(user['homeDir'])
+    try:
+      for dirName in basePaths:
+        for project in projectNames:
+          path = os.path.join(dirName.strip(), project.strip())
+          if not path in foldersToIgnore and os.path.exists(path):
+            obj = {}
+            obj['type'] = self.dirType(path)
+            obj['name'] = path
+            dirs.append(obj)
+    except:
+      pass
+
+  def javaProcs(self, list):
+    try:
+      pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
+      for pid in pids:
+        cmd = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
+        cmd = cmd.replace('\0', ' ')
+        if not 'AmbariServer' in cmd:
+          if 'java' in cmd:
+            dict = {}
+            dict['pid'] = int(pid)
+            dict['hadoop'] = False
+            for filter in self.PROC_FILTER:
+              if filter in cmd:
+                dict['hadoop'] = True
+            dict['command'] = cmd.strip()
+            for line in open(os.path.join('/proc', pid, 'status')):
+              if line.startswith('Uid:'):
+                uid = int(line.split()[1])
+                dict['user'] = pwd.getpwuid(uid).pw_name
+            list.append(dict)
+    except:
+      pass
+    pass
+
+  def getReposToRemove(self, repos, ignoreList):
+    reposToRemove = []
+    for repo in repos:
+      addToRemoveList = True
+      for ignoreRepo in ignoreList:
+        if self.packages.nameMatch(ignoreRepo, repo):
+          addToRemoveList = False
+          continue
+      if addToRemoveList:
+        reposToRemove.append(repo)
+    return reposToRemove
+
+  def getUMask(self):
+    if (self.current_umask == -1):
+      self.current_umask = os.umask(self.current_umask)
+      os.umask(self.current_umask)
+      return self.current_umask
+    else:
+      return self.current_umask
+
+  def getTransparentHugePage(self):
+    # This file exists only on Red Hat 6
+    thp_regex = "\[(.+)\]"
+    if os.path.isfile(self.THP_FILE):
+      with open(self.THP_FILE) as f:
+        file_content = f.read()
+        return re.search(thp_regex, file_content).groups()[0]
+    else:
+      return ""
+
+  def checkIptables(self):
+    return Firewall().getFirewallObject().check_iptables()
+
+  """ Return various details about the host
+  componentsMapped: indicates if any components are mapped to this host
+  commandsInProgress: indicates if any commands are in progress
+  """
+  def register(self, dict, componentsMapped=True, commandsInProgress=True):
+    dict['hostHealth'] = {}
+
+    java = []
+    self.javaProcs(java)
+    dict['hostHealth']['activeJavaProcs'] = java
+
+    liveSvcs = []
+    self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
+    dict['hostHealth']['liveServices'] = liveSvcs
+
+    dict['umask'] = str(self.getUMask())
+
+    dict['transparentHugePage'] = self.getTransparentHugePage()
+    dict['iptablesIsRunning'] = self.checkIptables()
+    dict['reverseLookup'] = self.checkReverseLookup()
+    # If commands are in progress or components are already mapped to this host
+    # Then do not perform certain expensive host checks
+    if componentsMapped or commandsInProgress:
+      dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
+      dict['installedPackages'] = []
+      dict['alternatives'] = []
+      dict['stackFoldersAndFiles'] = []
+      dict['existingUsers'] = []
+
+    else:
+      etcs = []
+      self.etcAlternativesConf(self.DEFAULT_PROJECT_NAMES, etcs)
+      dict['alternatives'] = etcs
+
+      existingUsers = []
+      self.checkUsers(self.DEFAULT_USERS, existingUsers)
+      dict['existingUsers'] = existingUsers
+
+      dirs = []
+      self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
+      dict['stackFoldersAndFiles'] = dirs
+
+      installedPackages = []
+      availablePackages = []
+      self.packages.allInstalledPackages(installedPackages)
+      self.packages.allAvailablePackages(availablePackages)
+
+      repos = []
+      self.packages.getInstalledRepos(self.PACKAGES, installedPackages + availablePackages,
+        self.IGNORE_PACKAGES_FROM_REPOS, repos)
+      packagesInstalled = self.packages.getInstalledPkgsByRepo(repos, self.IGNORE_PACKAGES, installedPackages)
+      additionalPkgsInstalled = self.packages.getInstalledPkgsByNames(
+        self.ADDITIONAL_PACKAGES, installedPackages)
+      allPackages = list(set(packagesInstalled + additionalPkgsInstalled))
+      dict['installedPackages'] = self.packages.getPackageDetails(installedPackages, allPackages)
+
+      repos = self.getReposToRemove(repos, self.IGNORE_REPOS)
+      dict['existingRepos'] = repos
+
+      self.reportFileHandler.writeHostCheckFile(dict)
+      pass
+
+    # The time stamp must be recorded at the end
+    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
+
+    pass
+
+  def checkReverseLookup(self):
+    """
+    Check if host fqdn resolves to current host ip
+    """
+    try:
+      host_name = socket.gethostname()
+      host_ip = socket.gethostbyname(host_name)
+      host_fqdn = socket.getfqdn()
+      fqdn_ip = socket.gethostbyname(host_fqdn)
+      return host_ip == fqdn_ip
+    except socket.error:
+      pass
+    return False
+
+def main(argv=None):
+  h = HostInfo()
+  struct = {}
+  h.register(struct)
+  print struct
+
+
+if __name__ == '__main__':
+  main()

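Editor's note: getUMask() above uses a trick worth spelling out. POSIX offers no read-only accessor for the process umask, so the only way to read it is to set one and immediately restore whatever os.umask() returns. A standalone sketch of the same pattern, using 0 as the throwaway mask:

    import os

    def read_umask():
        # os.umask returns the previous mask while installing a new one, so
        # install a throwaway mask, capture the old value, then restore it.
        previous = os.umask(0)
        os.umask(previous)
        return previous

    if __name__ == '__main__':
        print(oct(read_umask()))
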
+ 231 - 0
ambari-agent/src/main/python/ambari_agent/HostInfo_win.py

@@ -0,0 +1,231 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import logging
+import time
+import subprocess
+from HostCheckReportFileHandler import HostCheckReportFileHandler
+from shell import shellRunner
+from ambari_commons.os_check import OSCheck, OSConst
+from ambari_commons.os_windows import run_powershell_script, CHECK_FIREWALL_SCRIPT
+import socket
+
+logger = logging.getLogger()
+
+# OS info
+OS_VERSION = OSCheck().get_os_major_version()
+OS_TYPE = OSCheck.get_os_type()
+OS_FAMILY = OSCheck.get_os_family()
+
+class HostInfo:
+  # List of live services checked for on the host; each entry is either a plain string or a map keyed by OS family
+  DEFAULT_LIVE_SERVICES = [
+    {OSConst.WINSRV_FAMILY: "W32Time"}
+  ]
+
+  # Set of default users (need to be replaced with the configured user names)
+  DEFAULT_USERS = [
+    "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
+    "hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
+    "hue", "yarn"
+  ]
+
+  # Filters used to identify Hadoop-related processes
+  PROC_FILTER = [
+    "hadoop", "zookeeper"
+  ]
+
+  RESULT_UNAVAILABLE = "unable_to_determine"
+
+  SERVICE_STATUS_CMD = 'If ((Get-Service | Where-Object {{$_.Name -eq \'{0}\'}}).Status -eq \'Running\') {{echo "Running"; $host.SetShouldExit(0)}} Else {{echo "Stopped"; $host.SetShouldExit(1)}}'
+  GET_USERS_CMD = '$accounts=(Get-WmiObject -Class Win32_UserAccount -Namespace "root\cimv2" -Filter "LocalAccount=\'$True\'" -ComputerName "LocalHost" -ErrorAction Stop); foreach ($acc in $accounts) {echo $acc.Name}'
+  GET_JAVA_PROC_CMD = 'foreach ($process in (gwmi Win32_Process -Filter "name = \'java.exe\'")){echo $process.ProcessId;echo $process.CommandLine; echo $process.GetOwner().User}'
+
+  current_umask = -1
+
+  def __init__(self, config=None):
+    self.reportFileHandler = HostCheckReportFileHandler(config)
+
+  def dirType(self, path):
+    if not os.path.exists(path):
+      return 'not_exist'
+    elif os.path.islink(path):
+      return 'sym_link'
+    elif os.path.isdir(path):
+      return 'directory'
+    elif os.path.isfile(path):
+      return 'file'
+    return 'unknown'
+
+  def checkLiveServices(self, services, result):
+    osType = OSCheck.get_os_family()
+    for service in services:
+      svcCheckResult = {}
+      if isinstance(service, dict):
+        serviceName = service[osType]
+      else:
+        serviceName = service
+
+      service_check_live = ["powershell",'-noProfile', '-NonInteractive',  '-nologo', "-Command", self.SERVICE_STATUS_CMD.format(serviceName)]
+      svcCheckResult['name'] = serviceName
+      svcCheckResult['status'] = "UNKNOWN"
+      svcCheckResult['desc'] = ""
+      try:
+        osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
+                                  stderr=subprocess.PIPE)
+        out, err = osStat.communicate()
+        if 0 != osStat.returncode:
+          svcCheckResult['status'] = "Unhealthy"
+          svcCheckResult['desc'] = out
+          if len(out) == 0:
+            svcCheckResult['desc'] = err
+        else:
+          svcCheckResult['status'] = "Healthy"
+      except Exception, e:
+        svcCheckResult['status'] = "Unhealthy"
+        svcCheckResult['desc'] = repr(e)
+      result.append(svcCheckResult)
+
+  #TODO get user directory
+  def checkUsers(self, users, results):
+    get_users_cmd = ["powershell",'-noProfile', '-NonInteractive',  '-nologo', "-Command", self.GET_USERS_CMD]
+    try:
+      osStat = subprocess.Popen(get_users_cmd, stdout=subprocess.PIPE,                               stderr=subprocess.PIPE)
+      out, err = osStat.communicate()
+    except:
+      raise Exception("Failed to get users.")
+    for user in out.split(os.linesep):
+      if user in users:
+        result = {}
+        result['name'] = user
+        result['status'] = "Available"
+        results.append(result)
+
+  def createAlerts(self, alerts):
+    #TODO AMBARI-7849 Implement createAlerts for Windows
+    return alerts
+
+  def javaProcs(self, list):
+    try:
+      runner = shellRunner()
+      command_result = runner.run(["powershell",'-noProfile', '-NonInteractive',  '-nologo', "-Command", self.GET_JAVA_PROC_CMD])
+      if command_result["exitCode"] == 0:
+        splitted_output = command_result["output"].split(os.linesep)
+        for i in range(0, len(splitted_output), 3):
+          pid = splitted_output[i]
+          cmd = splitted_output[i+1]
+          user = splitted_output[i+2]
+          if not 'AmbariServer' in cmd:
+            if 'java' in cmd:
+              dict = {}
+              dict['pid'] = int(pid)
+              dict['hadoop'] = False
+              for filter in self.PROC_FILTER:
+                if filter in cmd:
+                  dict['hadoop'] = True
+              dict['command'] = cmd.strip()
+              dict['user'] = user
+              list.append(dict)
+    except Exception as e:
+      pass
+    pass
+
+  def getUMask(self):
+    if (self.current_umask == -1):
+      self.current_umask = os.umask(self.current_umask)
+      os.umask(self.current_umask)
+      return self.current_umask
+    else:
+      return self.current_umask
+
+  def checkIptables(self):
+    out = run_powershell_script(CHECK_FIREWALL_SCRIPT)
+    if out[0] != 0:
+      logger.warn("Unable to check firewall status:{0}".format(out[2]))
+      return False
+    profiles_status = [i for i in out[1].split("\n") if not i == ""]
+    if "1" in profiles_status:
+      return True
+    return False
+
+  """ Return various details about the host
+  componentsMapped: indicates if any components are mapped to this host
+  commandsInProgress: indicates if any commands are in progress
+  """
+  def register(self, dict, componentsMapped=True, commandsInProgress=True):
+    dict['hostHealth'] = {}
+
+    java = []
+    self.javaProcs(java)
+    dict['hostHealth']['activeJavaProcs'] = java
+
+    liveSvcs = []
+    self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
+    dict['hostHealth']['liveServices'] = liveSvcs
+
+    dict['umask'] = str(self.getUMask())
+
+    dict['iptablesIsRunning'] = self.checkIptables()
+    dict['reverseLookup'] = self.checkReverseLookup()
+    # If commands are in progress or components are already mapped to this host
+    # Then do not perform certain expensive host checks
+    if componentsMapped or commandsInProgress:
+      dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
+      dict['installedPackages'] = []
+      dict['alternatives'] = []
+      dict['stackFoldersAndFiles'] = []
+      dict['existingUsers'] = []
+    else:
+      existingUsers = []
+      self.checkUsers(self.DEFAULT_USERS, existingUsers)
+      dict['existingUsers'] = existingUsers
+      #TODO check HDP stack and folders here
+      self.reportFileHandler.writeHostCheckFile(dict)
+      pass
+
+    # The time stamp must be recorded at the end
+    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
+
+    pass
+
+  def checkReverseLookup(self):
+    """
+    Check if host fqdn resolves to current host ip
+    """
+    try:
+      host_name = socket.gethostname().lower()
+      host_ip = socket.gethostbyname(host_name)
+      host_fqdn = socket.getfqdn().lower()
+      fqdn_ip = socket.gethostbyname(host_fqdn)
+      return host_ip == fqdn_ip
+    except socket.error:
+      pass
+    return False
+
+def main(argv=None):
+  h = HostInfo()
+  struct = {}
+  h.register(struct)
+  print struct
+
+
+if __name__ == '__main__':
+  main()

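Editor's note: the Windows service check above shells out to powershell.exe and relies on $host.SetShouldExit() to surface the service state as the process exit code, which the Python side then reads via returncode. A trimmed-down sketch of the same pattern; W32Time is used only because it exists on stock Windows installs:

    import subprocess

    PS_CMD = ("If ((Get-Service | Where-Object {$_.Name -eq 'W32Time'}).Status"
              " -eq 'Running') {echo 'Running'; $host.SetShouldExit(0)}"
              " Else {echo 'Stopped'; $host.SetShouldExit(1)}")

    def service_is_running():
        # SetShouldExit propagates the status as powershell.exe's exit code.
        proc = subprocess.Popen(
            ["powershell", "-NoProfile", "-NonInteractive", "-NoLogo",
             "-Command", PS_CMD],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return proc.returncode == 0
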
+ 1 - 1
ambari-agent/src/main/python/ambari_agent/LiveStatus.py

@@ -65,7 +65,7 @@ class LiveStatus:
       status = self.DEAD_STATUS # CLIENT components can't have status STARTED
     elif component in self.COMPONENTS:
       statusCheck = StatusCheck(AmbariConfig.servicesToPidNames,
-                                AmbariConfig.pidPathesVars, self.globalConfig,
+                                AmbariConfig.pidPathVars, self.globalConfig,
                                 AmbariConfig.servicesToLinuxUser)
       serviceStatus = statusCheck.getStatus(self.component)
       if serviceStatus is None:

+ 27 - 3
ambari-agent/src/main/python/ambari_agent/NetUtil.py

@@ -15,10 +15,10 @@
 
 
 from urlparse import urlparse
-import time
 import logging
 import httplib
 from ssl import SSLError
+import platform
 
 ERROR_SSL_WRONG_VERSION = "SSLError: Failed to connect. Please check openssl library versions. \n" +\
               "Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1022468 for more details."
@@ -39,6 +39,23 @@ class NetUtil:
   # For testing purposes
   DEBUG_STOP_RETRIES_FLAG = False
 
+  # Stop handler implementation.
+  # Typically it waits up to a given time for the daemon/service to receive the stop signal.
+  # wait() takes the number of seconds to wait as an argument and returns 0
+  # when the application should stop, or a nonzero value to continue execution.
+  stopCallback = None
+
+  def __init__(self, stop_callback=None):
+    if stop_callback is None:
+      IS_WINDOWS = platform.system() == "Windows"
+      if IS_WINDOWS:
+        from HeartbeatHandlers_windows import HeartbeatStopHandler
+      else:
+        from HeartbeatStopHandler_linux import HeartbeatStopHandler
+      stop_callback = HeartbeatStopHandler()
+
+    self.stopCallback = stop_callback
+
   def checkURL(self, url):
     """Try to connect to a given url. Result is True if url returns HTTP code 200, in any other case
     (like unreachable server or wrong HTTP code) result will be False.
@@ -78,6 +95,7 @@ class NetUtil:
 
     Returns count of retries
     """
+    connected = False
     if logger is not None:
       logger.debug("Trying to connect to %s", server_url)
 
@@ -85,11 +103,17 @@ class NetUtil:
     while (max_retries == -1 or retries < max_retries) and not self.DEBUG_STOP_RETRIES_FLAG:
       server_is_up, responseBody = self.checkURL(self.SERVER_STATUS_REQUEST.format(server_url))
       if server_is_up:
+        connected = True
         break
       else:
         if logger is not None:
           logger.warn('Server at {0} is not reachable, sleeping for {1} seconds...'.format(server_url,
             self.CONNECT_SERVER_RETRY_INTERVAL_SEC))
         retries += 1
-        time.sleep(self.CONNECT_SERVER_RETRY_INTERVAL_SEC)
-    return retries
+
+      if 0 == self.stopCallback.wait(self.CONNECT_SERVER_RETRY_INTERVAL_SEC):
+        #stop waiting
+        if logger is not None:
+          logger.info("Stop event received")
+        self.DEBUG_STOP_RETRIES_FLAG = True
+    return retries, connected

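Editor's note: the key change in try_to_connect() above is replacing time.sleep() with a wait on the stop handler, so a Windows service stop (or a signal on Linux) can break the retry loop immediately instead of after the full sleep interval; the method also now reports whether it actually connected. A minimal sketch of the same loop shape, using a plain threading.Event as a stand-in for the heartbeat stop handler:

    import threading

    stop_event = threading.Event()  # set() from a signal/service-stop handler

    def wait_for_server(check_url, interval_sec=10, max_retries=5):
        retries = 0
        connected = False
        while retries < max_retries:
            if check_url():
                connected = True
                break
            retries += 1
            # Unlike time.sleep(), Event.wait() returns as soon as the stop
            # event is set; True here means "stop requested".
            if stop_event.wait(interval_sec):
                break
        return retries, connected
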
+ 7 - 7
ambari-agent/src/main/python/ambari_agent/PackagesAnalyzer.py

@@ -77,7 +77,7 @@ class PackagesAnalyzer:
   # All installed packages in systems supporting yum
   def allInstalledPackages(self, allInstalledPackages):
     osType = OSCheck.get_os_family()
-    
+
     if osType == OSConst.SUSE_FAMILY:
       return self.lookUpZypperPackages(
         ["zypper", "search", "--installed-only", "--details"],
@@ -90,11 +90,11 @@ class PackagesAnalyzer:
     elif osType == OSConst.UBUNTU_FAMILY:
        return self.lookUpAptPackages(
         LIST_INSTALLED_PACKAGES_UBUNTU,
-        allInstalledPackages)   
+        allInstalledPackages)
 
   def allAvailablePackages(self, allAvailablePackages):
     osType = OSCheck.get_os_family()
-    
+
     if osType == OSConst.SUSE_FAMILY:
       return self.lookUpZypperPackages(
         ["zypper", "search", "--uninstalled-only", "--details"],
@@ -107,16 +107,16 @@ class PackagesAnalyzer:
     elif osType == OSConst.UBUNTU_FAMILY:
        return self.lookUpAptPackages(
         LIST_AVAILABLE_PACKAGES_UBUNTU,
-        allAvailablePackages)   
-      
-  def lookUpAptPackages(self, command, allPackages):   
+        allAvailablePackages)
+
+  def lookUpAptPackages(self, command, allPackages):
     try:
       result = self.subprocessWithTimeout(command)
       if 0 == result['retCode']:
         for x in result['out'].split('\n'):
           if x.strip():
             allPackages.append(x.split(' '))
-      
+
     except:
       pass
 

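Editor's note: for context, lookUpAptPackages() above runs a package-listing command under a timeout and splits each non-empty output line into fields. A hedged sketch of that flow; the real LIST_INSTALLED_PACKAGES_UBUNTU command differs, and the dpkg-query call below is a simplified stand-in:

    import subprocess

    def list_installed_packages():
        packages = []
        proc = subprocess.Popen(
            ["dpkg-query", "-W", "-f=${Package} ${Version}\\n"],
            stdout=subprocess.PIPE)
        out = proc.communicate()[0]
        # Keep only non-empty lines, splitting each into [name, version].
        for line in out.split('\n'):
            if line.strip():
                packages.append(line.split(' '))
        return packages
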
+ 19 - 18
ambari-agent/src/main/python/ambari_agent/PythonExecutor.py

@@ -23,9 +23,10 @@ import os
 import subprocess
 import pprint
 import threading
+import platform
 from threading import Thread
 import time
-from BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle 
+from BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
 
 from Grep import Grep
 import shell, sys
@@ -58,7 +59,7 @@ class PythonExecutor:
       tmpout =  open(tmpoutfile, 'a')
       tmperr =  open(tmperrfile, 'a')
     return tmpout, tmperr
-    
+
   def run_file(self, script, script_params, tmp_dir, tmpoutfile, tmperrfile,
                timeout, tmpstructedoutfile, logger_level, callback, task_id,
                override_output_files = True, handle = None):
@@ -84,7 +85,7 @@ class PythonExecutor:
     logger.info("Running command " + pprint.pformat(pythonCommand))
     if(handle == None) :
       tmpout, tmperr = self.open_subporcess_files(tmpoutfile, tmperrfile, override_output_files)
-      
+
       process = self.launch_python_subprocess(pythonCommand, tmpout, tmperr)
       # map task_id to pid
       callback(task_id, process.pid)
@@ -100,7 +101,7 @@ class PythonExecutor:
       return self.prepare_process_result(process, tmpoutfile, tmperrfile, tmpstructedoutfile, timeout=timeout)
     else:
       holder = Holder(pythonCommand, tmpoutfile, tmperrfile, tmpstructedoutfile, handle)
-      
+
       background = BackgroundThread(holder, self)
       background.start()
       return {"exitcode": 777}
@@ -117,7 +118,7 @@ class PythonExecutor:
     result = self.condenseOutput(out, error, returncode, structured_out)
     logger.info("Result: %s" % result)
     return result
-  
+
   def read_result_from_files(self, out_path, err_path, structured_out_path):
     out = open(out_path, 'r').read()
     error = open(err_path, 'r').read()
@@ -134,21 +135,23 @@ class PythonExecutor:
       else:
         structured_out = {}
     return out, error, structured_out
-  
+
   def launch_python_subprocess(self, command, tmpout, tmperr):
     """
     Creates subprocess with given parameters. This functionality was moved to separate method
     to make possible unit testing
     """
+    close_fds = None if platform.system() == "Windows" else True
     return subprocess.Popen(command,
       stdout=tmpout,
-      stderr=tmperr, close_fds=True)
-    
+      stderr=tmperr, close_fds=close_fds)
+
   def isSuccessfull(self, returncode):
     return not self.python_process_has_been_killed and returncode == 0
 
   def python_command(self, script, script_params):
-    python_binary = sys.executable
+    # On Windows the Python executable must be passed explicitly, because sys.executable returns the service wrapper there
+    python_binary = os.environ['PYTHON_EXE'] if 'PYTHON_EXE' in os.environ else sys.executable
     python_command = [python_binary, script] + script_params
     return python_command
 
@@ -180,31 +183,29 @@ class Holder:
     self.err_file = err_file
     self.structured_out_file = structured_out_file
     self.handle = handle
-    
+
 class BackgroundThread(threading.Thread):
   def __init__(self, holder, pythonExecutor):
     threading.Thread.__init__(self)
     self.holder = holder
     self.pythonExecutor = pythonExecutor
-  
+
   def run(self):
     process_out, process_err  = self.pythonExecutor.open_subporcess_files(self.holder.out_file, self.holder.err_file, True)
-    
+
     logger.info("Starting process command %s" % self.holder.command)
     process = self.pythonExecutor.launch_python_subprocess(self.holder.command, process_out, process_err)
-    
+
     logger.info("Process has been started. Pid = %s" % process.pid)
-    
+
     self.holder.handle.pid = process.pid
     self.holder.handle.status = BackgroundCommandExecutionHandle.RUNNING_STATUS
     self.holder.handle.on_background_command_started(self.holder.handle.command['taskId'], process.pid)
-    
+
     process.communicate()
-    
+
     self.holder.handle.exitCode = process.returncode
     process_condenced_result = self.pythonExecutor.prepare_process_result(process, self.holder.out_file, self.holder.err_file, self.holder.structured_out_file)
     logger.info("Calling callback with args %s" % process_condenced_result)
     self.holder.handle.on_background_command_complete_callback(process_condenced_result, self.holder.handle)
     logger.info("Exiting from thread for holder pid %s" % self.holder.handle.pid)
-    
-  

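Editor's note: two portability tweaks above deserve a callout. close_fds=True is rejected on Windows under Python 2 when stdout/stderr are redirected, hence the platform switch; and the PYTHON_EXE environment override exists because sys.executable points at the service wrapper when the agent runs as a Windows service. A condensed sketch of both, assuming the same PYTHON_EXE convention:

    import os
    import platform
    import subprocess
    import sys

    def launch_script(script, args, tmpout, tmperr):
        # Under a service wrapper, sys.executable is the wrapper binary, so
        # honor an explicit PYTHON_EXE if one is set.
        python_binary = os.environ.get('PYTHON_EXE', sys.executable)
        # close_fds=True raises on Windows (Python 2) when handles are
        # redirected, so only request it on POSIX.
        close_fds = platform.system() != "Windows"
        return subprocess.Popen([python_binary, script] + list(args),
                                stdout=tmpout, stderr=tmperr,
                                close_fds=close_fds)
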
+ 3 - 3
ambari-agent/src/main/python/ambari_agent/StatusCheck.py

@@ -54,7 +54,7 @@ class StatusCheck:
       
   def fillDirValues(self):
     try:
-      for pidVar in self.pidPathesVars:
+      for pidVar in self.pidPathVars:
         pidVarName = pidVar['var']
         pidDefaultvalue = pidVar['defaultValue']
         if self.globalConfig.has_key(pidVarName):
@@ -64,11 +64,11 @@ class StatusCheck:
     except Exception as e:
         logger.error("Error while filling directories values " + str(e))
         
-  def __init__(self, serviceToPidDict, pidPathesVars, globalConfig,
+  def __init__(self, serviceToPidDict, pidPathVars, globalConfig,
     servicesToLinuxUser):
     
     self.serToPidDict = serviceToPidDict.copy()
-    self.pidPathesVars = pidPathesVars
+    self.pidPathVars = pidPathVars
     self.pidPathes = []
     self.sh = shellRunner()
     self.pidFilesDict = {}

+ 4 - 4
ambari-agent/src/main/python/ambari_agent/hostname.py

@@ -44,11 +44,11 @@ def hostname(config):
       if (0 == osStat.returncode and 0 != len(out.strip())):
         cached_hostname = out.strip()
       else:
-        cached_hostname = socket.getfqdn()
+        cached_hostname = socket.getfqdn().lower()
     except:
-      cached_hostname = socket.getfqdn()
+      cached_hostname = socket.getfqdn().lower()
   except:
-    cached_hostname = socket.getfqdn()
+    cached_hostname = socket.getfqdn().lower()
   return cached_hostname
 
 
@@ -81,7 +81,7 @@ def public_hostname(config):
     handle.close()
     cached_public_hostname = str
   except Exception, e:
-    cached_public_hostname = socket.getfqdn()
+    cached_public_hostname = socket.getfqdn().lower()
   return cached_public_hostname
 
 def main(argv=None):

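Editor's note: the recurring .lower() calls above are the substance of this change. Windows hosts often report mixed-case names; DNS is case-insensitive, but Ambari compares hostnames as plain strings, so every fallback path now normalizes to lower case. The whole pattern in one place:

    import socket

    def canonical_hostname():
        # Fold to lower case once at the source so string comparisons against
        # server-side hostnames behave like case-insensitive DNS.
        return socket.getfqdn().lower()
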
+ 50 - 57
ambari-agent/src/main/python/ambari_agent/main.py

@@ -25,60 +25,45 @@ import sys
 import traceback
 import os
 import time
+import platform
 import ConfigParser
 import ProcessHelper
 from Controller import Controller
-from AmbariConfig import AmbariConfig
+import AmbariConfig
 from NetUtil import NetUtil
 from PingPortListener import PingPortListener
 import hostname
 from DataCleaner import DataCleaner
 import socket
-
 logger = logging.getLogger()
+
 formatstr = "%(levelname)s %(asctime)s %(filename)s:%(lineno)d - %(message)s"
 agentPid = os.getpid()
-config = AmbariConfig()
+config = AmbariConfig.AmbariConfig()
 configFile = config.CONFIG_FILE
 two_way_ssl_property = config.TWO_WAY_SSL_PROPERTY
 
-if 'AMBARI_LOG_DIR' in os.environ:
-  logfile = os.environ['AMBARI_LOG_DIR'] + "/ambari-agent.log"
-else:
-  logfile = "/var/log/ambari-agent/ambari-agent.log"
-
-def signal_handler(signum, frame):
-  #we want the handler to run only for the agent process and not
-  #for the children (e.g. namenode, etc.)
-  if os.getpid() != agentPid:
-    os._exit(0)
-  logger.info('signal received, exiting.')
-  ProcessHelper.stopAgent()
-
-def debug(sig, frame):
-  """Interrupt running process, and provide a python prompt for
-  interactive debugging."""
-  d={'_frame':frame}         # Allow access to frame object.
-  d.update(frame.f_globals)  # Unless shadowed by global
-  d.update(frame.f_locals)
-
-  message  = "Signal received : entering python shell.\nTraceback:\n"
-  message += ''.join(traceback.format_stack(frame))
-  logger.info(message)
+IS_WINDOWS = platform.system() == "Windows"
 
+if IS_WINDOWS:
+  from HeartbeatHandlers_windows import bind_signal_handlers
+else:
+  from HeartbeatStopHandler_linux import bind_signal_handlers
+  from HeartbeatStopHandler_linux import signal_handler
+  from HeartbeatStopHandler_linux import debug
 
 def setup_logging(verbose):
   formatter = logging.Formatter(formatstr)
-  rotateLog = logging.handlers.RotatingFileHandler(logfile, "a", 10000000, 25)
+  rotateLog = logging.handlers.RotatingFileHandler(AmbariConfig.AmbariConfig.getLogFile(), "a", 10000000, 25)
   rotateLog.setFormatter(formatter)
   logger.addHandler(rotateLog)
 
   if verbose:
-    logging.basicConfig(format=formatstr, level=logging.DEBUG, filename=logfile)
+    logging.basicConfig(format=formatstr, level=logging.DEBUG, filename=AmbariConfig.AmbariConfig.getLogFile())
     logger.setLevel(logging.DEBUG)
     logger.info("loglevel=logging.DEBUG")
   else:
-    logging.basicConfig(format=formatstr, level=logging.INFO, filename=logfile)
+    logging.basicConfig(format=formatstr, level=logging.INFO, filename=AmbariConfig.AmbariConfig.getLogFile())
     logger.setLevel(logging.INFO)
     logger.info("loglevel=logging.INFO")
 
@@ -89,35 +74,30 @@ def update_log_level(config):
     loglevel = config.get('agent', 'loglevel')
     if loglevel is not None:
       if loglevel == 'DEBUG':
-        logging.basicConfig(format=formatstr, level=logging.DEBUG, filename=logfile)
+        logging.basicConfig(format=formatstr, level=logging.DEBUG, filename=AmbariConfig.AmbariConfig.getLogFile())
         logger.setLevel(logging.DEBUG)
         logger.info("Newloglevel=logging.DEBUG")
       else:
-        logging.basicConfig(format=formatstr, level=logging.INFO, filename=logfile)
+        logging.basicConfig(format=formatstr, level=logging.INFO, filename=AmbariConfig.AmbariConfig.getLogFile())
         logger.setLevel(logging.INFO)
         logger.debug("Newloglevel=logging.INFO")
   except Exception, err:
     logger.info("Default loglevel=DEBUG")
 
 
-def bind_signal_handlers():
-  signal.signal(signal.SIGINT, signal_handler)
-  signal.signal(signal.SIGTERM, signal_handler)
-  signal.signal(signal.SIGUSR1, debug)
-
-
 #  ToDo: move that function inside AmbariConfig
 def resolve_ambari_config():
   global config
+  configPath = os.path.abspath(AmbariConfig.AmbariConfig.getConfigFile())
+
   try:
-    if os.path.exists(configFile):
-        config.read(configFile)
+    if os.path.exists(configPath):
+      config.read(configPath)
     else:
-      raise Exception("No config found, use default")
+      raise Exception("No config found at {0}, use default".format(configPath))
 
   except Exception, err:
     logger.warn(err)
-  return config
 
 
 def perform_prestart_checks(expected_hostname):
@@ -137,16 +117,21 @@ def perform_prestart_checks(expected_hostname):
       logger.error(msg)
       sys.exit(1)
   # Check if there is another instance running
-  if os.path.isfile(ProcessHelper.pidfile):
+  if os.path.isfile(ProcessHelper.pidfile) and not IS_WINDOWS:
     print("%s already exists, exiting" % ProcessHelper.pidfile)
     sys.exit(1)
   # check if ambari prefix exists
-  elif not os.path.isdir(config.get("agent", "prefix")):
+  elif config.has_option('agent', 'prefix') and not os.path.isdir(os.path.abspath(config.get('agent', 'prefix'))):
     msg = "Ambari prefix dir %s does not exists, can't continue" \
           % config.get("agent", "prefix")
     logger.error(msg)
     print(msg)
     sys.exit(1)
+  elif not config.has_option('agent', 'prefix'):
+    msg = "Ambari prefix dir %s not configured, can't continue"
+    logger.error(msg)
+    print(msg)
+    sys.exit(1)
 
 
 def daemonize():
@@ -207,7 +192,9 @@ def reset_agent(options):
 
   os._exit(0)
 
-def main():
+# heartbeat_stop_callback - stop event passed to Controller and NetUtil so their
+# loops can be interrupted from outside the process; needed on Windows, where no
+# SIGTERM is available
+def main(heartbeat_stop_callback=None):
   global config
   parser = OptionParser()
   parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="verbose log output", default=False)
@@ -222,7 +209,7 @@ def main():
   default_cfg = {'agent': {'prefix': '/home/ambari'}}
   config.load(default_cfg)
 
-  bind_signal_handlers()
+  bind_signal_handlers(agentPid)
 
   if (len(sys.argv) > 1) and sys.argv[1] == 'stop':
     stop_agent()
@@ -231,16 +218,18 @@ def main():
     reset_agent(sys.argv)
 
   # Check for ambari configuration file.
-  config = resolve_ambari_config()
+  resolve_ambari_config()
 
   # Starting data cleanup daemon
   data_cleaner = None
-  if int(config.get('agent', 'data_cleanup_interval')) > 0:
+  if config.has_option('agent', 'data_cleanup_interval') and int(config.get('agent','data_cleanup_interval')) > 0:
     data_cleaner = DataCleaner(config)
     data_cleaner.start()
 
   perform_prestart_checks(expected_hostname)
-  daemonize()
+
+  if not IS_WINDOWS:
+    daemonize()
 
   # Starting ping port listener
   try:
@@ -264,15 +253,19 @@ def main():
     logger.warn("Unable to determine the IP address of the Ambari server '%s'", server_hostname)
 
   # Wait until server is reachable
-  netutil = NetUtil()
-  netutil.try_to_connect(server_url, -1, logger)
-
-  # Launch Controller communication
-  controller = Controller(config)
-  controller.start()
-  controller.join()
-  stop_agent()
+  netutil = NetUtil(heartbeat_stop_callback)
+  retries, connected = netutil.try_to_connect(server_url, -1, logger)
+  # Ambari Agent was stopped using stop event
+  if connected:
+    # Launch Controller communication
+    controller = Controller(config, heartbeat_stop_callback)
+    controller.start()
+    controller.join()
+  if not IS_WINDOWS:
+    stop_agent()
   logger.info("finished")
 
 if __name__ == "__main__":
-  main()
+  heartbeat_stop_callback = bind_signal_handlers(agentPid)
+
+  main(heartbeat_stop_callback)

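Editor's note: the net effect of the main.py changes above is that bind_signal_handlers() now returns a platform-appropriate stop handler (signal-based on Linux, service-event-based on Windows), and main() threads it through NetUtil and Controller so both loops share one shutdown path. A sketch of the minimal contract such a handler must satisfy, matching the `0 == wait(...)` check in NetUtil; the class below is an illustrative stand-in, not the agent's implementation:

    import threading

    class StopHandlerSketch(object):
        def __init__(self):
            self._event = threading.Event()

        def set(self):
            # Called from a signal handler or the Windows service stop hook.
            self._event.set()

        def wait(self, timeout_sec):
            # Mirror the agent's convention: 0 means "stop requested",
            # nonzero means the timeout elapsed and work should continue.
            return 0 if self._event.wait(timeout_sec) else 1
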
+ 16 - 12
ambari-agent/src/main/python/ambari_agent/security.py

@@ -27,12 +27,12 @@ import json
 import pprint
 import traceback
 import hostname
+import platform
 
 logger = logging.getLogger()
 
-GEN_AGENT_KEY = "openssl req -new -newkey rsa:1024 -nodes -keyout %(keysdir)s/%(hostname)s.key\
-  -subj /OU=%(hostname)s/\
-        -out %(keysdir)s/%(hostname)s.csr"
+GEN_AGENT_KEY = 'openssl req -new -newkey rsa:1024 -nodes -keyout "%(keysdir)s'+os.sep+'%(hostname)s.key" '\
+	'-subj /OU=%(hostname)s/ -out "%(keysdir)s'+os.sep+'%(hostname)s.csr"'
 
 
 class VerifiedHTTPSConnection(httplib.HTTPSConnection):
@@ -141,30 +141,30 @@ class CachedHTTPSConnection:
 class CertificateManager():
   def __init__(self, config):
     self.config = config
-    self.keysdir = self.config.get('security', 'keysdir')
+    self.keysdir = os.path.abspath(self.config.get('security', 'keysdir'))
     self.server_crt = self.config.get('security', 'server_crt')
     self.server_url = 'https://' + self.config.get('server', 'hostname') + ':' \
        + self.config.get('server', 'url_port')
 
   def getAgentKeyName(self):
-    keysdir = self.config.get('security', 'keysdir')
+    keysdir = os.path.abspath(self.config.get('security', 'keysdir'))
     return keysdir + os.sep + hostname.hostname(self.config) + ".key"
 
   def getAgentCrtName(self):
-    keysdir = self.config.get('security', 'keysdir')
+    keysdir = os.path.abspath(self.config.get('security', 'keysdir'))
     return keysdir + os.sep + hostname.hostname(self.config) + ".crt"
 
   def getAgentCrtReqName(self):
-    keysdir = self.config.get('security', 'keysdir')
+    keysdir = os.path.abspath(self.config.get('security', 'keysdir'))
     return keysdir + os.sep + hostname.hostname(self.config) + ".csr"
 
   def getSrvrCrtName(self):
-    keysdir = self.config.get('security', 'keysdir')
+    keysdir = os.path.abspath(self.config.get('security', 'keysdir'))
     return keysdir + os.sep + "ca.crt"
 
   def checkCertExists(self):
 
-    s = self.config.get('security', 'keysdir') + os.sep + "ca.crt"
+    s = os.path.abspath(self.config.get('security', 'keysdir')) + os.sep + "ca.crt"
 
     server_crt_exists = os.path.exists(s)
 
@@ -240,10 +240,14 @@ class CertificateManager():
 
   def genAgentCrtReq(self):
     generate_script = GEN_AGENT_KEY % {'hostname': hostname.hostname(self.config),
-                                     'keysdir': self.config.get('security', 'keysdir')}
+                                     'keysdir' : os.path.abspath(self.config.get('security', 'keysdir'))}
     logger.info(generate_script)
-    p = subprocess.Popen([generate_script], shell=True, stdout=subprocess.PIPE)
-    p.communicate()
+    if platform.system() == 'Windows':
+      p = subprocess.Popen(generate_script, stdout=subprocess.PIPE)
+      p.communicate()
+    else:
+      p = subprocess.Popen([generate_script], shell=True, stdout=subprocess.PIPE)
+      p.communicate()
 
   def initSecurity(self):
     self.checkCertExists()

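Editor's note: the GEN_AGENT_KEY rework above does two things: it quotes the key and CSR paths (Windows install directories routinely contain spaces) and drops shell=True on Windows, where the command string goes to CreateProcess directly. A self-contained sketch of the same invocation, assuming openssl is on PATH:

    import os
    import platform
    import subprocess

    def gen_agent_csr(keysdir, host):
        keysdir = os.path.abspath(keysdir)
        # Quote the output paths so directories with spaces survive parsing.
        cmd = ('openssl req -new -newkey rsa:1024 -nodes'
               ' -keyout "%(dir)s%(sep)s%(host)s.key"'
               ' -subj /OU=%(host)s/'
               ' -out "%(dir)s%(sep)s%(host)s.csr"'
               % {'dir': keysdir, 'sep': os.sep, 'host': host})
        if platform.system() == 'Windows':
            # No shell on Windows: the string is handed to CreateProcess.
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        else:
            p = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE)
        return p.communicate()
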
+ 93 - 20
ambari-agent/src/main/python/ambari_agent/shell.py

@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# !/usr/bin/env python
 
 '''
 Licensed to the Apache Software Foundation (ASF) under one
@@ -29,26 +29,79 @@ import time
 import traceback
 import AmbariConfig
 import pprint
+import platform
 
-try:
+if platform.system() != "Windows":
+  try:
     import pwd
-except ImportError:
+  except ImportError:
     import winpwd as pwd
 
-global serverTracker
-serverTracker = {}
 logger = logging.getLogger()
 
+shellRunner = None
 threadLocal = threading.local()
-gracefull_kill_delay = 5 # seconds between SIGTERM and SIGKILL
-tempFiles = [] 
+gracefull_kill_delay = 5  # seconds between SIGTERM and SIGKILL
+
+tempFiles = []
+
+
 def noteTempFile(filename):
   tempFiles.append(filename)
 
+
 def getTempFiles():
   return tempFiles
 
-def kill_process_with_children(parent_pid):
+
+class _dict_to_object:
+  def __init__(self, entries):
+    self.__dict__.update(entries)
+
+  def __getitem__(self, item):
+    return self.__dict__[item]
+
+
+# windows specific code
+def _kill_process_with_children_windows(parent_pid):
+  shellRunner().run(["taskkill", "/T", "/PID", "{0}".format(parent_pid)])
+
+
+class shellRunnerWindows:
+  # Run any command
+  def run(self, script, user=None):
+    logger.warn("user argument ignored on windows")
+    code = 0
+    # subprocess.Popen on Windows accepts either a command string or an
+    # argument list, so pass the script through unchanged.
+    cmd = script
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE, shell=False)
+    out, err = p.communicate()
+    code = p.wait()
+    logger.debug("Exitcode for %s is %d" % (cmd, code))
+    return {'exitCode': code, 'output': out, 'error': err}
+
+  def runPowershell(self, file=None, script_block=None, args=[]):
+    logger.warn("user argument ignored on windows")
+    code = 0
+    cmd = None
+    if file:
+      cmd = ['powershell', '-WindowStyle', 'Hidden', '-File', file] + args
+    elif script_block:
+      cmd = ['powershell', '-WindowStyle', 'Hidden', '-Command', script_block] + args
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE, shell=False)
+    out, err = p.communicate()
+    code = p.wait()
+    logger.debug("Exitcode for %s is %d" % (cmd, code))
+    return _dict_to_object({'exitCode': code, 'output': out, 'error': err})
+
+
+#linux specific code
+def _kill_process_with_children_linux(parent_pid):
   def kill_tree_function(pid, signal):
     '''
     Kills process tree starting from a given pid.
@@ -58,15 +111,17 @@ def kill_process_with_children(parent_pid):
     # a given PID and then passes list of "kill -<SIGNAL> PID" commands to 'sh'
     # shell.
     CMD = """ps xf | awk -v PID=""" + str(pid) + \
-        """ ' $1 == PID { P = $1; next } P && /_/ { P = P " " $1;""" + \
-        """K=P } P && !/_/ { P="" }  END { print "kill -""" \
-        + str(signal) + """ "K }' | sh """
+          """ ' $1 == PID { P = $1; next } P && /_/ { P = P " " $1;""" + \
+          """K=P } P && !/_/ { P="" }  END { print "kill -""" \
+          + str(signal) + """ "K }' | sh """
     process = subprocess.Popen(CMD, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, shell=True)
     process.communicate()
-  run_kill_function(kill_tree_function, parent_pid)
 
-def run_kill_function(kill_function, pid):
+  _run_kill_function(kill_tree_function, parent_pid)
+
+
+def _run_kill_function(kill_function, pid):
   try:
     kill_function(pid, signal.SIGTERM)
   except Exception, e:
@@ -81,17 +136,19 @@ def run_kill_function(kill_function, pid):
     logger.error("Failed to send SIGKILL to PID %d. Process exited?" % (pid))
     logger.error("Reported error: " + repr(e))
 
-def changeUid():
+
+def _changeUid():
   try:
     os.setuid(threadLocal.uid)
   except Exception:
     logger.warn("can not switch user for running command.")
 
-class shellRunner:
+
+class shellRunnerLinux:
   # Run any command
   def run(self, script, user=None):
     try:
-      if user!=None:
+      if user != None:
         user = pwd.getpwnam(user)[2]
       else:
         user = os.getuid()
@@ -101,12 +158,28 @@ class shellRunner:
     code = 0
     cmd = " "
     cmd = cmd.join(script)
-    p = subprocess.Popen(cmd, preexec_fn=changeUid, stdout=subprocess.PIPE, 
+    p = subprocess.Popen(cmd, preexec_fn=_changeUid, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, shell=True, close_fds=True)
     out, err = p.communicate()
     code = p.wait()
-    logger.debug("Exitcode for %s is %d" % (cmd,code))
+    logger.debug("Exitcode for %s is %d" % (cmd, code))
     return {'exitCode': code, 'output': out, 'error': err}
 
-  def getServerTracker(self):
-    return serverTracker
+
+def kill_process_with_children(parent_pid):
+  if platform.system() == "Windows":
+    _kill_process_with_children_windows(parent_pid)
+  else:
+    _kill_process_with_children_linux(parent_pid)
+
+def changeUid():
+  if not platform.system() == "Windows":
+    try:
+      os.setuid(threadLocal.uid)
+    except Exception:
+      logger.warn("can not switch user for running command.")
+
+if platform.system() == "Windows":
+  shellRunner = shellRunnerWindows
+else:
+  shellRunner = shellRunnerLinux

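Editor's note: the dispatch idiom at the end of shell.py above binds the module-level name shellRunner to whichever class matches the platform, so call sites construct it without any OS checks. A usage sketch (assumes ambari_agent/shell.py is importable):

    from shell import shellRunner

    runner = shellRunner()  # shellRunnerWindows or shellRunnerLinux
    result = runner.run(["echo", "hello"])
    print(result['exitCode'])
    print(result['output'])
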
+ 82 - 0
ambari-agent/src/packages/windows.xml

@@ -0,0 +1,82 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1 http://maven.apache.org/xsd/assembly-1.1.1.xsd">
+  <!--This 'all' id is not appended to the produced bundle because we do this:
+    http://maven.apache.org/plugins/maven-assembly-plugin/faq.html#required-classifiers
+  -->
+  <id>windows-dist</id>
+  <formats>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <fileSets>
+    <fileSet>
+      <directory>src/main/python/ambari_agent</directory>
+      <outputDirectory>/sbin/ambari_agent</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>${project.basedir}/../ambari-common/src/main/python/resource_management</directory>
+      <outputDirectory>/sbin/resource_management</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>${project.basedir}/../ambari-common/src/main/python/ambari_commons</directory>
+      <outputDirectory>/sbin/ambari_commons</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>${project.basedir}/../ambari-common/src/main/python/ambari_jinja2/ambari_jinja2</directory>
+      <outputDirectory>/sbin/ambari_jinja2</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>${project.basedir}/conf/windows</directory>
+      <outputDirectory>/</outputDirectory>
+      <excludes>
+        <exclude>service_wrapper.py</exclude>
+        <exclude>createservice.ps1</exclude>
+      </excludes>
+    </fileSet>
+    <fileSet>
+      <directory>${project.basedir}/conf/windows</directory>
+      <outputDirectory>/sbin</outputDirectory>
+      <includes>
+        <include>service_wrapper.py</include>
+        <include>createservice.ps1</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>${target.cache.dir}</directory>
+      <outputDirectory>/cache</outputDirectory>
+    </fileSet>
+    <!--empty directory-->
+    <fileSet>
+      <directory>./</directory>
+      <outputDirectory>/keys</outputDirectory>
+      <excludes>
+        <exclude>*/**</exclude>
+      </excludes>
+    </fileSet>
+  </fileSets>
+  <files>
+    <file>
+      <source>${project.basedir}/../version</source>
+      <outputDirectory>data</outputDirectory>
+      <filtered>true</filtered>
+    </file>
+  </files>
+</assembly>
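
For orientation, the layout this assembly produces for the windows-dist bundle (derived from the fileSets and files entries above):

    /                           conf/windows contents (minus the two scripts below)
    /sbin/service_wrapper.py    \ the two scripts excluded from the root
    /sbin/createservice.ps1     /
    /sbin/ambari_agent/         src/main/python/ambari_agent
    /sbin/resource_management/  from ambari-common
    /sbin/ambari_commons/       from ambari-common
    /sbin/ambari_jinja2/        from ambari-common
    /cache/                     ${target.cache.dir}
    /keys/                      created empty
    /data/version               filtered copy of ../version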

+ 17 - 17
ambari-agent/src/test/python/ambari_agent/TestAlerts.py

@@ -18,9 +18,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
+from stacks.utils.RMFTestCase import *
+import os
 import socket
 import sys
-import os
 
 from ambari_agent.AlertSchedulerHandler import AlertSchedulerHandler
 from ambari_agent.alerts.collector import AlertCollector
@@ -429,12 +430,12 @@ class TestAlerts(TestCase):
     
     ash = AlertSchedulerHandler(test_file_path, test_stack_path, test_host_scripts_path)
     ash.start()
-    
+
     self.assertEquals(1, ash.get_job_count())
     ash.reschedule()
     self.assertEquals(1, ash.get_job_count())
-        
-  
+
+
   def test_alert_collector_purge(self):
     json = { "name": "namenode_process",
       "service": "HDFS",
@@ -466,13 +467,13 @@ class TestAlerts(TestCase):
     self.assertEquals(6, pa.interval())
 
     res = pa.collect()
-    
+
     self.assertTrue(collector.alerts()[0] is not None)
     self.assertEquals('CRITICAL', collector.alerts()[0]['state'])
-    
+
     collector.remove_by_uuid('c1f73191-4481-4435-8dae-fd380e4c0be1')
     self.assertEquals(0,len(collector.alerts()))
-    
+
 
   def test_disabled_definitions(self):
     test_file_path = os.path.join('ambari_agent', 'dummy_files')
@@ -509,24 +510,23 @@ class TestAlerts(TestCase):
 
     pa = PortAlert(json, json['source'])
     ash.schedule_definition(pa)
-    
+
     self.assertEquals(2, ash.get_job_count())
-    
+
     json['enabled'] = False
     pa = PortAlert(json, json['source'])
     ash.schedule_definition(pa)
-    
+
     # verify disabled alert not scheduled
     self.assertEquals(2, ash.get_job_count())
-    
+
     json['enabled'] = True
     pa = PortAlert(json, json['source'])
     ash.schedule_definition(pa)
-    
+
     # verify enabled alert was scheduled
     self.assertEquals(3, ash.get_job_count())
 
-
   def test_immediate_alert(self):
     test_file_path = os.path.join('ambari_agent', 'dummy_files')
     test_stack_path = os.path.join('ambari_agent', 'dummy_files')
@@ -538,10 +538,10 @@ class TestAlerts(TestCase):
     self.assertEquals(1, ash.get_job_count())
     self.assertEquals(0, len(ash._collector.alerts()))
 
-    execution_commands = [ { 
+    execution_commands = [ {
         "clusterName": "c1",
-        "hostName": "c6401.ambari.apache.org",    
-        "alertDefinition": {         
+        "hostName": "c6401.ambari.apache.org",
+        "alertDefinition": {
           "name": "namenode_process",
           "service": "HDFS",
           "component": "NAMENODE",
@@ -565,7 +565,7 @@ class TestAlerts(TestCase):
           }
         }
       } ]
-    
+
     # execute the alert immediately and verify that the collector has the result
     ash.execute_alert(execution_commands)
     self.assertEquals(1, len(ash._collector.alerts()))

+ 0 - 1
ambari-agent/src/test/python/ambari_agent/TestCertGeneration.py

@@ -45,4 +45,3 @@ class TestCertGeneration(TestCase):
   def tearDown(self):
     shutil.rmtree(self.tmpdir)
 
-

+ 3 - 3
ambari-agent/src/test/python/ambari_agent/TestController.py

@@ -385,7 +385,7 @@ class TestController(unittest.TestCase):
 
     heartbeat = MagicMock()
     self.controller.heartbeat = heartbeat
-
+    event_mock.return_value = False
     dumpsMock.return_value = "data"
 
     sendRequest = MagicMock(name="sendRequest")
@@ -512,7 +512,7 @@ class TestController(unittest.TestCase):
     response["restartAgent"] = "false"
     self.controller.heartbeatWithServer()
 
-    sleepMock.assert_called_with(
+    event_mock.assert_any_call(timeout=
       self.controller.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS)
 
     # Check that server continues to heartbeat after connection errors
@@ -533,7 +533,7 @@ class TestController(unittest.TestCase):
     self.controller.heartbeatWithServer()
     self.assertTrue(sendRequest.call_count > 5)
 
-    sleepMock.assert_called_with(
+    event_mock.assert_called_with(timeout=
       self.controller.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS)
 
     sys.stdout = sys.__stdout__
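
The switch from sleepMock to event_mock in these assertions tracks a behavioral change in the Controller: the heartbeat loop now blocks on threading.Event.wait(timeout=...) instead of time.sleep(), so a stop request can interrupt the wait immediately. A simplified sketch of that pattern, with abbreviated names rather than the actual Controller code:

    import threading

    stop_event = threading.Event()
    HEARTBEAT_INTERVAL = 10  # stands in for netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS

    def heartbeat_loop(send_heartbeat):
        while not stop_event.is_set():
            send_heartbeat()
            # Wakes early when set_stop() fires; on Python 2.7+ wait() returns
            # True in that case, False on a plain timeout
            if stop_event.wait(timeout=HEARTBEAT_INTERVAL):
                break

    def set_stop():
        stop_event.set()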

+ 21 - 20
ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py

@@ -95,7 +95,8 @@ class TestCustomServiceOrchestrator(TestCase):
       'clusterHostInfo':{'namenode_host' : ['1'],
                          'slave_hosts'   : ['0', '1'],
                          'all_hosts'     : ['h1.hortonworks.com', 'h2.hortonworks.com'],
-                         'all_ping_ports': ['8670:0,1']}
+                         'all_ping_ports': ['8670:0,1']},
+      'hostLevelParams':{}
     }
     
     decompress_cluster_host_info_mock.return_value = {'namenode_host' : ['h2.hortonworks.com'],
@@ -315,15 +316,15 @@ class TestCustomServiceOrchestrator(TestCase):
     self.assertTrue(os.path.exists(err))
     os.remove(out)
     os.remove(err)
-    
+
   from ambari_agent.StackVersionsFileHandler import StackVersionsFileHandler
-    
+
   @patch("shell.kill_process_with_children")
   @patch.object(FileCache, "__init__")
   @patch.object(CustomServiceOrchestrator, "resolve_script_path")
   @patch.object(CustomServiceOrchestrator, "resolve_hook_script_path")
   @patch.object(StackVersionsFileHandler, "read_stack_version")
-  def test_cancel_backgound_command(self, read_stack_version_mock, resolve_hook_script_path_mock, resolve_script_path_mock, FileCache_mock,  
+  def test_cancel_backgound_command(self, read_stack_version_mock, resolve_hook_script_path_mock, resolve_script_path_mock, FileCache_mock,
                                       kill_process_with_children_mock):
     FileCache_mock.return_value = None
     FileCache_mock.cache_dir = MagicMock()
@@ -334,9 +335,9 @@ class TestCustomServiceOrchestrator(TestCase):
     cfg.set('agent', 'tolerate_download_failures', 'true')
     cfg.set('agent', 'prefix', '.')
     cfg.set('agent', 'cache_dir', 'background_tasks')
-     
+
     actionQueue = ActionQueue(cfg, dummy_controller)
-    
+
     dummy_controller.actionQueue = actionQueue
     orchestrator = CustomServiceOrchestrator(cfg, dummy_controller)
     orchestrator.file_cache = MagicMock()
@@ -344,42 +345,42 @@ class TestCustomServiceOrchestrator(TestCase):
       return ""
     orchestrator.file_cache.get_service_base_dir = f
     actionQueue.customServiceOrchestrator = orchestrator
-    
+
     import TestActionQueue
     import copy
-    
+
     TestActionQueue.patch_output_file(orchestrator.python_executor)
     orchestrator.python_executor.prepare_process_result = MagicMock()
     orchestrator.dump_command_to_json = MagicMock()
- 
+
     lock = threading.RLock()
     complete_done = threading.Condition(lock)
-    
+
     complete_was_called = {}
     def command_complete_w(process_condenced_result, handle):
       with lock:
         complete_was_called['visited']= ''
         complete_done.wait(3)
-     
-    actionQueue.on_background_command_complete_callback = TestActionQueue.wraped(actionQueue.on_background_command_complete_callback, command_complete_w, None) 
+
+    actionQueue.on_background_command_complete_callback = TestActionQueue.wraped(actionQueue.on_background_command_complete_callback, command_complete_w, None)
     execute_command = copy.deepcopy(TestActionQueue.TestActionQueue.background_command)
     actionQueue.put([execute_command])
     actionQueue.processBackgroundQueueSafeEmpty()
-     
-    time.sleep(.1) 
-    
+
+    time.sleep(.1)
+
     orchestrator.cancel_command(19,'')
     self.assertTrue(kill_process_with_children_mock.called)
     kill_process_with_children_mock.assert_called_with(33)
-     
+
     with lock:
       complete_done.notifyAll()
 
     with lock:
       self.assertTrue(complete_was_called.has_key('visited'))
-    
+
     time.sleep(.1)
-     
+
     runningCommand = actionQueue.commandStatuses.get_command_status(19)
     self.assertTrue(runningCommand is not None)
     self.assertEqual(runningCommand['status'], ActionQueue.FAILED_STATUS)
@@ -501,12 +502,12 @@ class TestCustomServiceOrchestrator(TestCase):
     }
     dummy_controller = MagicMock()
     orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)
-    
+
     import TestActionQueue
     TestActionQueue.patch_output_file(orchestrator.python_executor)
     orchestrator.python_executor.condenseOutput = MagicMock()
     orchestrator.dump_command_to_json = MagicMock()
-    
+
     ret = orchestrator.runCommand(command, "out.txt", "err.txt")
     self.assertEqual(ret['exitcode'], 777)
 

+ 1 - 1
ambari-agent/src/test/python/ambari_agent/TestHostname.py

@@ -34,7 +34,7 @@ class TestHostname(TestCase):
     hostname.cached_hostname = None
     hostname.cached_public_hostname = None
     config = AmbariConfig()
-    self.assertEquals(hostname.hostname(config), socket.getfqdn(),
+    self.assertEquals(hostname.hostname(config), socket.getfqdn().lower(),
                       "hostname should equal the socket-based hostname")
     pass
 

+ 13 - 10
ambari-agent/src/test/python/ambari_agent/TestMain.py

@@ -52,23 +52,22 @@ class TestMain(unittest.TestCase):
     sys.stdout = sys.__stdout__
 
 
+  @patch("ambari_agent.HeartbeatStopHandler_linux")
   @patch("os._exit")
   @patch("os.getpid")
   @patch.object(ProcessHelper, "stopAgent")
-  def test_signal_handler(self, stopAgent_mock, os_getpid_mock, os_exit_mock):
+  def test_signal_handler(self, stopAgent_mock, os_getpid_mock, os_exit_mock, heartbeat_handler_mock):
     # testing exit of children
     main.agentPid = 4444
     os_getpid_mock.return_value = 5555
     main.signal_handler("signum", "frame")
-    self.assertTrue(os_exit_mock.called)
-
+    heartbeat_handler_mock.set_stop.assert_called()
     os_exit_mock.reset_mock()
 
     # testing exit of main process
     os_getpid_mock.return_value = main.agentPid
     main.signal_handler("signum", "frame")
-    self.assertFalse(os_exit_mock.called)
-    self.assertTrue(stopAgent_mock.called)
+    heartbeat_handler_mock.set_stop.assert_called()
 
 
   @patch.object(main.logger, "addHandler")
@@ -122,7 +121,7 @@ class TestMain(unittest.TestCase):
 
   @patch("signal.signal")
   def test_bind_signal_handlers(self, signal_mock):
-    main.bind_signal_handlers()
+    main.bind_signal_handlers(os.getpid())
     # Check if on SIGINT/SIGTERM agent is configured to terminate
     signal_mock.assert_any_call(signal.SIGINT, main.signal_handler)
     signal_mock.assert_any_call(signal.SIGTERM, main.signal_handler)
@@ -269,7 +268,7 @@ class TestMain(unittest.TestCase):
   @patch.object(main, "setup_logging")
   @patch.object(main, "bind_signal_handlers")
   @patch.object(main, "stop_agent")
-  @patch.object(main, "resolve_ambari_config")
+  @patch.object(AmbariConfig, "getConfigFile")
   @patch.object(main, "perform_prestart_checks")
   @patch.object(main, "daemonize")
   @patch.object(main, "update_log_level")
@@ -285,21 +284,25 @@ class TestMain(unittest.TestCase):
   def test_main(self, ping_port_init_mock, ping_port_start_mock, data_clean_init_mock,data_clean_start_mock,
                 parse_args_mock, join_mock, start_mock, Controller_init_mock, try_to_connect_mock,
                 update_log_level_mock, daemonize_mock, perform_prestart_checks_mock,
-                resolve_ambari_config_mock, stop_mock, bind_signal_handlers_mock,
+                ambari_config_mock,
+                stop_mock, bind_signal_handlers_mock,
                 setup_logging_mock, socket_mock):
     data_clean_init_mock.return_value = None
     Controller_init_mock.return_value = None
     ping_port_init_mock.return_value = None
     options = MagicMock()
     parse_args_mock.return_value = (options, MagicMock)
-
+    try_to_connect_mock.return_value = (0, True)
+    # use default unix config
+    ambari_config_mock.return_value = os.path.abspath("../../../conf/unix/ambari-agent.ini")
     #testing call without command-line arguments
+
     main.main()
 
     self.assertTrue(setup_logging_mock.called)
     self.assertTrue(bind_signal_handlers_mock.called)
     self.assertTrue(stop_mock.called)
-    self.assertTrue(resolve_ambari_config_mock.called)
+    #self.assertTrue(resolve_ambari_config_mock.called)
     self.assertTrue(perform_prestart_checks_mock.called)
     self.assertTrue(daemonize_mock.called)
     self.assertTrue(update_log_level_mock.called)
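
These test changes reflect that signal handling no longer calls os._exit() directly; the handler now asks the heartbeat stop handler to set its stop flag so the agent can wind down cleanly. A minimal sketch of the new flow under that assumption (the stop-handler stub stands in for HeartbeatStopHandler_linux):

    import os
    import signal

    class _StopHandler(object):
        def set_stop(self):
            print("stop requested")

    heartbeat_stop_handler = _StopHandler()

    def signal_handler(signum, frame):
        # request shutdown instead of exiting the process outright
        heartbeat_stop_handler.set_stop()

    def bind_signal_handlers(agent_pid):
        # handlers are only bound in the main agent process
        if os.getpid() == agent_pid:
            signal.signal(signal.SIGINT, signal_handler)
            signal.signal(signal.SIGTERM, signal_handler)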

+ 8 - 5
ambari-agent/src/test/python/ambari_agent/TestNetUtil.py

@@ -21,6 +21,7 @@ limitations under the License.
 from ambari_agent import NetUtil
 from mock.mock import MagicMock, patch
 import unittest
+import threading
 
 class TestNetUtil(unittest.TestCase):
 
@@ -51,15 +52,17 @@ class TestNetUtil(unittest.TestCase):
 
 
   @patch("time.sleep")
-  def test_try_to_connect(self, sleepMock):
-
+  @patch.object(threading._Event, "wait")
+  def test_try_to_connect(self, event_mock,
+                            sleepMock):
+    event_mock.return_value = False
     netutil = NetUtil.NetUtil()
     checkURL = MagicMock(name="checkURL")
     checkURL.return_value = True, "test"
     netutil.checkURL = checkURL
 
     # one successful get
-    self.assertEqual(0, netutil.try_to_connect("url", 10))
+    self.assertEqual((0, True), netutil.try_to_connect("url", 10))
 
     # got successful after N retries
     gets = [[True, ""], [False, ""], [False, ""]]
@@ -67,9 +70,9 @@ class TestNetUtil(unittest.TestCase):
     def side_effect(*args):
       return gets.pop()
     checkURL.side_effect = side_effect
-    self.assertEqual(2, netutil.try_to_connect("url", 10))
+    self.assertEqual((2, True), netutil.try_to_connect("url", 10))
 
     # max retries
     checkURL.side_effect = None
     checkURL.return_value = False, "test"
-    self.assertEqual(5, netutil.try_to_connect("url", 5))
+    self.assertEqual((5,False), netutil.try_to_connect("url", 5))
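
As the updated assertions show, NetUtil.try_to_connect() now returns a (retries, connected) pair instead of a bare retry count. A hedged sketch of a caller adapting to the new contract (variable names assumed):

    retries, connected = netutil.try_to_connect(server_url, max_retries)
    if not connected:
        logger.error("Could not reach %s after %d retries" % (server_url, retries))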

+ 8 - 8
ambari-agent/src/test/python/ambari_agent/TestStatusCheck.py

@@ -47,7 +47,7 @@ class TestStatusCheck(TestCase):
 
   def setUp(self):
 
-    self.pidPathesVars = [
+    self.pidPathVars = [
       {'var' : '',
       'defaultValue' : PID_DIR}
     ]
@@ -84,7 +84,7 @@ class TestStatusCheck(TestCase):
   @patch.object(StatusCheck, 'getIsLive')
   def test_live(self, get_is_live_mock):
 
-    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathesVars,
+    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
       self.globalConfig, self.servicesToLinuxUser)
 
     self.assertTrue(StatusCheck.USER_PATTERN in self.serviceToPidDict[COMPONENT_LIVE])
@@ -108,11 +108,11 @@ class TestStatusCheck(TestCase):
     logger_info_mock.side_effect = my_side_effect
     
     # call this three times
-    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathesVars,
+    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
       self.globalConfig, self.servicesToLinuxUser)
-    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathesVars,
+    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
       self.globalConfig, self.servicesToLinuxUser)
-    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathesVars,
+    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
       self.globalConfig, self.servicesToLinuxUser)
     # logged not more than once
     self.assert_(TestStatusCheck.timesLogged <= 1, "test_dont_relog_serToPidDict logged more than once")
@@ -129,7 +129,7 @@ class TestStatusCheck(TestCase):
     self.pidFilesDict[one_more_pid_file_name] = one_more_pid_full_path
     self.is_live_values[one_more_pid_full_path] = False
 
-    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathesVars,
+    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
       self.globalConfig, self.servicesToLinuxUser)
 
     statusCheck.pidFilesDict = self.pidFilesDict
@@ -149,7 +149,7 @@ class TestStatusCheck(TestCase):
     badServiceToPidDict = self.serviceToPidDict.copy()
     badServiceToPidDict['BAD_COMPONENT'] = 'prefix' + StatusCheck.USER_PATTERN
 
-    statusCheck = StatusCheck(badServiceToPidDict, self.pidPathesVars,
+    statusCheck = StatusCheck(badServiceToPidDict, self.pidPathVars,
       self.globalConfig, self.servicesToLinuxUser)
 
     statusCheck.pidFilesDict = self.pidFilesDict
@@ -162,7 +162,7 @@ class TestStatusCheck(TestCase):
   # Ensure that status checker return False for dead process
   @patch.object(StatusCheck, 'getIsLive')
   def test_dead(self, get_is_live_mock):
-    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathesVars,
+    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
       self.globalConfig, self.servicesToLinuxUser)
 
     statusCheck.pidFilesDict = self.pidFilesDict

+ 1 - 1
ambari-agent/src/test/python/resource_management/TestContentSources.py

@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
-
+from stacks.utils.RMFTestCase import *
 from unittest import TestCase
 from mock.mock import patch, MagicMock
 

+ 1 - 1
ambari-agent/src/test/python/resource_management/TestDirectoryResource.py

@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
-
+from stacks.utils.RMFTestCase import *
 from unittest import TestCase
 from mock.mock import patch, MagicMock
 import os

+ 2 - 0
ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py

@@ -15,6 +15,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 '''
+
+from stacks.utils.RMFTestCase import *
 import os
 
 from unittest import TestCase

+ 1 - 0
ambari-agent/src/test/python/resource_management/TestExecuteResource.py

@@ -16,6 +16,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
+from stacks.utils.RMFTestCase import *
 from unittest import TestCase
 from mock.mock import patch, MagicMock, call
 

+ 1 - 0
ambari-agent/src/test/python/resource_management/TestMonitorWebserverResource.py

@@ -16,6 +16,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
+from stacks.utils.RMFTestCase import *
 from unittest import TestCase
 from mock.mock import patch, MagicMock
 from resource_management import *

+ 2 - 0
ambari-agent/src/test/python/resource_management/TestSubstituteVars.py

@@ -17,6 +17,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 '''
+
+from stacks.utils.RMFTestCase import *
 from unittest import TestCase, main
 from resource_management.libraries.functions.substitute_vars import substitute_vars
 

+ 4 - 2
ambari-agent/src/test/python/unitTests.py

@@ -23,7 +23,8 @@ import doctest
 from os.path import dirname, split, isdir
 import logging.handlers
 import logging
-from random import shuffle
+#TODO Add an option to randomize the tests' execution
+#from random import shuffle
 
 LOG_FILE_NAME='tests.log'
 SELECTED_PREFIX = "_"
@@ -55,7 +56,8 @@ def all_tests_suite():
   for directory in os.listdir(src_dir):
     if os.path.isdir(directory):
       files_list += os.listdir(src_dir + os.sep + directory)
-  shuffle(files_list)
+  #TODO Add an option to randomize the tests' execution
+  #shuffle(files_list)
   tests_list = []
 
   logger.info('------------------------TESTS LIST:-------------------------------------')

+ 1 - 0
ambari-client/groovy-client/pom.xml

@@ -87,6 +87,7 @@
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
           <excludes>
+            <exclude>**/*.iml</exclude>
             <exclude>src/main/resources/blueprints/**</exclude>
             <exclude>src/test/resources/**</exclude>
           </excludes>

+ 3 - 3
ambari-client/python-client/pom.xml

@@ -75,7 +75,7 @@
         <executions>
           <execution>
             <configuration>
-              <executable>${project.basedir}/../../ambari-common/src/main/unix/ambari-python-wrap</executable>
+              <executable>python</executable>
               <workingDirectory>src/test/python</workingDirectory>
               <arguments>
                 <argument>unitTests.py</argument>
@@ -93,7 +93,7 @@
           </execution>
           <execution>
             <configuration>
-              <executable>${project.basedir}/../../ambari-common/src/main/unix/ambari-python-wrap</executable>
+              <executable>python</executable>
               <workingDirectory>target/python-client-${project.version}</workingDirectory>
               <arguments>
                 <argument>${project.basedir}/src/main/python/setup.py</argument>
@@ -115,7 +115,7 @@
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>rpm-maven-plugin</artifactId>
-        <version>2.0.1</version>
+        <version>2.1-alpha-2</version>
         <executions>
           <execution>
             <phase>none</phase>

+ 79 - 0
ambari-common/src/main/python/ambari_commons/ambari_service.py

@@ -0,0 +1,79 @@
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+
+import win32service
+
+from ambari_commons.os_windows import WinService
+
+
+AMBARI_VERSION_VAR = "AMBARI_VERSION_VAR"
+
+ENV_PYTHON_PATH = "PYTHONPATH"
+
+
+class AmbariService(WinService):
+  _svc_name_ = "Ambari Service"
+  _svc_display_name_ = "Ambari Service"
+  _svc_description_ = "Ambari Service"
+
+  # Sets the current dir and adjusts the PYTHONPATH env variable before calling SvcDoRun()
+  def SvcRun(self):
+    self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
+
+    import servicemanager
+
+    parser = self._InitOptionsParser()
+    (self.options, args) = parser.parse_args()
+
+    try:
+      is_debugging = servicemanager.Debugging()
+    except:
+      is_debugging = False
+
+    if not is_debugging:
+      # Save the current dir, or the script dir if none set (typical for services)
+      script_path = os.path.dirname(__file__.replace('/', os.sep))
+      # the script resides in the sbin/ambari_commons subdir
+      self.options.current_dir = os.path.normpath(script_path + "\\..\\..")
+      os.chdir(self.options.current_dir)
+
+      python_path = os.path.normpath(script_path + "\\..")
+
+      #update the environment vars: set PYTHONPATH = $script_dir\sbin;%PYTHONPATH%
+      if os.environ.has_key(ENV_PYTHON_PATH):
+        python_path += os.pathsep + os.environ[ENV_PYTHON_PATH]
+      os.environ[ENV_PYTHON_PATH] = python_path
+
+    self.SvcDoRun()
+    pass
+
+  # Call during initialization to implement standard service versioning
+  @classmethod
+  def _AdjustServiceVersion(cls):
+    if os.environ.has_key(AMBARI_VERSION_VAR):
+      ambariVer = os.environ[AMBARI_VERSION_VAR]
+    else:
+      ambariVer = "1.3.0-SNAPSHOT"
+    AmbariService._svc_display_name_ += "-" + ambariVer
+    AmbariService._svc_description_ += " v" + ambariVer
+
+  # Override to customize the command-line arguments
+  def _InitOptionsParser(self):
+    pass
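
A minimal subclass sketch showing how a concrete Windows service would plug into AmbariService; the option parser contents and the SvcDoRun() body are illustrative only:

    from optparse import OptionParser

    from ambari_commons.ambari_service import AmbariService

    class MyAgentService(AmbariService):
      _svc_name_ = "MyAgent"
      _svc_display_name_ = "My Agent"

      # Called by SvcRun() before the command line is parsed
      def _InitOptionsParser(self):
        parser = OptionParser()
        parser.add_option("--verbose", dest="verbose", action="store_true", default=False)
        return parser

      # SvcRun() fixes the working dir and PYTHONPATH, then delegates here
      def SvcDoRun(self):
        pass  # the real service loop goes here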

+ 35 - 0
ambari-common/src/main/python/ambari_commons/exceptions.py

@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+class FatalException(Exception):
+    def __init__(self, code, reason):
+      self.code = code
+      self.reason = reason
+
+    def __str__(self):
+        return repr("Fatal exception: %s, exit code %s" % (self.reason, self.code))
+
+
+class NonFatalException(Exception):
+  def __init__(self, reason):
+    self.reason = reason
+
+  def __str__(self):
+    return repr("NonFatal exception: %s" % self.reason)

+ 148 - 0
ambari-common/src/main/python/ambari_commons/inet_utils.py

@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import sys
+import urllib2
+
+from exceptions import *
+from logging_utils import *
+
+def download_file(link, destination, chunk_size=16 * 1024):
+  print_info_msg("Downloading {0} to {1}".format(link, destination))
+  if os.path.exists(destination):
+      print_warning_msg("File {0} already exists, assuming it was downloaded before".format(destination))
+      return
+
+  force_download_file(link, destination, chunk_size)
+
+
+def download_progress(file_name, downloaded_size, blockSize, totalSize):
+  percent = int(downloaded_size * 100 / totalSize)
+  status = "\r" + file_name
+
+  if totalSize < blockSize:
+    status += "... %d%%" % (100)
+  else:
+    status += "... %d%% (%.1f MB of %.1f MB)" % (
+      percent, downloaded_size / 1024 / 1024.0, totalSize / 1024 / 1024.0)
+  sys.stdout.write(status)
+  sys.stdout.flush()
+
+
+def find_range_components(meta):
+  file_size = 0
+  seek_pos = 0
+  hdr_range = meta.getheaders("Content-Range")
+  if len(hdr_range) > 0:
+    range_comp1 = hdr_range[0].split('/')
+    if len(range_comp1) > 1:
+      range_comp2 = range_comp1[0].split(' ') #split away the "bytes" prefix
+      if len(range_comp2) == 0:
+        raise FatalException(12, 'Malformed Content-Range response header: "{0}".'.format(hdr_range))
+      range_comp3 = range_comp2[1].split('-')
+      seek_pos = int(range_comp3[0])
+      if range_comp1[1] != '*': #'*' == unknown length
+        file_size = int(range_comp1[1])
+
+  if file_size == 0:
+    #Try the old-fashioned way
+    hdrLen = meta.getheaders("Content-Length")
+    if len(hdrLen) == 0:
+      raise FatalException(12, "Response header doesn't contain Content-Length. Chunked Transfer-Encoding is not supported for now.")
+    file_size = int(hdrLen[0])
+
+  return (file_size, seek_pos)
+
+
+def force_download_file(link, destination, chunk_size = 16 * 1024, progress_func = download_progress):
+  request = urllib2.Request(link)
+
+  if os.path.exists(destination) and not os.path.isfile(destination):
+    #Directory specified as target? Must be a mistake. Bail out, don't assume anything.
+    err = 'Download target {0} is a directory.'.format(destination)
+    raise FatalException(1, err)
+
+  (dest_path, file_name) = os.path.split(destination)
+
+  temp_dest = destination + ".tmpdownload"
+  partial_size = 0
+
+  if os.path.exists(temp_dest):
+    #Support for resuming downloads, in case the process is killed while downloading a file
+    #  set resume range
+    # See http://stackoverflow.com/questions/6963283/python-urllib2-resume-download-doesnt-work-when-network-reconnects
+    partial_size = os.stat(temp_dest).st_size
+    if partial_size > chunk_size:
+      #Re-download the last chunk, to minimize the possibilities of file corruption
+      resume_pos = partial_size - chunk_size
+      request.add_header("Range", "bytes=%s-" % resume_pos)
+  else:
+    #Make sure the full dir structure is in place, otherwise file open will fail
+    if not os.path.exists(dest_path):
+      os.makedirs(dest_path)
+
+  response = urllib2.urlopen(request)
+  (file_size, seek_pos) = find_range_components(response.info())
+
+  print_info_msg("Downloading to: %s Bytes: %s" % (destination, file_size))
+
+  if partial_size < file_size:
+    if seek_pos == 0:
+      #New file, create it
+      open_mode = 'wb'
+    else:
+      #Resuming download of an existing file
+      open_mode = 'rb+' #rb+ doesn't create the file, using wb to create it
+    f = open(temp_dest, open_mode)
+
+    try:
+      #Resume the download from where it left off
+      if seek_pos > 0:
+        f.seek(seek_pos)
+
+      file_size_dl = seek_pos
+      while True:
+        buffer = response.read(chunk_size)
+        if not buffer:
+            break
+
+        file_size_dl += len(buffer)
+        f.write(buffer)
+
+        progress_func(file_name, file_size_dl, chunk_size, file_size)
+    finally:
+      f.close()
+
+    sys.stdout.write('\n')
+    sys.stdout.flush()
+
+  print_info_msg("Finished downloading {0} to {1}".format(link, destination))
+
+  downloaded_size = os.stat(temp_dest).st_size
+  if downloaded_size != file_size:
+    err = 'Size of downloaded file {0} is {1} bytes, it is probably damaged or incomplete'.format(destination, downloaded_size)
+    raise FatalException(1, err)
+
+  # when download is complete -> mv temp_dest destination
+  if os.path.exists(destination):
+    #Windows behavior: rename fails if the destination file exists
+    os.unlink(destination)
+  os.rename(temp_dest, destination)
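
A short usage sketch for the helpers above (URL and paths are placeholders). download_file() is a no-op when the destination already exists; force_download_file() always downloads, resuming a partial <destination>.tmpdownload via an HTTP Range request when one is found:

    from ambari_commons.inet_utils import download_file, force_download_file

    # skipped entirely if the destination file is already present
    download_file("http://example.com/agent.msi", "C:\\downloads\\agent.msi")

    # re-fetch unconditionally, resuming if a .tmpdownload file exists
    force_download_file("http://example.com/agent.msi", "C:\\downloads\\agent.msi")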

+ 49 - 0
ambari-common/src/main/python/ambari_commons/logging_utils.py

@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+VERBOSE = False
+SILENT = False
+DEBUG_MODE = False
+
+# terminal styles
+BOLD_ON = '\033[1m'
+BOLD_OFF = '\033[0m'
+
+#
+# Prints an "info" messsage.
+#
+def print_info_msg(msg):
+  if VERBOSE:
+    print("INFO: " + msg)
+
+#
+# Prints an "error" messsage.
+#
+def print_error_msg(msg):
+  print("ERROR: " + msg)
+
+#
+# Prints a "warning" messsage.
+#
+def print_warning_msg(msg, bold=False):
+  if bold:
+    print(BOLD_ON + "WARNING: " + msg + BOLD_OFF)
+  else:
+    print("WARNING: " + msg)

+ 111 - 15
ambari-common/src/main/python/ambari_commons/os_check.py

@@ -36,16 +36,40 @@ def linux_distribution():
   PYTHON_VER = sys.version_info[0] * 10 + sys.version_info[1]
 
   if PYTHON_VER < 26:
-    linux_distribution = platform.dist()
+    (distname, version, id)  = platform.dist()
   elif os.path.exists('/etc/redhat-release'):
-    linux_distribution = platform.dist()
+    (distname, version, id)  = platform.dist()
   else:
-    linux_distribution = platform.linux_distribution()
+    (distname, version, id) = platform.linux_distribution()
 
-  return linux_distribution
+  return (platform.system(), os.name, distname, version, id)
 
+def windows_distribution():
+  from os_windows import get_windows_version
+
+  # Only support Windows Server 64 bit
+  (win_release, win_version, win_csd, win_ptype) = platform.win32_ver()
+
+  if win_version.startswith("6.2."):
+    # win32_ver() doesn't work correctly for Windows Server 2012 R2 and Windows 8.1
+    (win_ver_major, win_ver_minor, win_ver_build) = get_windows_version()
+    if win_ver_major == 6 and win_ver_minor == 3:
+      win_release = "2012ServerR2"
+      win_version = "%d.%d.%d" % (win_ver_major, win_ver_minor, win_ver_build)
+
+  #if win_version
+  return (platform.system(), os.name, "win" + win_release, win_version, win_ptype)
 
 class OS_CONST_TYPE(type):
+  # os platforms
+  LINUX_OS = 'linux'
+  WINDOWS_OS = 'windows'
+
+  # os families
+  REDHAT_FAMILY = 'redhat'
+  DEBIAN_FAMILY = 'debian'
+  SUSE_FAMILY = 'suse'
+  WINSRV_FAMILY = 'winsrv'
 
   # Declare here os type mapping
   OS_FAMILY_COLLECTION = []
@@ -58,7 +82,8 @@ class OS_CONST_TYPE(type):
       Initialize internal data structures from file
     """
     try:
-      f = open(os.path.join(RESOURCES_DIR, OSFAMILY_JSON_RESOURCE))
+      fpath = os.path.join(RESOURCES_DIR, OSFAMILY_JSON_RESOURCE)
+      f = open(fpath)
       json_data = json.load(f)
       f.close()
       for family in json_data:
@@ -69,7 +94,7 @@ class OS_CONST_TYPE(type):
           'os_list': json_data[family][JSON_OS_TYPE]
         }]
     except:
-      raise Exception("Couldn't load '%s' file" % OSFAMILY_JSON_RESOURCE)
+      raise Exception("Couldn't load '%s' file" % fpath)
 
   def __init__(cls, name, bases, dct):
     cls.initialize_data()
@@ -89,17 +114,45 @@ class OS_CONST_TYPE(type):
       return name[:-7]
     raise Exception("Unknown class property '%s'" % name)
 
+def get_os_distribution():
+  if platform.system() == 'Windows':
+    dist = windows_distribution()
+  else:
+    if platform.system() == 'Mac':
+      raise Exception("MacOS not supported. Exiting...")
+    else:
+      # Linux
+      # Read content from /etc/*-release file
+      # Full release name
+      dist = linux_distribution()
+  return dist
 
 class OSConst:
   __metaclass__ = OS_CONST_TYPE
 
 
 class OSCheck:
+  _dist = get_os_distribution()
+
+  @staticmethod
+  def get_os_os():
+    """
+    Return values:
+    windows, linux
+
+    In case cannot detect - exit.
+    """
+    # Read content from /etc/*-release file
+    # Full release name
+    os_os = OSCheck._dist[0].lower()
+
+    return os_os
 
   @staticmethod
   def get_os_type():
     """
     Return values:
+    win2008server, win2012server,
     redhat, fedora, centos, oraclelinux, ascendos,
     amazon, xenserver, oel, ovs, cloudlinux, slc, scientific, psbm,
     ubuntu, debian, sles, sled, opensuse, suse ... and others
@@ -108,8 +161,7 @@ class OSCheck:
     """
     # Read content from /etc/*-release file
     # Full release name
-    dist = linux_distribution()
-    operatingSystem = dist[0].lower()
+    operatingSystem = OSCheck._dist[2].lower()
 
     # special cases
     if os.path.exists('/etc/oracle-release'):
@@ -147,10 +199,7 @@ class OSCheck:
 
     In case cannot detect raises exception.
     """
-    # Read content from /etc/*-release file
-    # Full release name
-    dist = linux_distribution()
-    dist = dist[1]
+    dist = OSCheck._dist[3]
 
     if dist:
       return dist
@@ -173,8 +222,7 @@ class OSCheck:
 
     In case cannot detect raises exception.
     """
-    dist = linux_distribution()
-    dist = dist[2].lower()
+    dist = OSCheck._dist[4].lower()
 
     if dist:
       return dist
@@ -225,6 +273,48 @@ class OSCheck:
       pass
     return False
 
+  @staticmethod
+  def is_windows_family():
+    """
+     Return true if it is so or false if not
+
+     This is safe check for windows family, doesn't generate exception
+    """
+    try:
+      if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
+        return True
+    except Exception:
+      pass
+    return False
+
+  @staticmethod
+  def is_linux_os():
+    """
+     Return true if it is so or false if not
+
+     This is safe check for linux os, doesn't generate exception
+    """
+    try:
+      if OSCheck.get_os_os() == OSConst.LINUX_OS:
+        return True
+    except Exception:
+      pass
+    return False
+
+  @staticmethod
+  def is_windows_os():
+    """
+     Return true if it is so or false if not
+
+     This is safe check for windows os, doesn't generate exception
+    """
+    try:
+      if OSCheck.get_os_os() == OSConst.WINDOWS_OS:
+        return True
+    except Exception:
+      pass
+    return False
+
   @staticmethod
   def is_redhat7():
     """
@@ -238,4 +328,10 @@ class OSCheck:
         return True
     except Exception:
       pass
-    return False
+    return False
+
+# OS info
+OS_VERSION = OSCheck.get_os_major_version()
+OS_TYPE = OSCheck.get_os_type()
+OS_FAMILY = OSCheck.get_os_family()
+OS_OS = OSCheck.get_os_os()
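
Both probes above return the same 5-tuple, (system, os.name, distname, version, id), which is what the OSCheck._dist indexing relies on: index 0 feeds get_os_os(), 2 get_os_type(), 3 get_os_version(), and 4 the release id. An illustration (example values only):

    from ambari_commons.os_check import OSCheck

    system, osname, distname, version, dist_id = OSCheck._dist
    # e.g. ('Linux', 'posix', 'centos', '6.5', 'Final')
    # or   ('Windows', 'nt', 'win2012ServerR2', '6.3.9600', 'Multiprocessor Free')

    if OSCheck.is_windows_os():
        print("Windows flavor: " + distname)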

+ 81 - 0
ambari-common/src/main/python/ambari_commons/os_linux.py

@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import getpass
+
+import os
+import pwd
+import shlex
+import subprocess
+
+from logging_utils import *
+
+
+NR_CHMOD_CMD = 'chmod {0} {1} {2}'
+NR_CHOWN_CMD = 'chown {0} {1} {2}'
+
+ULIMIT_CMD = "ulimit -n"
+
+
+def run_os_command(cmd):
+  print_info_msg('about to run command: ' + str(cmd))
+  if type(cmd) == str:
+    cmd = shlex.split(cmd)
+  process = subprocess.Popen(cmd,
+                             stdout=subprocess.PIPE,
+                             stdin=subprocess.PIPE,
+                             stderr=subprocess.PIPE
+                             )
+  (stdoutdata, stderrdata) = process.communicate()
+  return process.returncode, stdoutdata, stderrdata
+
+def os_change_owner(filePath, user):
+  uid = pwd.getpwnam(user).pw_uid
+  gid = pwd.getpwnam(user).pw_gid
+  os.chown(filePath, uid, gid)
+
+def os_is_root():
+  '''
+  Checks the effective UID
+  Returns True if a program is running under root-level privileges.
+  '''
+  return os.geteuid() == 0
+
+def os_set_file_permissions(file, mod, recursive, user):
+  WARN_MSG = "Command {0} returned exit code {1} with message: {2}"
+  if recursive:
+    params = " -R "
+  else:
+    params = ""
+  command = NR_CHMOD_CMD.format(params, mod, file)
+  retcode, out, err = run_os_command(command)
+  if retcode != 0:
+    print_warning_msg(WARN_MSG.format(command, retcode, err))
+  command = NR_CHOWN_CMD.format(params, user, file)
+  retcode, out, err = run_os_command(command)
+  if retcode != 0:
+    print_warning_msg(WARN_MSG.format(command, retcode, err))
+
+def os_set_open_files_limit(maxOpenFiles):
+  command = "%s %s" % (ULIMIT_CMD, str(maxOpenFiles))
+  run_os_command(command)
+
+
+def os_getpass(prompt):
+  return getpass.unix_getpass(prompt)
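
Usage sketch for run_os_command() above; it accepts either a command string (split with shlex) or an argv list, and returns a (retcode, stdout, stderr) triple:

    retcode, out, err = run_os_command("ls -l /tmp")
    if retcode != 0:
        print_warning_msg("command failed: " + err)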

+ 102 - 0
ambari-common/src/main/python/ambari_commons/os_utils.py

@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import shutil
+import string
+
+from os_check import *
+
+if OSCheck.is_windows_os():
+  from os_windows import *
+else:
+  # MacOS not supported
+  from os_linux import *
+
+from logging_utils import *
+from exceptions import FatalException
+
+
+def is_valid_filepath(filepath):
+  if not filepath or not os.path.exists(filepath) or os.path.isdir(filepath):
+    print 'Invalid path, please provide the absolute file path.'
+    return False
+  else:
+    return True
+
+def quote_path(filepath):
+  if(filepath.find(' ') != -1):
+    filepath_ret = '"' + filepath + '"'
+  else:
+    filepath_ret = filepath
+  return filepath_ret
+
+def search_file(filename, search_path, pathsep=os.pathsep):
+  """ Given a search path, find file with requested name """
+  for path in string.split(search_path, pathsep):
+    candidate = os.path.join(path, filename)
+    if os.path.exists(candidate):
+      return os.path.abspath(candidate)
+  return None
+
+def copy_file(src, dest_file):
+  try:
+    shutil.copyfile(src, dest_file)
+  except Exception, e:
+    err = "Can not copy file {0} to {1} due to: {2} . Please check file " \
+              "permissions and free disk space.".format(src, dest_file, e.message)
+    raise FatalException(1, err)
+
+def copy_files(files, dest_dir):
+  if os.path.isdir(dest_dir):
+    for filepath in files:
+      shutil.copy(filepath, dest_dir)
+    return 0
+  else:
+    return -1
+
+def remove_file(filePath):
+  if os.path.exists(filePath):
+    try:
+      os.remove(filePath)
+    except Exception, e:
+      print_warning_msg('Unable to remove file: ' + str(e))
+      return 1
+  pass
+  return 0
+
+def set_file_permissions(file, mod, user, recursive):
+  if os.path.exists(file):
+    os_set_file_permissions(file, mod, recursive, user)
+  else:
+    print_info_msg("File %s does not exist" % file)
+
+def is_root():
+  return os_is_root()
+
+# Proxy to the os implementation
+def change_owner(filePath, user):
+  os_change_owner(filePath, user)
+
+# Proxy to the os implementation
+def set_open_files_limit(maxOpenFiles):
+  os_set_open_files_limit(maxOpenFiles)
+
+def get_password(prompt):
+  return os_getpass(prompt)
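
Because this module star-imports either os_windows or os_linux at load time, callers stay platform-agnostic. A sketch (the searched path is a placeholder):

    import os
    from ambari_commons.os_utils import is_root, run_os_command, search_file

    if not is_root():
        print("warning: not running with elevated privileges")

    ini = search_file("ambari-agent.ini", "/etc/ambari-agent/conf" + os.pathsep + ".")
    retcode, out, err = run_os_command("hostname")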

+ 563 - 0
ambari-common/src/main/python/ambari_commons/os_windows.py

@@ -0,0 +1,563 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import os
+import getpass
+import shlex
+import subprocess
+import sys
+import time
+import win32api
+import win32event
+import win32service
+import win32con
+import win32serviceutil
+import wmi
+import random
+import string
+
+import ctypes
+
+from win32security import *
+from win32api import *
+from winerror import ERROR_INVALID_HANDLE
+from win32process import GetExitCodeProcess, STARTF_USESTDHANDLES, STARTUPINFO, CreateProcessAsUser
+from win32event import WaitForSingleObject, INFINITE
+import msvcrt
+import tempfile
+from win32event import *
+from win32api import CloseHandle
+
+from ambari_commons.exceptions import *
+from logging_utils import *
+
+from win32security import LsaOpenPolicy, POLICY_CREATE_ACCOUNT, POLICY_LOOKUP_NAMES, LookupAccountName, \
+  LsaAddAccountRights, LsaRemoveAccountRights, SE_SERVICE_LOGON_NAME
+from win32net import NetUserAdd
+from win32netcon import USER_PRIV_USER, UF_NORMAL_ACCOUNT, UF_SCRIPT
+import pywintypes
+
+SERVICE_STATUS_UNKNOWN = "unknown"
+SERVICE_STATUS_STARTING = "starting"
+SERVICE_STATUS_RUNNING = "running"
+SERVICE_STATUS_STOPPING = "stopping"
+SERVICE_STATUS_STOPPED = "stopped"
+SERVICE_STATUS_NOT_INSTALLED = "not installed"
+
+WHOAMI_GROUPS = "whoami /groups"
+ADMIN_ACCOUNT = "BUILTIN\\Administrators"
+
+class OSVERSIONINFOEXW(ctypes.Structure):
+    _fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),
+                ('dwMajorVersion', ctypes.c_ulong),
+                ('dwMinorVersion', ctypes.c_ulong),
+                ('dwBuildNumber', ctypes.c_ulong),
+                ('dwPlatformId', ctypes.c_ulong),
+                ('szCSDVersion', ctypes.c_wchar*128),
+                ('wServicePackMajor', ctypes.c_ushort),
+                ('wServicePackMinor', ctypes.c_ushort),
+                ('wSuiteMask', ctypes.c_ushort),
+                ('wProductType', ctypes.c_byte),
+                ('wReserved', ctypes.c_byte)]
+
+def get_windows_version():
+    """
+    Gets the OS version. Returns a tuple of
+    (OS_MAJOR, OS_MINOR, OS_BUILD).
+    """
+    os_version = OSVERSIONINFOEXW()
+    os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
+    retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))
+    if retcode != 0:
+        raise Exception("Failed to get OS version")
+
+    return os_version.dwMajorVersion, os_version.dwMinorVersion, os_version.dwBuildNumber
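
Calling the ctypes probe above is straightforward; RtlGetVersion is queried directly because platform.win32_ver() misreports Windows Server 2012 R2 and Windows 8.1 (see windows_distribution() in os_check.py above):

    major, minor, build = get_windows_version()
    # e.g. (6, 3, 9600) on Windows Server 2012 R2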
+
+CHECK_FIREWALL_SCRIPT = """[string]$CName = $env:computername
+$reg = [Microsoft.Win32.RegistryKey]::OpenRemoteBaseKey("LocalMachine",$CName)
+$domain = $reg.OpenSubKey("System\CurrentControlSet\Services\SharedAccess\Parameters\FirewallPolicy\DomainProfile").GetValue("EnableFirewall")
+$standard = $reg.OpenSubKey("System\CurrentControlSet\Services\SharedAccess\Parameters\FirewallPolicy\StandardProfile").GetValue("EnableFirewall")
+$public = $reg.OpenSubKey("System\CurrentControlSet\Services\SharedAccess\Parameters\FirewallPolicy\PublicProfile").GetValue("EnableFirewall")
+Write-Host $domain
+Write-Host $standard
+Write-Host $public
+"""
+
+def _create_tmp_files():
+  out_file = tempfile.TemporaryFile(mode="r+b")
+  err_file = tempfile.TemporaryFile(mode="r+b")
+  return (msvcrt.get_osfhandle(out_file.fileno()),
+          msvcrt.get_osfhandle(err_file.fileno()),
+          out_file,
+          err_file)
+
+
+def _get_files_output(out, err):
+  out.seek(0)
+  err.seek(0)
+  return out.read().strip(), err.read().strip()
+
+
+def _safe_duplicate_handle(h):
+  try:
+    h = DuplicateHandle(GetCurrentProcess(),
+                        h,
+                        GetCurrentProcess(),
+                        0,
+                        True,
+                        win32con.DUPLICATE_SAME_ACCESS)
+    return True, h
+  except Exception as exc:
+    if exc.winerror == ERROR_INVALID_HANDLE:
+      return True, None
+  return False, None
+
+
+def run_os_command_impersonated(cmd, user, password, domain='.'):
+  si = STARTUPINFO()
+
+  out_handle, err_handle, out_file, err_file = _create_tmp_files()
+
+  ok, si.hStdInput = _safe_duplicate_handle(GetStdHandle(STD_INPUT_HANDLE))
+
+  if not ok:
+    raise Exception("Unable to create StdInput for child process")
+  ok, si.hStdOutput = _safe_duplicate_handle(out_handle)
+  if not ok:
+    raise Exception("Unable to create StdOut for child process")
+  ok, si.hStdError = _safe_duplicate_handle(err_handle)
+  if not ok:
+    raise Exception("Unable to create StdErr for child process")
+
+  si.dwFlags = STARTF_USESTDHANDLES
+  si.lpDesktop = ""
+
+  user_token = LogonUser(user, domain, password, win32con.LOGON32_LOGON_SERVICE, win32con.LOGON32_PROVIDER_DEFAULT)
+  primary_token = DuplicateTokenEx(user_token, SecurityImpersonation, 0, TokenPrimary)
+  info = CreateProcessAsUser(primary_token, None, cmd, None, None, 1, 0, None, None, si)
+
+  hProcess, hThread, dwProcessId, dwThreadId = info
+  hThread.Close()
+
+  try:
+    WaitForSingleObject(hProcess, INFINITE)
+  except KeyboardInterrupt:
+    pass
+
+  out, err = _get_files_output(out_file, err_file)
+  exitcode = GetExitCodeProcess(hProcess)
+
+  return exitcode, out, err
+
+def run_os_command(cmd, env=None):
+  if isinstance(cmd,basestring):
+    cmd = cmd.replace("\\", "\\\\")
+    cmd = shlex.split(cmd)
+  process = subprocess.Popen(cmd,
+                             stdout=subprocess.PIPE,
+                             stdin=subprocess.PIPE,
+                             stderr=subprocess.PIPE,
+                             env=env
+  )
+  (stdoutdata, stderrdata) = process.communicate()
+  return process.returncode, stdoutdata, stderrdata
+
+# execute powershell script passed in script_content. Script will be in temporary file to avoid different escape
+# and formatting problems.
+def run_powershell_script(script_content):
+  tmp_dir = tempfile.gettempdir()
+  random_filename = ''.join(random.choice(string.lowercase) for i in range(10))
+  script_file = open(os.path.join(tmp_dir,random_filename+".ps1"),"w")
+  script_file.write(script_content)
+  script_file.close()
+  result = run_os_command("powershell  -ExecutionPolicy unrestricted -File {0}".format(script_file.name))
+  os.remove(script_file.name)
+  return result
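
A usage sketch for run_powershell_script(); the script body is written to a temporary .ps1 file and run with -ExecutionPolicy unrestricted, so multi-line content needs no extra escaping. Using the firewall probe defined earlier in this file:

    retcode, out, err = run_powershell_script(CHECK_FIREWALL_SCRIPT)
    if retcode == 0:
        # one EnableFirewall flag per profile: domain, standard, public
        domain_fw, standard_fw, public_fw = out.strip().splitlines()[:3]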
+
+def os_change_owner(filePath, user):
+  cmd = ['icacls', filePath, '/setowner', user]
+  retcode, outdata, errdata = run_os_command(cmd)
+  return retcode
+
+def os_is_root():
+  '''
+  Checks whether the current user is a member of the Administrators group
+  Returns True if yes, otherwise False
+  '''
+  retcode, out, err = run_os_command(WHOAMI_GROUPS)
+  if retcode != 0:
+    err_msg = "Unable to check the current user's group memberships. Command {0} returned exit code {1} with message: {2}".format(WHOAMI_GROUPS, retcode, err)
+    print_warning_msg(err_msg)
+    raise FatalException(retcode, err_msg)
+
+  #Check for Administrators group membership
+  if -1 != out.find('\n' + ADMIN_ACCOUNT):
+    return True
+
+  return False
+
+def os_set_file_permissions(file, mod, recursive, user):
+  retcode = 0
+
+  #WARN_MSG = "Command {0} returned exit code {1} with message: {2}"
+  #if recursive:
+  #  params = " -R "
+  #else:
+  #  params = ""
+  #command = NR_CHMOD_CMD.format(params, mod, file)
+  #retcode, out, err = run_os_command(command)
+  #if retcode != 0:
+  #  print_warning_msg(WARN_MSG.format(command, file, err))
+  #command = NR_CHOWN_CMD.format(params, user, file)
+  #retcode, out, err = run_os_command(command)
+  #if retcode != 0:
+  #  print_warning_msg(WARN_MSG.format(command, file, err))
+
+  # rights = mod
+  # acls_remove_cmd = "icacls {0} /remove {1}".format(file, user)
+  # retcode, out, err = run_os_command(acls_remove_cmd)
+  # if retcode == 0:
+  #   acls_modify_cmd = "icacls {0} /grant {1}:{2}".format(file, user, rights)
+  #   retcode, out, err = run_os_command(acls_modify_cmd)
+  return retcode
+
+
+def os_set_open_files_limit(maxOpenFiles):
+  # No open files limit in Windows. Not messing around with the System Resource Manager, at least for now.
+  pass
+
+
+def os_getpass(prompt, stream=None):
+  """Prompt for password with echo off, using Windows getch()."""
+  if sys.stdin is not sys.__stdin__:
+    return getpass.fallback_getpass(prompt, stream)
+
+  import msvcrt
+
+  for c in prompt:
+    msvcrt.putch(c)
+
+  pw = ""
+  while True:
+    c = msvcrt.getch()
+    if c == '\r' or c == '\n':
+      break
+    if c == '\003':
+      raise KeyboardInterrupt
+    if c == '\b':
+      if pw == '':
+        pass
+      else:
+        pw = pw[:-1]
+        msvcrt.putch('\b')
+        msvcrt.putch(" ")
+        msvcrt.putch('\b')
+    else:
+      pw = pw + c
+      msvcrt.putch("*")
+
+  msvcrt.putch('\r')
+  msvcrt.putch('\n')
+  return pw
+
+#[fbarca] Not used for now, keep it around just in case
+def wait_for_pid_wmi(processName, parentPid, pattern, timeout):
+  """
+    Check pid for existence during timeout
+  """
+  tstart = time.time()
+  pid_live = 0
+
+  c = wmi.WMI(find_classes=False)
+  qry = "select * from Win32_Process where Name=\"%s\" and ParentProcessId=%d" % (processName, parentPid)
+
+  while int(time.time() - tstart) <= timeout:
+    for proc in c.query(qry):
+      cmdLine = proc.CommandLine
+      if cmdLine is not None and pattern in cmdLine:
+        return pid_live
+    time.sleep(1)
+  return 0
+
+
+#need this for redirecting output form python process to file
+class SyncStreamWriter(object):
+  def __init__(self, stream, hMutexWrite):
+    self.stream = stream
+    self.hMutexWrite = hMutexWrite
+
+  def write(self, data):
+    #Ensure that the output is thread-safe when writing from 2 separate streams into the same file
+    #  (typical when redirecting both stdout and stderr to the same file).
+    win32event.WaitForSingleObject(self.hMutexWrite, win32event.INFINITE)
+    try:
+      self.stream.write(data)
+      self.stream.flush()
+    finally:
+      win32event.ReleaseMutex(self.hMutexWrite)
+
+  def __getattr__(self, attr):
+    return getattr(self.stream, attr)
+
+
+class SvcStatusCallback(object):
+  def __init__(self, svc):
+    self.svc = svc
+
+  def reportStartPending(self):
+    self.svc.ReportServiceStatus(win32service.SERVICE_START_PENDING)
+
+  def reportStarted(self):
+    self.svc.ReportServiceStatus(win32service.SERVICE_RUNNING)
+
+  def reportStopPending(self):
+    self.svc.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+
+  def reportStopped(self):
+    self.svc.ReportServiceStatus(win32service.SERVICE_STOPPED)
+
+
+class WinServiceController:
+  @staticmethod
+  def Start(serviceName, waitSecs=30):
+    err = 0
+    try:
+      win32serviceutil.StartService(serviceName)
+      if waitSecs:
+        win32serviceutil.WaitForServiceStatus(serviceName, win32service.SERVICE_RUNNING, waitSecs)
+    except win32service.error, exc:
+      print "Error starting service: %s" % exc.strerror
+      err = exc.winerror
+    return err
+
+  @staticmethod
+  def Stop(serviceName, waitSecs=30):
+    err = 0
+    try:
+      if waitSecs:
+        win32serviceutil.StopServiceWithDeps(serviceName, waitSecs=waitSecs)
+      else:
+        win32serviceutil.StopService(serviceName)
+    except win32service.error, exc:
+      print "Error stopping service: %s (%d)" % (exc.strerror, exc.winerror)
+      err = exc.winerror
+    return err
+
+  @staticmethod
+  def QueryStatus(serviceName):
+    statusString = SERVICE_STATUS_UNKNOWN
+
+    try:
+      status = win32serviceutil.QueryServiceStatus(serviceName)[1]
+
+      if status == win32service.SERVICE_STOPPED:
+        statusString = SERVICE_STATUS_STOPPED
+      elif status == win32service.SERVICE_START_PENDING:
+        statusString = SERVICE_STATUS_STARTING
+      elif status == win32service.SERVICE_RUNNING:
+        statusString = SERVICE_STATUS_RUNNING
+      elif status == win32service.SERVICE_STOP_PENDING:
+        statusString = SERVICE_STATUS_STOPPING
+    except win32api.error:
+      statusString = SERVICE_STATUS_NOT_INSTALLED
+      pass
+
+    return statusString
+
+  @staticmethod
+  def EnsureServiceIsStarted(serviceName, waitSecs=30):
+    err = 0
+    try:
+      status = win32serviceutil.QueryServiceStatus(serviceName)[1]
+      if win32service.SERVICE_RUNNING != status:
+        if win32service.SERVICE_START_PENDING != status:
+          win32serviceutil.StartService(serviceName)
+        if waitSecs:
+          win32serviceutil.WaitForServiceStatus(serviceName, win32service.SERVICE_RUNNING, waitSecs)
+    except win32service.error, exc:
+      err = exc.winerror
+    return err
+
+
+class WinService(win32serviceutil.ServiceFramework):
+  # _svc_name_ = The service name
+  # _svc_display_name_ = The service display name
+  # _svc_description_ = The service description
+
+  _heventSvcStop = win32event.CreateEvent(None, 0, 0, None)
+  _hmtxOut = win32event.CreateMutex(None, False, None)  #[fbarca] Python doesn't support critical sections
+
+  def __init__(self, *args):
+    win32serviceutil.ServiceFramework.__init__(self, *args)
+
+  def SvcDoRun(self):
+    try:
+      self.ReportServiceStatus(win32service.SERVICE_RUNNING)
+      self.ServiceMain()
+    except Exception, x:
+      #TODO: Log exception
+      self.SvcStop()
+
+  def SvcStop(self):
+    self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+    win32event.SetEvent(self._heventSvcStop)
+
+  # Service code entry point. Override it to implement the intended functionality.
+  def ServiceMain(self):
+    #Default implementation, does nothing.
+    win32event.WaitForSingleObject(self._heventSvcStop, win32event.INFINITE)
+    pass
+
+  def DefCtrlCHandler(self):
+    print_info_msg("Ctrl+C handler invoked. Stopping.")
+    win32event.SetEvent(self._heventSvcStop)
+    pass
+
+  #username domain\\username : The Username the service is to run under
+  #password password : The password for the username
+  #startup [manual|auto|disabled|delayed] : How the service starts, default = auto
+  #interactive : Allow the service to interact with the desktop.
+  #perfmonini file: .ini file to use for registering performance monitor data
+  #perfmondll file: .dll file to use when querying the service for performance data, default = perfmondata.dll
+  @classmethod
+  def Install(cls, startupMode = "auto", username = None, password = None, interactive = False,
+              perfMonIni = None, perfMonDll = None):
+    installArgs = [sys.argv[0], "--startup=" + startupMode]
+    if username is not None and username:
+      installArgs.append("--username=" + username)
+      if password is not None and password:
+        installArgs.append("--password=" + password)
+    if interactive:
+      installArgs.append("--interactive")
+    if perfMonIni is not None and perfMonIni:
+      installArgs.append("--perfmonini=" + perfMonIni)
+    if perfMonDll is not None and perfMonDll:
+      installArgs.append("--perfmondll=" + perfMonDll)
+    installArgs.append("install")
+    win32serviceutil.HandleCommandLine(cls, None, installArgs)
+
+  @classmethod
+  def Start(cls, waitSecs = 30):
+    return WinServiceController.Start(cls._svc_name_, waitSecs)
+
+  @classmethod
+  def Stop(cls, waitSecs = 30):
+    return WinServiceController.Stop(cls._svc_name_, waitSecs)
+
+  @classmethod
+  def QueryStatus(cls):
+    return WinServiceController.QueryStatus(cls._svc_name_)
+
+  @classmethod
+  def set_ctrl_c_handler(cls, ctrlHandler):
+    win32api.SetConsoleCtrlHandler(ctrlHandler, True)
+    pass
+
+  def _RedirectOutputStreamsToFile(self, outFilePath):
+    outFileDir = os.path.dirname(outFilePath)
+    if not os.path.exists(outFileDir):
+      os.makedirs(outFileDir)
+
+    out_writer = SyncStreamWriter(file(outFilePath, "w"), self._hmtxOut)
+    sys.stderr = out_writer
+    sys.stdout = out_writer
+    pass
+
+  def CheckForStop(self):
+    #Check for stop event to be signaled
+    return win32event.WAIT_OBJECT_0 == win32event.WaitForSingleObject(self._heventSvcStop, 1)
+
+  def _StopOrWaitForChildProcessToFinish(self, childProcess):
+    #Wait for the child process to finish or for the stop event to be signaled
+    if(win32event.WAIT_OBJECT_0 == win32event.WaitForMultipleObjects([self._heventSvcStop, childProcess._handle], False, win32event.INFINITE)):
+      # The OS only detaches the child process when the master process exits.
+      # We must kill it manually.
+      try:
+        #Sending signal.CTRL_BREAK_EVENT doesn't work. It only detaches the child process from the master.
+        #  Must brutally terminate the child process. Sorry Java.
+        childProcess.terminate()
+      except OSError, e:
+        print_info_msg("Unable to stop Ambari Server - " + str(e))
+        return False
+
+    return True
+
+class SystemWideLock(object):
+
+  def __init__(self, name):
+    self._mutex = CreateMutex(None, 0, name)
+
+  def lock(self, timeout=0):
+    result = WaitForSingleObject(self._mutex, timeout)
+    if result in [WAIT_TIMEOUT, WAIT_ABANDONED, WAIT_FAILED]:
+      return False
+    elif result == WAIT_OBJECT_0:
+      return True
+
+  def unlock(self):
+    try:
+      ReleaseMutex(self._mutex)
+      return True
+    except:
+      return False
+
+  def __del__(self):
+    CloseHandle(self._mutex)
+
+class UserHelper(object):
+  ACTION_OK = 0
+  USER_EXISTS = 1
+  ACTION_FAILED = -1
+
+  def __init__(self):
+    self._policy = LsaOpenPolicy(None, POLICY_CREATE_ACCOUNT | POLICY_LOOKUP_NAMES)
+
+  def create_user(self, name, password, comment="Ambari user"):
+    user_info = {}
+    user_info['name'] = name
+    user_info['password'] = password
+    user_info['priv'] = USER_PRIV_USER
+    user_info['comment'] = comment
+    user_info['flags'] = UF_NORMAL_ACCOUNT | UF_SCRIPT
+    try:
+      NetUserAdd(None, 1, user_info)
+    except pywintypes.error as e:
+      if e.winerror == 2224:
+        return UserHelper.USER_EXISTS, e.strerror
+      else:
+        return UserHelper.ACTION_FAILED, e.strerror
+    return UserHelper.ACTION_OK, "User created."
+
+  def add_user_privilege(self, name, privilege):
+    try:
+      acc_sid = LookupAccountName(None, name)[0]
+      LsaAddAccountRights(self._policy, acc_sid, (privilege,))
+    except pywintypes.error as e:
+      return UserHelper.ACTION_FAILED, e.strerror
+    return UserHelper.ACTION_OK, "Privilege added."
+
+  def remove_user_privilege(self, name, privilege):
+    try:
+      acc_sid = LookupAccountName(None, name)[0]
+      LsaRemoveAccountRights(self._policy, acc_sid, 0, (privilege,))
+    except pywintypes.error as e:
+      return UserHelper.ACTION_FAILED, e.strerror
+    return UserHelper.ACTION_OK, "Privilege removed."
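
These helpers are consumed later in this patch: install_hdp_msi.py serializes hdp.msi installations through SystemWideLock. A minimal usage sketch, assuming the module is importable as ambari_commons.os_windows (the mutex name and timeout are illustrative):

```python
# Sketch only: guard a machine-wide critical section with SystemWideLock.
from ambari_commons.os_windows import SystemWideLock

lock = SystemWideLock("ambari_example_lock")  # hypothetical mutex name
if lock.lock(5000):  # wait up to 5000 ms for the mutex
  try:
    pass  # work that must not run twice on this host goes here
  finally:
    lock.unlock()
else:
  print "Another process holds the lock; skipping."
```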

+ 11 - 0
ambari-common/src/main/python/ambari_commons/resources/os_family.json

@@ -41,5 +41,16 @@
     "versions": [
       11
     ]
+  },
+  "winsrv": {
+    "distro": [
+      "win2008server",
+      "win2008serverr2",
+      "win2012server",
+      "win2012serverr2"
+    ],
+    "versions": [
+      6
+    ]
   }
 }
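
The new winsrv entry follows the same schema as the existing families: a list of distro identifiers plus the major versions covered. A hedged sketch of a lookup against this file (the path and helper are illustrative, not the actual OSCheck implementation):

```python
# Illustrative only: map a distro id to its family using os_family.json.
import json

with open("os_family.json") as f:  # path is illustrative
  families = json.load(f)

def family_of(distro):
  for family, info in families.iteritems():
    if distro in info["distro"]:
      return family
  return None

print family_of("win2012serverr2")  # -> winsrv
```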

+ 30 - 0
ambari-common/src/main/python/ambari_commons/str_utils.py

@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+def compress_backslashes(s):
+  s1 = s
+  while (-1 != s1.find('\\\\')):
+    s1 = s1.replace('\\\\', '\\')
+  return s1
+
+def ensure_double_backslashes(s):
+  s1 = compress_backslashes(s)
+  s2 = s1.replace('\\', '\\\\')
+  return s2
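
The two helpers compose safely: compress_backslashes first collapses any run of separators to one, so ensure_double_backslashes never quadruples an already-escaped path. A quick sketch:

```python
# Sketch: normalizing Windows path separators with the helpers above.
from ambari_commons.str_utils import compress_backslashes, ensure_double_backslashes

print compress_backslashes("C:\\\\Hadoop\\\\logs")      # C:\Hadoop\logs
print ensure_double_backslashes("C:\\Hadoop\\logs")     # C:\\Hadoop\\logs
print ensure_double_backslashes("C:\\\\Hadoop\\logs")   # C:\\Hadoop\\logs (input is compressed first)
```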

+ 22 - 19
ambari-common/src/main/python/resource_management/core/logger.py

@@ -26,7 +26,7 @@ from resource_management.libraries.script.config_dictionary import UnknownConfig
 
 class Logger:
   logger = logging.getLogger("resource_management")
-  
+
   # unprotected_strings : protected_strings map
   sensitive_strings = {}
 
@@ -41,8 +41,8 @@ class Logger:
   @staticmethod
   def info(text):
     Logger.logger.info(Logger.get_protected_text(text))
-  
-  @staticmethod  
+
+  @staticmethod
   def debug(text):
     Logger.logger.debug(Logger.get_protected_text(text))
 
@@ -57,11 +57,11 @@ class Logger:
   @staticmethod
   def info_resource(resource):
     Logger.info(Logger.get_protected_text(Logger._get_resource_repr(resource)))
-  
-  @staticmethod  
+
+  @staticmethod
   def debug_resource(resource):
     Logger.debug(Logger.get_protected_text(Logger._get_resource_repr(resource)))
-    
+
   @staticmethod
   def get_protected_text(text):
     """
@@ -69,17 +69,17 @@ class Logger:
     """
     for unprotected_string, protected_string in Logger.sensitive_strings.iteritems():
       text = text.replace(unprotected_string, protected_string)
-      
+
     return text
-    
-  @staticmethod  
+
+  @staticmethod
   def _get_resource_repr(resource):
     MESSAGE_MAX_LEN = 256
     logger_level = logging._levelNames[Logger.logger.level]
-    
+
     arguments_str = ""
     for x,y in resource.arguments.iteritems():
-      
+
       # strip unicode 'u' sign
       if isinstance(y, unicode):
         # don't show long messages
@@ -87,7 +87,7 @@ class Logger:
           y = '...'
         val = repr(y).lstrip('u')
       # don't show dicts of configurations
-      # usually too long  
+      # usually too long
       elif logger_level != 'DEBUG' and isinstance(y, dict):
         val = "..."
       # for configs which didn't come
@@ -95,14 +95,17 @@ class Logger:
         val = "[EMPTY]"
       # correctly output 'mode' (as they are octal values like 0755)
       elif y and x == 'mode':
-        val = oct(y)
+        try:
+          val = oct(y)
+        except:
+          val = repr(y)
       else:
         val = repr(y)
-      
-      
+
+
       arguments_str += "'{0}': {1}, ".format(x, val)
-      
-    if arguments_str:  
+
+    if arguments_str:
       arguments_str = arguments_str[:-2]
-    
-    return unicode("{0} {{{1}}}").format(resource, arguments_str)
+
+    return unicode("{0} {{{1}}}").format(resource, arguments_str)
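
Beyond the whitespace cleanup, the behavior to remember here is the sensitive_strings map: every info/debug call is filtered through get_protected_text. A short sketch (the secret value is illustrative):

```python
# Sketch: any registered sensitive value is masked before it reaches the log.
from resource_management.core.logger import Logger

Logger.sensitive_strings["s3cr3tPassw0rd"] = "[PROTECTED]"
Logger.info("Connecting as hive with password s3cr3tPassw0rd")
# logged as: Connecting as hive with password [PROTECTED]
```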

+ 6 - 0
ambari-common/src/main/python/resource_management/core/providers/__init__.py

@@ -50,6 +50,12 @@ PROVIDERS = dict(
   ubuntu=dict(
     Package="resource_management.core.providers.package.apt.AptProvider",
   ),
+  winsrv=dict(
+    Service="resource_management.core.providers.windows.service.ServiceProvider",
+    Execute="resource_management.core.providers.windows.system.ExecuteProvider",
+    File="resource_management.core.providers.windows.system.FileProvider",
+    Directory="resource_management.core.providers.windows.system.DirectoryProvider"
+  ),
   default=dict(
     File="resource_management.core.providers.system.FileProvider",
     Directory="resource_management.core.providers.system.DirectoryProvider",

+ 20 - 0
ambari-common/src/main/python/resource_management/core/providers/windows/__init__.py

@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

+ 65 - 0
ambari-common/src/main/python/resource_management/core/providers/windows/service.py

@@ -0,0 +1,65 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management.core.providers import Provider
+from resource_management.core.base import Fail
+import win32service
+import time
+
+
+_schSCManager = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_ALL_ACCESS)
+
+
+class ServiceProvider(Provider):
+  def action_start(self):
+    self._service_handle = self._service_handle if hasattr(self, "_service_handle") else \
+      win32service.OpenService(_schSCManager, self.resource.service_name, win32service.SERVICE_ALL_ACCESS)
+    if not self.status():
+      win32service.StartService(self._service_handle, None)
+      self.wait_status(win32service.SERVICE_RUNNING)
+
+  def action_stop(self):
+    self._service_handle = self._service_handle if hasattr(self, "_service_handle") else \
+      win32service.OpenService(_schSCManager, self.resource.service_name, win32service.SERVICE_ALL_ACCESS)
+    if self.status():
+      win32service.ControlService(self._service_handle, win32service.SERVICE_CONTROL_STOP)
+      self.wait_status(win32service.SERVICE_STOPPED)
+
+  def action_restart(self):
+    self._service_handle = win32service.OpenService(_schSCManager, self.resource.service_name,
+                                                    win32service.SERVICE_ALL_ACCESS)
+    self.action_stop()
+    self.action_start()
+
+  def action_reload(self):
+    raise Fail("Reload for Service resource not supported on windows")
+
+  def status(self):
+    if win32service.QueryServiceStatusEx(self._service_handle)["CurrentState"] == win32service.SERVICE_RUNNING:
+      return True
+    return False
+
+  def get_current_status(self):
+    return win32service.QueryServiceStatusEx(self._service_handle)["CurrentState"]
+
+  def wait_status(self, status, timeout=5):
+    begin = time.time()
+    while self.get_current_status() != status and (timeout == 0 or time.time() - begin < timeout):
+      time.sleep(1)
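
Note that wait_status polls once per second and gives up silently after five seconds by default; callers needing a firmer guarantee should pass a larger timeout. A hedged, Windows-only sketch of driving the provider directly; the provider only reads resource.service_name, so a stub object suffices (service name illustrative; pywin32 and admin rights assumed):

```python
# Sketch only, under the assumptions stated above.
class _StubServiceResource(object):
  service_name = "namenode"  # illustrative

provider = ServiceProvider(_StubServiceResource())
provider.action_start()               # no-op if the service is already running
print provider.get_current_status()   # e.g. win32service.SERVICE_RUNNING
```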

+ 382 - 0
ambari-common/src/main/python/resource_management/core/providers/windows/system.py

@@ -0,0 +1,382 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.core.providers import Provider
+from resource_management.core.logger import Logger
+from resource_management.core.base import Fail
+from resource_management.core import ExecuteTimeoutException
+import time
+import os
+import subprocess
+import shutil
+from resource_management.libraries.script import Script
+import win32con
+from win32security import *
+from win32api import *
+from winerror import ERROR_INVALID_HANDLE
+from win32profile import CreateEnvironmentBlock
+from win32process import GetExitCodeProcess, STARTF_USESTDHANDLES, STARTUPINFO, CreateProcessAsUser
+from win32event import WaitForSingleObject, INFINITE
+from win32security import *
+import msvcrt
+import tempfile
+
+def _create_tmp_files(env=None):
+  dirname = None
+  if env is None:
+    env = os.environ
+
+  for env_var_name in 'TMPDIR', 'TEMP', 'TMP':
+    if env.has_key(env_var_name):
+      dirname = env[env_var_name]
+      if dirname and os.path.exists(dirname):
+        break
+      dirname = None  # the variable pointed at a missing directory; keep looking
+
+  if dirname is None:
+    for dirname2 in r'c:\temp', r'c:\tmp', r'\temp', r'\tmp':
+      try:
+        if not os.path.isdir(dirname2):
+          os.makedirs(dirname2)
+        dirname = dirname2
+        break
+      except:
+        pass
+
+  if dirname is None:
+    raise Exception('Unable to create temp dir. Insufficient access rights.')
+
+  out_file = tempfile.TemporaryFile(mode="r+b", dir=dirname)
+  err_file = tempfile.TemporaryFile(mode="r+b", dir=dirname)
+  return (msvcrt.get_osfhandle(out_file.fileno()),
+          msvcrt.get_osfhandle(err_file.fileno()),
+          out_file,
+          err_file)
+
+
+def _get_files_output(out, err):
+  out.seek(0)
+  err.seek(0)
+  return out.read().strip(), err.read().strip()
+
+
+def _safe_duplicate_handle(h):
+  try:
+    h = DuplicateHandle(GetCurrentProcess(),
+                        h,
+                        GetCurrentProcess(),
+                        0,
+                        True,
+                        win32con.DUPLICATE_SAME_ACCESS)
+    return True, h
+  except Exception as exc:
+    if exc.winerror == ERROR_INVALID_HANDLE:
+      return True, None
+  return False, None
+
+
+def _merge_env(env1, env2, merge_keys=['PYTHONPATH']):
+  """
+  Merge env2 into env1. Also current python instance variables from merge_keys list taken into account and they will be
+  merged with equivalent keys from env1 and env2 using system path separator.
+  :param env1: first environment, usually returned by CreateEnvironmentBlock
+  :param env2: custom environment
+  :param merge_keys: env variables to merge as PATH
+  :return: merged environment
+  """
+  env1 = dict(env1)  # copy to new dict in case env1 is os.environ
+  if env2:
+    for key, value in env2.iteritems():
+      if not key in merge_keys:
+        env1[key] = value
+  # transform keys and values to str (Windows cannot accept unicode)
+  result_env = {}
+  for key, value in env1.iteritems():
+    if not key in merge_keys:
+      result_env[str(key)] = str(value)
+  #merge keys from merge_keys
+  def put_values(key, env, result):
+    if env and key in env:
+      result.extend(env[key].split(os.pathsep))
+
+  for key in merge_keys:
+    all_values = []
+    for env in [env1, env2, os.environ]:
+      put_values(key, env, all_values)
+    result_env[str(key)] = str(os.pathsep.join(set(all_values)))
+  return result_env
+
+def AdjustPrivilege(htoken, priv, enable = 1):
+  # Get the ID for the privilege.
+  privId = LookupPrivilegeValue(None, priv)
+  # Now obtain the privilege for this token.
+  # Create a list of the privileges to be added.
+  privState = SE_PRIVILEGE_ENABLED if enable else 0
+  newPrivileges = [(privId, privState)]
+  # and make the adjustment.
+  AdjustTokenPrivileges(htoken, 0, newPrivileges)
+
+def QueryPrivilegeState(hToken, priv):
+  # Get the ID for the privilege.
+  privId = LookupPrivilegeValue(None, priv)
+  privList = GetTokenInformation(hToken, TokenPrivileges)
+  privState = 0
+  for (id, attr) in privList:
+    if id == privId:
+      privState = attr
+  Logger.debug('Privilege state: {}={} ({}) Enabled={}'.format(privId, priv, LookupPrivilegeDisplayName(None, priv), privState))
+  return privState
+
+# Execute a command. Since the Windows HDP stack relies heavily on a proper environment, it is better to reload a
+# fresh environment on every execution. The env argument will be merged with the fresh environment for the user.
+def _call_command(command, logoutput=False, cwd=None, env=None, wait_for_finish=True, timeout=None, user=None):
+  # TODO implement timeout, wait_for_finish
+  Logger.info("Executing %s" % (command))
+  if user:
+    proc_token = OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY | TOKEN_ADJUST_PRIVILEGES)
+
+    old_states = []
+
+    privileges = [
+      SE_ASSIGNPRIMARYTOKEN_NAME,
+      SE_INCREASE_QUOTA_NAME,
+    ]
+
+    for priv in privileges:
+      old_states.append(QueryPrivilegeState(proc_token, priv))
+      AdjustPrivilege(proc_token, priv)
+      QueryPrivilegeState(proc_token, priv)
+
+    user_token = LogonUser(user, ".", Script.get_password(user), win32con.LOGON32_LOGON_SERVICE,
+                           win32con.LOGON32_PROVIDER_DEFAULT)
+    env_token = DuplicateTokenEx(user_token, SecurityIdentification, TOKEN_QUERY, TokenPrimary)
+    # getting updated environment for impersonated user and merge it with custom env
+    current_env = CreateEnvironmentBlock(env_token, False)
+    current_env = _merge_env(current_env, env)
+
+    si = STARTUPINFO()
+    out_handle, err_handle, out_file, err_file = _create_tmp_files(current_env)
+    ok, si.hStdInput = _safe_duplicate_handle(GetStdHandle(STD_INPUT_HANDLE))
+    if not ok:
+      raise Exception("Unable to create StdInput for child process")
+    ok, si.hStdOutput = _safe_duplicate_handle(out_handle)
+    if not ok:
+      raise Exception("Unable to create StdOut for child process")
+    ok, si.hStdError = _safe_duplicate_handle(err_handle)
+    if not ok:
+      raise Exception("Unable to create StdErr for child process")
+
+    Logger.debug("Redirecting stdout to '{}', stderr to '{}'".format(out_file.name, err_file.name))
+
+    si.dwFlags = win32con.STARTF_USESTDHANDLES
+    si.lpDesktop = ""
+
+    try:
+      info = CreateProcessAsUser(user_token, None, command, None, None, 1, win32con.CREATE_NO_WINDOW, current_env, cwd, si)
+      hProcess, hThread, dwProcessId, dwThreadId = info
+      hThread.Close()
+
+      try:
+        WaitForSingleObject(hProcess, INFINITE)
+      except KeyboardInterrupt:
+        pass
+      out, err = _get_files_output(out_file, err_file)
+      code = GetExitCodeProcess(hProcess)
+    finally:
+      for priv in privileges:
+        old_state = old_states.pop(0)
+        AdjustPrivilege(proc_token, priv, old_state)
+  else:
+    # getting updated environment for current process and merge it with custom env
+    cur_token = OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY)
+    current_env = CreateEnvironmentBlock(cur_token, False)
+    current_env = _merge_env(current_env, env)
+    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+                            cwd=cwd, env=current_env, shell=False)
+    out, err = proc.communicate()
+    code = proc.returncode
+
+  if logoutput and out:
+    Logger.info(out)
+  if logoutput and err:
+    Logger.info(err)
+  return code, out, err
+
+
+# see msdn Icacls doc for rights
+def _set_file_acl(file, user, rights):
+  acls_modify_cmd = "icacls {0} /grant {1}:{2}".format(file, user, rights)
+  acls_remove_cmd = "icacls {0} /remove {1}".format(file, user)
+  code, out, err = _call_command(acls_remove_cmd)
+  if code != 0:
+    raise Fail("Can not remove rights for path {0} and user {1}".format(file, user))
+  code, out, err = _call_command(acls_modify_cmd)
+  if code != 0:
+    raise Fail("Can not set rights {0} for path {1} and user {2}".format(rights, file, user))
+
+
+class FileProvider(Provider):
+  def action_create(self):
+    path = self.resource.path
+
+    if os.path.isdir(path):
+      raise Fail("Applying %s failed, directory with name %s exists" % (self.resource, path))
+
+    dirname = os.path.dirname(path)
+    if not os.path.isdir(dirname):
+      raise Fail("Applying %s failed, parent directory %s doesn't exist" % (self.resource, dirname))
+
+    write = False
+    content = self._get_content()
+    if not os.path.exists(path):
+      write = True
+      reason = "it doesn't exist"
+    elif self.resource.replace:
+      if content is not None:
+        with open(path, "rb") as fp:
+          old_content = fp.read()
+        if content != old_content:
+          write = True
+          reason = "contents don't match"
+          if self.resource.backup:
+            self.resource.env.backup_file(path)
+
+    if write:
+      Logger.info("Writing %s because %s" % (self.resource, reason))
+      with open(path, "wb") as fp:
+        if content:
+          fp.write(content)
+
+    if self.resource.owner and self.resource.mode:
+      _set_file_acl(self.resource.path, self.resource.owner, self.resource.mode)
+
+  def action_delete(self):
+    path = self.resource.path
+
+    if os.path.isdir(path):
+      raise Fail("Applying %s failed, %s is directory not file!" % (self.resource, path))
+
+    if os.path.exists(path):
+      Logger.info("Deleting %s" % self.resource)
+      os.unlink(path)
+
+  def _get_content(self):
+    content = self.resource.content
+    if content is None:
+      return None
+    elif isinstance(content, basestring):
+      return content
+    elif hasattr(content, "__call__"):
+      return content()
+    raise Fail("Unknown source type for %s: %r" % (self, content))
+
+
+class ExecuteProvider(Provider):
+  def action_run(self):
+    if self.resource.creates:
+      if os.path.exists(self.resource.creates):
+        return
+
+    Logger.debug("Executing %s" % self.resource)
+
+    if self.resource.path != []:
+      if not self.resource.environment:
+        self.resource.environment = {}
+
+      self.resource.environment['PATH'] = os.pathsep.join(self.resource.path)
+
+    for i in range(0, self.resource.tries):
+      try:
+        code, _, _ = _call_command(self.resource.command, logoutput=self.resource.logoutput,
+                                   cwd=self.resource.cwd, env=self.resource.environment,
+                                   wait_for_finish=self.resource.wait_for_finish,
+                                   timeout=self.resource.timeout, user=self.resource.user)
+        if code != 0 and not self.resource.ignore_failures:
+          raise Fail("Failed to execute " + self.resource.command)
+        break
+      except Fail as ex:
+        if i == self.resource.tries - 1:  # last try
+          raise ex
+        else:
+          Logger.info("Retrying after %d seconds. Reason: %s" % (self.resource.try_sleep, str(ex)))
+          time.sleep(self.resource.try_sleep)
+      except ExecuteTimeoutException:
+        err_msg = ("Execution of '%s' was killed due timeout after %d seconds") % (
+          self.resource.command, self.resource.timeout)
+
+        if self.resource.on_timeout:
+          Logger.info("Executing '%s'. Reason: %s" % (self.resource.on_timeout, err_msg))
+          _call_command(self.resource.on_timeout)
+        else:
+          raise Fail(err_msg)
+
+
+class DirectoryProvider(Provider):
+  def action_create(self):
+    path = DirectoryProvider._trim_uri(self.resource.path)
+    if not os.path.exists(path):
+      Logger.info("Creating directory %s" % self.resource)
+      if self.resource.recursive:
+        os.makedirs(path)
+      else:
+        dirname = os.path.dirname(path)
+        if not os.path.isdir(dirname):
+          raise Fail("Applying %s failed, parent directory %s doesn't exist" % (self.resource, dirname))
+
+        os.mkdir(path)
+
+    if not os.path.isdir(path):
+      raise Fail("Applying %s failed, file %s already exists" % (self.resource, path))
+
+    if self.resource.owner and self.resource.mode:
+      _set_file_acl(path, self.resource.owner, self.resource.mode)
+
+  def action_delete(self):
+    path = self.resource.path
+    if os.path.exists(path):
+      if not os.path.isdir(path):
+        raise Fail("Applying %s failed, %s is not a directory" % (self.resource, path))
+
+      Logger.info("Removing directory %s and all its content" % self.resource)
+      shutil.rmtree(path)
+
+  @staticmethod
+  def _trim_uri(file_uri):
+    if file_uri.startswith("file:///"):
+      return file_uri[8:]
+    return file_uri
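
A worked sketch of the _merge_env semantics defined above (values illustrative): plain keys from the custom environment override the fresh block, while keys in merge_keys (PYTHONPATH by default) are unioned across both environments and os.environ using os.pathsep:

```python
# Illustrative values only.
fresh  = {"JAVA_HOME": "C:\\jdk", "PYTHONPATH": "C:\\agent\\lib"}
custom = {"HADOOP_HOME": "C:\\hadoop", "PYTHONPATH": "C:\\stack\\lib"}

merged = _merge_env(fresh, custom)
# merged["JAVA_HOME"]   == "C:\\jdk"      (kept from the fresh block)
# merged["HADOOP_HOME"] == "C:\\hadoop"   (added by the custom env)
# merged["PYTHONPATH"]  contains C:\agent\lib, C:\stack\lib and any PYTHONPATH
# entries from os.environ; ordering is not guaranteed because values are
# de-duplicated through a set before joining.
```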

+ 9 - 0
ambari-common/src/main/python/resource_management/libraries/functions/__init__.py

@@ -20,6 +20,8 @@ Ambari Agent
 
 """
 
+import platform
+
 from resource_management.libraries.functions.default import *
 from resource_management.libraries.functions.format import *
 from resource_management.libraries.functions.get_kinit_path import *
@@ -31,3 +33,10 @@ from resource_management.libraries.functions.get_port_from_url import *
 from resource_management.libraries.functions.hive_check import *
 from resource_management.libraries.functions.version import *
 from resource_management.libraries.functions.format_jvm_option import *
+
+IS_WINDOWS = platform.system() == "Windows"
+
+if IS_WINDOWS:
+  from resource_management.libraries.functions.windows_service_utils import *
+  from resource_management.libraries.functions.install_hdp_msi import *
+  from resource_management.libraries.functions.reload_windows_env import *

+ 6 - 2
ambari-common/src/main/python/resource_management/libraries/functions/default.py

@@ -20,7 +20,7 @@ Ambari Agent
 
 """
 
-__all__ = ["default"]
+__all__ = ['default', 'default_string']
 from resource_management.libraries.script import Script
 from resource_management.libraries.script.config_dictionary import UnknownConfiguration
 from resource_management.core.logger import Logger
@@ -37,4 +37,8 @@ def default(name, default_value):
         Logger.debug("Cannot find configuration: '%s'. Using '%s' value as default" % (name, default_value))
       return default_value
 
-  return curr_dict
+  return curr_dict
+
+def default_string(name, default_value, delimiter):
+  default_list = default(name, default_value)
+  return delimiter.join(default_list)
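
default_string builds on default() for list-valued configuration entries, joining them with an arbitrary delimiter. A sketch, assuming a hypothetical list-valued key:

```python
# The configuration path below is hypothetical.
hosts = default_string("/clusterHostInfo/zookeeper_hosts", ["localhost"], ",")
# If the key resolves to ["host1", "host2"] this yields "host1,host2";
# if it is absent, the default list produces "localhost".
```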

+ 11 - 4
ambari-common/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py

@@ -23,12 +23,19 @@ Ambari Agent
 __all__ = ["get_unique_id_and_date"]
 import datetime
 from resource_management.core import shell
-
+from ambari_commons import os_check
 def get_unique_id_and_date():
+  if os_check.OSCheck.is_windows_os():
+    from ambari_commons.os_windows import run_os_command
+    id = "unknown"
+    code, out, err = run_os_command("cmd /c vol C:")
+    for line in out.splitlines():
+      if line.startswith(" Volume Serial Number is"):
+        id = line[25:]
+        break
+  else:
     out = shell.checked_call("hostid")[1].split('\n')[-1] # bugfix: take the lastline (stdin is not tty part cut)
     id = out.strip()
 
-    now = datetime.datetime.now()
-    date = now.strftime("%M%d%y")
+  now = datetime.datetime.now()
+  date = now.strftime("%M%d%y")
 
-    return "id{id}_date{date}".format(id=id, date=date)
+  return "id{id}_date{date}".format(id=id, date=date)

+ 182 - 0
ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py

@@ -0,0 +1,182 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from ambari_commons import os_utils
+from ambari_commons.inet_utils import download_file
+from ambari_commons.os_windows import SystemWideLock
+
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.reload_windows_env import reload_windows_env
+from resource_management.libraries.functions.windows_service_utils import check_windows_service_exists
+import socket
+import os
+import glob
+
+
+__all__ = ['install_windows_msi']
+
+msi_save_dir = None
+hdp_log_dir = "c:\\hadoop\\logs"
+hdp_data_dir = "c:\\hadoopDefaultData"
+local_host = socket.getfqdn()
+db_flavor = "DERBY"
+cluster_properties = """#Log directory
+HDP_LOG_DIR={hdp_log_dir}
+
+#Data directory
+HDP_DATA_DIR={hdp_data_dir}
+
+#hosts
+NAMENODE_HOST={local_host}
+SECONDARY_NAMENODE_HOST={local_host}
+RESOURCEMANAGER_HOST={local_host}
+HIVE_SERVER_HOST={local_host}
+OOZIE_SERVER_HOST={local_host}
+WEBHCAT_HOST={local_host}
+SLAVE_HOSTS={local_host}
+ZOOKEEPER_HOSTS={local_host}
+CLIENT_HOSTS={local_host}
+HBASE_MASTER={local_host}
+HBASE_REGIONSERVERS={local_host}
+FLUME_HOSTS={local_host}
+FALCON_HOST={local_host}
+KNOX_HOST={local_host}
+STORM_NIMBUS={local_host}
+STORM_SUPERVISORS={local_host}
+
+#Database host
+DB_FLAVOR={db_flavor}
+DB_HOSTNAME={local_host}
+DB_PORT=1527
+
+#Hive properties
+HIVE_DB_NAME=hive
+HIVE_DB_USERNAME=hive
+HIVE_DB_PASSWORD=hive
+
+#Oozie properties
+OOZIE_DB_NAME=oozie
+OOZIE_DB_USERNAME=oozie
+OOZIE_DB_PASSWORD=oozie
+"""
+
+INSTALL_MSI_CMD = 'cmd /C start /wait msiexec /qn /i {hdp_msi_path} /lv {hdp_log_path} MSIUSEREALADMINDETECTION=1 ' \
+                  'HDP_LAYOUT={hdp_layout_path} DESTROY_DATA=yes HDP_USER_PASSWORD={hadoop_password_arg} HDP=yes ' \
+                  'KNOX=yes KNOX_MASTER_SECRET="AmbariHDP2Windows" FALCON=yes STORM=yes HBase=yes FLUME=yes'
+CREATE_SERVICE_SCRIPT = os.path.abspath("sbin\\createservice.ps1")
+CREATE_SERVICE_CMD = 'cmd /C powershell -File "{script}" -username hadoop -password "{password}" -servicename ' \
+                     '{servicename} -hdpresourcesdir "{resourcedir}" -servicecmdpath "{servicecmd}"'
+INSTALL_MARKER_OK = "msi.installed"
+INSTALL_MARKER_FAILED = "msi.failed"
+_working_dir = None
+
+
+def _ensure_services_created(hadoop_password):
+  resource_dir_hdfs = os.path.join(os.environ["HADOOP_HDFS_HOME"], "bin")
+  service_cmd_hdfs = os.path.join(os.environ["HADOOP_HDFS_HOME"], "bin", "hdfs.cmd")
+  if not check_windows_service_exists("journalnode"):
+    Execute(CREATE_SERVICE_CMD.format(script=CREATE_SERVICE_SCRIPT, password=hadoop_password, servicename="journalnode",
+                                      resourcedir=resource_dir_hdfs, servicecmd=service_cmd_hdfs), logoutput=True)
+  if not check_windows_service_exists("zkfc"):
+    Execute(CREATE_SERVICE_CMD.format(script=CREATE_SERVICE_SCRIPT, password=hadoop_password, servicename="zkfc",
+                                      resourcedir=resource_dir_hdfs, servicecmd=service_cmd_hdfs), logoutput=True)
+
+
+# creating symlinks to services folders to avoid using stack-dependent paths
+def _create_symlinks():
+  # folders
+  Execute("cmd /c mklink /d %HADOOP_NODE%\\hadoop %HADOOP_HOME%")
+  Execute("cmd /c mklink /d %HADOOP_NODE%\\hive %HIVE_HOME%")
+  # files pairs (symlink_path, path_template_to_target_file), use * to replace file version
+  links_pairs = [
+    ("%HADOOP_HOME%\\share\\hadoop\\tools\\lib\\hadoop-streaming.jar",
+     "%HADOOP_HOME%\\share\\hadoop\\tools\\lib\\hadoop-streaming-*.jar"),
+    ("%HIVE_HOME%\\hcatalog\\share\\webhcat\\svr\\lib\\hive-webhcat.jar",
+     "%HIVE_HOME%\\hcatalog\\share\\webhcat\\svr\\lib\\hive-webhcat-*.jar"),
+    ("%HIVE_HOME%\\lib\\zookeeper.jar", "%HIVE_HOME%\\lib\\zookeeper-*.jar")
+  ]
+  for link_pair in links_pairs:
+    link, target = link_pair
+    target = glob.glob(os.path.expandvars(target))[0].replace("\\\\", "\\")
+    Execute('cmd /c mklink "{0}" "{1}"'.format(link, target))
+
+
+# check if services exists and marker file present
+def _is_msi_installed():
+  return os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_OK)) and check_windows_service_exists("namenode")
+
+
+# check if msi was installed correctly and raise Fail in case of broken install
+def _validate_msi_install():
+  if not _is_msi_installed() and os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_FAILED)):
+    Fail("Current or previous hdp.msi install failed. Check hdp.msi install logs")
+  return _is_msi_installed()
+
+
+def _write_marker():
+  if check_windows_service_exists("namenode"):
+    open(os.path.join(_working_dir, INSTALL_MARKER_OK), "w").close()
+  else:
+    open(os.path.join(_working_dir, INSTALL_MARKER_FAILED), "w").close()
+
+
+def install_windows_msi(msi_url, save_dir, save_file, hadoop_password):
+  global _working_dir
+  _working_dir = save_dir
+  save_dir = os.path.abspath(save_dir)
+  msi_save_dir = save_dir
+  # system-wide lock to prevent simultaneous installations (e.g. when the first task failed on a timeout)
+  install_lock = SystemWideLock("hdp_msi_lock")
+  try:
+    # try to acquire lock
+    if not install_lock.lock():
+      Logger.info("Some other task currently installing hdp.msi, waiting for 10 min for finish")
+      if not install_lock.lock(600000):
+        raise Fail("Timeout on acquiring lock")
+    if _validate_msi_install():
+      Logger.info("hdp.msi already installed")
+      return
+
+    # install msi
+    download_file(msi_url, os.path.join(msi_save_dir, save_file))
+    File(os.path.join(msi_save_dir, "properties.txt"), content=cluster_properties.format(hdp_log_dir=hdp_log_dir,
+                                                                                         hdp_data_dir=hdp_data_dir,
+                                                                                         local_host=local_host,
+                                                                                         db_flavor=db_flavor))
+    hdp_msi_path = os_utils.quote_path(os.path.join(save_dir, "hdp.msi"))
+    hdp_log_path = os_utils.quote_path(os.path.join(save_dir, "hdp.log"))
+    hdp_layout_path = os_utils.quote_path(os.path.join(save_dir, "properties.txt"))
+    hadoop_password_arg = os_utils.quote_path(hadoop_password)
+
+    Execute(
+      INSTALL_MSI_CMD.format(hdp_msi_path=hdp_msi_path, hdp_log_path=hdp_log_path, hdp_layout_path=hdp_layout_path,
+                             hadoop_password_arg=hadoop_password_arg))
+    reload_windows_env()
+    # create additional services manually due to hdp.msi limitations
+    _ensure_services_created(hadoop_password)
+    _create_symlinks()
+    # finalizing install
+    _write_marker()
+    _validate_msi_install()
+  finally:
+    install_lock.unlock()
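
An invocation sketch; the URL, cache directory and password are illustrative (in this patch, Script.install_packages derives them from jdk_location, agentCacheDir and the hadoop user's password):

```python
# Hypothetical arguments, for illustration only.
install_windows_msi("http://ambari.server:8080/resources/hdp.msi",
                    "C:\\ambari\\cache", "hdp.msi", "S0mePassw0rd")
```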

+ 48 - 0
ambari-common/src/main/python/resource_management/libraries/functions/reload_windows_env.py

@@ -0,0 +1,48 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from _winreg import (OpenKey, EnumValue, HKEY_LOCAL_MACHINE, KEY_READ, CloseKey)
+import os
+
+default_whitelist = ["FALCON_CONF_DIR", "FALCON_DATA_DIR", "FALCON_HOME", "FALCON_LOG_DIR", "FLUME_HOME",
+                     "HADOOP_COMMON_HOME", "HADOOP_CONF_DIR", "HADOOP_HDFS_HOME", "HADOOP_HOME", "HADOOP_LOG_DIR",
+                     "HADOOP_MAPRED_HOME", "HADOOP_NODE", "HADOOP_NODE_INSTALL_ROOT", "HADOOP_PACKAGES",
+                     "HADOOP_SETUP_TOOLS", "HADOOP_YARN_HOME", "HBASE_CONF_DIR", "HBASE_HOME", "HCAT_HOME",
+                     "HDFS_AUDIT_LOGGER", "HDFS_DATA_DIR", "HIVE_CONF_DIR", "HIVE_HOME", "HIVE_LIB_DIR", "HIVE_LOG_DIR",
+                     "HIVE_OPTS", "KNOX_CONF_DIR", "KNOX_HOME", "KNOX_LOG_DIR", "MAHOUT_HOME", "OOZIE_DATA",
+                     "OOZIE_HOME", "OOZIE_LOG", "OOZIE_ROOT", "PIG_HOME", "SQOOP_HOME", "STORM_CONF_DIR", "STORM_HOME",
+                     "STORM_LOG_DIR", "WEBHCAT_CONF_DIR", "YARN_LOG_DIR", "ZOOKEEPER_CONF_DIR", "ZOOKEEPER_HOME",
+                     "ZOOKEEPER_LIB_DIR", "ZOO_LOG_DIR"]
+def reload_windows_env(keys_white_list=default_whitelist):
+  root = HKEY_LOCAL_MACHINE
+  subkey = r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment'
+  key = OpenKey(root, subkey, 0, KEY_READ)
+  finish = False
+  index = 0
+  while not finish:
+    try:
+      _key, _value, _ = EnumValue(key, index)
+      if (_key in keys_white_list):
+        os.environ[_key] = _value
+    except WindowsError:
+      finish = True
+    index += 1
+  CloseKey(key)
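
After hdp.msi writes machine-level variables to the registry, an already-running agent process will not see them until they are re-read. A short sketch:

```python
# Sketch: refresh whitelisted machine-level variables in this process.
import os
from resource_management.libraries.functions.reload_windows_env import reload_windows_env

reload_windows_env()                 # re-reads the HKLM Session Manager\Environment key
print os.environ.get("HADOOP_HOME")  # reflects the registry value, if whitelisted
```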

+ 30 - 0
ambari-common/src/main/python/resource_management/libraries/functions/tar_archive.py

@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import tarfile
+from contextlib import closing
+
+def archive_dir(output_filename, input_dir):
+  # closing() already guarantees the archive is closed; no extra try/finally is needed.
+  with closing(tarfile.open(output_filename, "w:gz")) as tar:
+    tar.add(input_dir, arcname=os.path.basename("."))

+ 42 - 0
ambari-common/src/main/python/resource_management/libraries/functions/windows_service_utils.py

@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+__all__ = ['check_windows_service_status', 'check_windows_service_exists']
+
+import win32service
+
+_schSCManager = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_ALL_ACCESS)
+
+def check_windows_service_status(service_name):
+  _service_handle = win32service.OpenService(_schSCManager, service_name, win32service.SERVICE_ALL_ACCESS)
+  if win32service.QueryServiceStatusEx(_service_handle)["CurrentState"] == win32service.SERVICE_STOPPED:
+      raise ComponentIsNotRunning()
+
+def check_windows_service_exists(service_name):
+  typeFilter = win32service.SERVICE_WIN32
+  stateFilter = win32service.SERVICE_STATE_ALL
+  statuses = win32service.EnumServicesStatus(_schSCManager, typeFilter, stateFilter)
+  for (short_name, desc, status) in statuses:
+    if short_name == service_name:
+      return True
+  return False

+ 40 - 0
ambari-common/src/main/python/resource_management/libraries/functions/zip_archive.py

@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import zipfile
+
+def _zip_dir(zip_file, root):
+  # the parameter is named zip_file to avoid shadowing the zip() builtin
+  for dirname, dirnames, filenames in os.walk(root):
+    for filename in filenames:
+      if len(dirname) > len(root):
+        rel_path = os.path.relpath(dirname, root)
+        arch_name = rel_path + os.sep + filename
+      else:
+        arch_name = filename
+      zip_file.write(os.path.join(dirname, filename), arch_name)
+
+
+def archive_dir(output_filename, input_dir):
+  zipf = zipfile.ZipFile(output_filename, 'w')
+  try:
+    _zip_dir(zipf, input_dir)
+  finally:
+    zipf.close()
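
zip_archive.archive_dir deliberately mirrors the signature of tar_archive.archive_dir above, so script.py below can import whichever matches the platform. A usage sketch with illustrative paths:

```python
# Sketch: archive a config directory into a zip; paths are illustrative.
archive_dir("C:\\tmp\\configs.zip", "C:\\tmp\\conf_dir")
```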

+ 3 - 0
ambari-common/src/main/python/resource_management/libraries/providers/__init__.py

@@ -29,6 +29,9 @@ PROVIDERS = dict(
   ),
   ubuntu=dict(
     Repository="resource_management.libraries.providers.repository.UbuntuRepositoryProvider",
+  ),
+  winsrv=dict(
+
   ),
   default=dict(
     ExecuteHadoop="resource_management.libraries.providers.execute_hadoop.ExecuteHadoopProvider",

+ 7 - 6
ambari-common/src/main/python/resource_management/libraries/providers/xml_config.py

@@ -21,13 +21,14 @@ Ambari Agent
 """
 
 import time
+import os
 from resource_management import *
 
 class XmlConfigProvider(Provider):
   def action_create(self):
     filename = self.resource.filename
     xml_config_provider_config_dir = self.resource.conf_dir
-    
+
     # |e - for html-like escaping of <,>,',"
     config_content = InlineTemplate('''<!--{{time.asctime(time.localtime())}}-->
     <configuration>
@@ -48,12 +49,12 @@ class XmlConfigProvider(Provider):
     {% endfor %}
   </configuration>''', extra_imports=[time], configurations_dict=self.resource.configurations,
                                     configuration_attrs=self.resource.configuration_attributes)
-   
-  
-    Logger.info(format("Generating config: {xml_config_provider_config_dir}/{filename}"))
-    
+
+    xml_config_dest_file_path = os.path.join(xml_config_provider_config_dir, filename)
+    Logger.info("Generating config: {0}".format(xml_config_dest_file_path))
+
     with Environment.get_instance_copy() as env:
-      File (format("{xml_config_provider_config_dir}/{filename}"),
+      File (xml_config_dest_file_path,
         content = config_content,
         owner = self.resource.owner,
         group = self.resource.group,

+ 115 - 68
ambari-common/src/main/python/resource_management/libraries/script/script.py

@@ -17,7 +17,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 '''
-import tarfile
 import tempfile
 
 __all__ = ["Script"]
@@ -26,19 +25,24 @@ import os
 import sys
 import json
 import logging
-from contextlib import closing
-
+import platform
 
 from resource_management.libraries.resources import XmlConfig
 from resource_management.libraries.resources import PropertiesFile
 from resource_management.core.resources import File, Directory
 from resource_management.core.source import InlineTemplate
-
 from resource_management.core.environment import Environment
 from resource_management.core.exceptions import Fail, ClientComponentHasNoStatus, ComponentIsNotRunning
 from resource_management.core.resources.packaging import Package
-from resource_management.libraries.script.config_dictionary import ConfigDictionary
+from resource_management.libraries.script.config_dictionary import ConfigDictionary, UnknownConfiguration
 
+IS_WINDOWS = platform.system() == "Windows"
+if IS_WINDOWS:
+  from resource_management.libraries.functions.install_hdp_msi import install_windows_msi
+  from resource_management.libraries.functions.reload_windows_env import reload_windows_env
+  from resource_management.libraries.functions.zip_archive import archive_dir
+else:
+  from resource_management.libraries.functions.tar_archive import archive_dir
 
 USAGE = """Usage: {0} <COMMAND> <JSON_CONFIG> <BASEDIR> <STROUTPUT> <LOGGING_LEVEL> <TMP_DIR>
 
@@ -50,6 +54,19 @@ USAGE = """Usage: {0} <COMMAND> <JSON_CONFIG> <BASEDIR> <STROUTPUT> <LOGGING_LEV
 <TMP_DIR> temporary directory for executable scripts. Ex: /var/lib/ambari-agent/data/tmp
 """
 
+_PASSWORD_MAP = {"/configurations/cluster-env/hadoop.user.name":"/configurations/cluster-env/hadoop.user.password"}
+
+def get_path_from_configuration(name, configuration):
+  subdicts = filter(None, name.split('/'))
+
+  for x in subdicts:
+    if x in configuration:
+      configuration = configuration[x]
+    else:
+      return None
+
+  return configuration
+
 class Script(object):
   """
   Executes a command for custom service. stdout and stderr are written to
@@ -91,13 +108,13 @@ class Script(object):
     cherr.setFormatter(formatter)
     logger.addHandler(cherr)
     logger.addHandler(chout)
-    
+
     # parse arguments
-    if len(sys.argv) < 7: 
+    if len(sys.argv) < 7:
      logger.error("Script expects at least 6 arguments")
      print USAGE.format(os.path.basename(sys.argv[0])) # print to stdout
      sys.exit(1)
-    
+
     command_name = str.lower(sys.argv[1])
     command_data_file = sys.argv[2]
     basedir = sys.argv[3]
@@ -108,11 +125,23 @@ class Script(object):
     logging_level_str = logging._levelNames[logging_level]
     chout.setLevel(logging_level_str)
     logger.setLevel(logging_level_str)
-      
+
+    # On Windows we need to reload some environment variables manually, because there are no default config paths
+    # (like /etc/something/conf on Linux). When these variables are created by one Script execution they cannot be
+    # updated inside the agent, so subsequent Script executions would not see the new values otherwise.
+    if platform.system() == "Windows":
+      reload_windows_env()
+
     try:
       with open(command_data_file, "r") as f:
         pass
         Script.config = ConfigDictionary(json.load(f))
+        # load passwords here (used on Windows to impersonate different users)
+        Script.passwords = {}
+        for k, v in _PASSWORD_MAP.iteritems():
+          if get_path_from_configuration(k, Script.config) and get_path_from_configuration(v, Script.config):
+            Script.passwords[get_path_from_configuration(k, Script.config)] = get_path_from_configuration(v, Script.config)
+
     except IOError:
       logger.exception("Can not read json file with command parameters: ")
       sys.exit(1)
@@ -150,6 +179,9 @@ class Script(object):
     """
     return Script.config
 
+  @staticmethod
+  def get_password(user):
+    return Script.passwords[user]
 
   @staticmethod
   def get_tmp_dir():
@@ -170,28 +202,39 @@ class Script(object):
     self.install_packages(env)
 
 
-  def install_packages(self, env, exclude_packages=[]):
-    """
-    List of packages that are required< by service is received from the server
-    as a command parameter. The method installs all packages
-    from this list
-    """
-    config = self.get_config()
-    
-    try:
-      package_list_str = config['hostLevelParams']['package_list']
-      if isinstance(package_list_str,basestring) and len(package_list_str) > 0:
-        package_list = json.loads(package_list_str)
-        for package in package_list:
-          if not package['name'] in exclude_packages:
-            name = package['name']
-            Package(name)
-    except KeyError:
-      pass # No reason to worry
-    
-    #RepoInstaller.remove_repos(config)
-
-
+  if not IS_WINDOWS:
+    def install_packages(self, env, exclude_packages=[]):
+      """
+      The list of packages required by the service is received from the server
+      as a command parameter. The method installs all packages from this list.
+      """
+      config = self.get_config()
+      try:
+        package_list_str = config['hostLevelParams']['package_list']
+        if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
+          package_list = json.loads(package_list_str)
+          for package in package_list:
+            if not package['name'] in exclude_packages:
+              name = package['name']
+              Package(name)
+      except KeyError:
+        pass  # No reason to worry
+
+        # RepoInstaller.remove_repos(config)
+      pass
+  else:
+    def install_packages(self, env, exclude_packages=[]):
+      """
+      On Windows the stack is delivered as a single hdp.msi, so instead of a
+      per-package list this method downloads and installs the MSI.
+      """
+      config = self.get_config()
+
+      install_windows_msi(os.path.join(config['hostLevelParams']['jdk_location'], "hdp.msi"),
+                          config["hostLevelParams"]["agentCacheDir"], "hdp.msi", self.get_password("hadoop"))
+      pass
 
   def fail_with_error(self, message):
     """
@@ -239,56 +282,60 @@ class Script(object):
     self.fail_with_error('configure method isn\'t implemented')
 
   def generate_configs_get_template_file_content(self, filename, dicts):
-    import params
+    config = self.get_config()
     content = ''
     for dict in dicts.split(','):
-      if dict.strip() in params.config['configurations']:
-        content += params.config['configurations'][dict.strip()]['content']
+      if dict.strip() in config['configurations']:
+        try:
+          content += config['configurations'][dict.strip()]['content']
+        except Fail:
+          # 'content' section not available in the component client configuration
+          pass
 
     return content
 
   def generate_configs_get_xml_file_content(self, filename, dict):
-    import params
-    return {'configurations':params.config['configurations'][dict],
-            'configuration_attributes':params.config['configuration_attributes'][dict]}
+    config = self.get_config()
+    return {'configurations':config['configurations'][dict],
+            'configuration_attributes':config['configuration_attributes'][dict]}
     
   def generate_configs_get_xml_file_dict(self, filename, dict):
-    import params
-    return params.config['configurations'][dict]
+    config = self.get_config()
+    return config['configurations'][dict]
 
   def generate_configs(self, env):
     """
     Generates config files and stores them as an archive in tmp_dir
     based on xml_configs_list and env_configs_list from commandParams
     """
-    import params
-    env.set_params(params)
-    xml_configs_list = params.config['commandParams']['xml_configs_list']
-    env_configs_list = params.config['commandParams']['env_configs_list']
-    properties_configs_list = params.config['commandParams']['properties_configs_list']
-    
-    conf_tmp_dir = tempfile.mkdtemp()
-    output_filename = os.path.join(self.get_tmp_dir(),params.config['commandParams']['output_file'])
+    config = self.get_config()
+
+    xml_configs_list = config['commandParams']['xml_configs_list']
+    env_configs_list = config['commandParams']['env_configs_list']
+    properties_configs_list = config['commandParams']['properties_configs_list']
 
     Directory(self.get_tmp_dir(), recursive=True)
-    for file_dict in xml_configs_list:
-      for filename, dict in file_dict.iteritems():
-        XmlConfig(filename,
-                  conf_dir=conf_tmp_dir,
-                  **self.generate_configs_get_xml_file_content(filename, dict)
-        )
-    for file_dict in env_configs_list:
-      for filename,dicts in file_dict.iteritems():
-        File(os.path.join(conf_tmp_dir, filename),
-             content=InlineTemplate(self.generate_configs_get_template_file_content(filename, dicts)))
-        
-    for file_dict in properties_configs_list:
-      for filename, dict in file_dict.iteritems():
-        PropertiesFile(os.path.join(conf_tmp_dir, filename),
-          properties=self.generate_configs_get_xml_file_dict(filename, dict)
-        )
-      
-    with closing(tarfile.open(output_filename, "w:gz")) as tar:
-      tar.add(conf_tmp_dir, arcname=os.path.basename("."))
-      tar.close()
-    Directory(conf_tmp_dir, action="delete")
+
+    conf_tmp_dir = tempfile.mkdtemp(dir=self.get_tmp_dir())
+    output_filename = os.path.join(self.get_tmp_dir(), config['commandParams']['output_file'])
+
+    try:
+      for file_dict in xml_configs_list:
+        for filename, dict in file_dict.iteritems():
+          XmlConfig(filename,
+                    conf_dir=conf_tmp_dir,
+                    **self.generate_configs_get_xml_file_content(filename, dict)
+          )
+      for file_dict in env_configs_list:
+        for filename,dicts in file_dict.iteritems():
+          File(os.path.join(conf_tmp_dir, filename),
+               content=InlineTemplate(self.generate_configs_get_template_file_content(filename, dicts)))
+
+      for file_dict in properties_configs_list:
+        for filename, dict in file_dict.iteritems():
+          PropertiesFile(os.path.join(conf_tmp_dir, filename),
+            properties=self.generate_configs_get_xml_file_dict(filename, dict)
+          )
+      archive_dir(output_filename, conf_tmp_dir)
+    finally:
+      Directory(conf_tmp_dir, action="delete")
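
The rewritten generate_configs stages files in a mkdtemp directory created inside tmp_dir and cleans it up in a finally block, with archive_dir producing the platform-appropriate archive. A rough stdlib-only sketch of the same shape (shutil.make_archive stands in for archive_dir; file names are illustrative):

import os
import shutil
import tempfile

def archive_configs(tmp_dir, base_name):
  conf_tmp_dir = tempfile.mkdtemp(dir=tmp_dir)   # staging area lives under tmp_dir
  try:
    with open(os.path.join(conf_tmp_dir, "example-site.xml"), "w") as f:
      f.write("<configuration/>")
    # "gztar" yields base_name.tar.gz; a Windows build might choose "zip" instead
    return shutil.make_archive(os.path.join(tmp_dir, base_name), "gztar", conf_tmp_dir)
  finally:
    shutil.rmtree(conf_tmp_dir)                  # runs even if archiving fails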

+ 1 - 0
ambari-server/conf/unix/ambari.properties

@@ -35,6 +35,7 @@ bootstrap.setup_agent.script=/usr/lib/python2.6/site-packages/ambari_server/setu
 recommendations.dir=/var/run/ambari-server/stack-recommendations
 stackadvisor.script=/var/lib/ambari-server/resources/scripts/stack_advisor.py
 server.tmp.dir=/var/lib/ambari-server/tmp
+ambari.python.wrap=ambari-python-wrap
 
 api.authenticate=true
 server.connection.max.idle.millis=900000

+ 19 - 0
ambari-server/conf/windows/ambari-env.cmd

@@ -0,0 +1,19 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+
+set AMBARI_PASSHPHRASE=DEV
+set AMBARI_JVM_ARGS=%AMBARI_JVM_ARGS% -Xms512m -Xmx2048m -Djava.security.auth.login.config=conf\krb5JAASLogin.conf -Djava.security.krb5.conf=conf\krb5.conf -Djavax.security.auth.useSubjectCredsOnly=false

+ 82 - 0
ambari-server/conf/windows/ambari.properties

@@ -0,0 +1,82 @@
+# Copyright 2011 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+security.server.keys_dir=keystore
+resources.dir=resources
+shared.resources.dir=sbin\\ambari_commons\\resources
+custom.action.definitions=resources\\custom_action_definitions
+
+#Comma-separated list of JDK versions
+#java.releases=jdk1.8.20,jdk1.6.31
+java.releases=jdk1.7.67
+jdk1.7.67.desc=Oracle JDK 1.7.67
+jdk1.7.67.url=http://public-repo-1.hortonworks.com/ARTIFACTS/jdk-7u67-windows-x64.exe
+jdk1.7.67.dest-file=jdk-7u67-windows-x64.exe
+jdk1.7.67.jcpol-url=http://public-repo-1.hortonworks.com/ARTIFACTS/UnlimitedJCEPolicyJDK7.zip
+jdk1.7.67.jcpol-file=UnlimitedJCEPolicyJDK7.zip
+jdk1.7.67.home=C:\\jdk1.7.0_67
+
+metadata.path=resources\\stacks
+server.version.file=version
+webapp.dir=web
+bootstrap.dir=bootstrap
+bootstrap.script=bootstrap\\bootstrap.py
+bootstrap.setup_agent.script=bootstrap\\setupAgent.py
+api.authenticate=true
+server.connection.max.idle.millis=900000
+server.fqdn.service.url=http://127.0.0.1/latest/meta-data/public-hostname
+server.stages.parallel=true
+
+# Scheduler settings
+server.execution.scheduler.isClustered=false
+server.execution.scheduler.maxThreads=5
+server.execution.scheduler.maxDbConnections=5
+server.execution.scheduler.misfire.toleration.minutes=480
+
+recommendations.dir=\\var\\run\\ambari-server\\stack-recommendations
+stackadvisor.script=resources\\scripts\\stack_advisor.py
+server.tmp.dir=\\var\\run\\ambari-server\\tmp
+views.dir=resources\\views
+ambari.python.wrap=python.exe
+
+# Default timeout in seconds before task is killed
+agent.task.timeout=600
+
+# thread pool maximums
+client.threadpool.size.max=25
+agent.threadpool.size.max=25
+
+# linux open-file limit
+ulimit.open.files=10000
+
+#java.home=C:\j2se1.8.0_05
+
+#server.jdbc.rca.driver=com.microsoft.sqlserver.jdbc.SQLServerDriver
+#server.jdbc.rca.url=jdbc:sqlserver://localhost\\SQLEXPRESS;databaseName=ambari;integratedSecurity=true
+##server.jdbc.rca.user.name=ambari
+##server.jdbc.rca.user.passwd=etc\\ambari-server\\conf\\password.dat
+
+#server.jdbc.driver=com.microsoft.sqlserver.jdbc.SQLServerDriver
+#server.jdbc.driver.path=C:\\Program Files\\Microsoft JDBC DRIVER 4.0 for SQL Server\\sqljdbc_4.0\\enu\\sqljdbc4.jar
+#server.jdbc.url=jdbc:sqlserver://localhost\\SQLEXPRESS;databaseName=ambari;integratedSecurity=true
+#server.jdbc.schema=ambari
+##server.jdbc.user.passwd=etc\\ambari-server\\conf\\password.dat
+##server.jdbc.user.name=ambari
+#scom.sink.db.driver=com.microsoft.sqlserver.jdbc.SQLServerDriver
+##scom.sink.db.url=jdbc:sqlserver://[server]:[port];databaseName=[databaseName];user=[user];password=[password]
+#scom.sink.db.url=jdbc:sqlserver://localhost\\SQLEXPRESS;databaseName=HadoopMetrics;integratedSecurity=true

+ 29 - 0
ambari-server/conf/windows/ca.config

@@ -0,0 +1,29 @@
+[ ca ]
+default_ca             = CA_CLIENT
+[ CA_CLIENT ]
+dir		               = keystore\\db
+certs                  = $dir\\certs
+new_certs_dir          = $dir\\newcerts
+
+database               = $dir\\index.txt
+serial                 = $dir\\serial
+default_days           = 365
+
+default_crl_days       = 7
+default_md             = md5
+
+policy                 = policy_anything
+
+[ policy_anything ]
+countryName            = optional
+stateOrProvinceName    = optional
+localityName           = optional
+organizationName       = optional
+organizationalUnitName = optional
+commonName             = optional
+emailAddress           = optional
+
+[ jdk7_ca ]
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always,issuer:always
+basicConstraints = CA:true

+ 61 - 0
ambari-server/conf/windows/install-helper.cmd

@@ -0,0 +1,61 @@
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+rem ##################################################################
+rem #                      SERVER INSTALL HELPER                     #
+rem ##################################################################
+
+set COMMON_DIR="/usr/lib/python2.6/site-packages/common_functions"
+set INSTALL_HELPER_AGENT="/var/lib/ambari-agent/install-helper.sh"
+set COMMON_DIR_SERVER="/usr/lib/ambari-server/lib/common_functions"
+
+set PYTHON_WRAPER_TARGET="/usr/bin/ambari-python-wrap"
+set PYTHON_WRAPER_SOURCE="/var/lib/ambari-server/ambari-python-wrap"
+
+do_install(){
+  # setting common_functions shared resource
+  if [ ! -d "$COMMON_DIR" ]; then
+    ln -s "$COMMON_DIR_SERVER" "$COMMON_DIR"
+  fi
+  # setting python-wrapper script
+  if [ ! -f "$PYTHON_WRAPER_TARGET" ]; then
+    ln -s "$PYTHON_WRAPER_SOURCE" "$PYTHON_WRAPER_TARGET"
+  fi
+}
+
+do_remove(){
+  if [ -d "$COMMON_DIR" ]; then  # common dir exists
+    rm -f "$COMMON_DIR"
+  fi
+
+  if [ -f "$PYTHON_WRAPER_TARGET" ]; then
+    rm -f "$PYTHON_WRAPER_TARGET"
+  fi
+
+  # if server package exists, restore their settings
+  if [ -f "$INSTALL_HELPER_AGENT" ]; then  #  call agent shared files installer
+      $INSTALL_HELPER_AGENT install
+  fi
+}
+
+
+case "$1" in
+install)
+  do_install
+  ;;
+remove)
+  do_remove
+  ;;
+esac

+ 12 - 0
ambari-server/conf/windows/krb5JAASLogin.conf

@@ -0,0 +1,12 @@
+com.sun.security.jgss.initiate {
+    com.sun.security.auth.module.Krb5LoginModule required
+    renewTGT=true
+    doNotPrompt=true
+    useKeyTab=true
+    keyTab="etc\\security\\keytabs\\ambari.keytab"
+    principal="ambari@EXAMPLE.COM"
+    isInitiator=true
+    storeKey=true
+    useTicketCache=true
+    client=true;
+};

+ 68 - 0
ambari-server/conf/windows/log4j.properties

@@ -0,0 +1,68 @@
+# Copyright 2011 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+ambari.root.logger=INFO,DRFA
+ambari.log.dir=\\var\\log\\ambari-server-1.3.0-SNAPSHOT\\
+ambari.log.file=ambari-server.log
+ambari.config-changes.file=ambari-config-changes.log
+
+
+# Define the root logger to the system property "ambari.root.logger".
+log4j.rootLogger=${ambari.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${ambari.log.dir}\${ambari.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+# Log config changes
+log4j.logger.configchange=INFO,configchange
+log4j.additivity.configchange=false
+log4j.appender.configchange=org.apache.log4j.FileAppender
+log4j.appender.configchange.File=${ambari.log.dir}\${ambari.config-changes.file}
+log4j.appender.configchange.layout=org.apache.log4j.PatternLayout
+log4j.appender.configchange.layout.ConversionPattern=%d{ISO8601} %5p - %m%n

+ 186 - 186
ambari-server/docs/api/v1/clusters-cluster.md

@@ -66,191 +66,191 @@ Get information for the cluster "cluster001".
     
     200 OK
     {
-    	"href" : "http://your.ambari.server/api/v1/clusters/cluster001",
-    	"Clusters" : {
-    		"cluster_id" : 9,
-    		"cluster_name" : "cluster001",
-    		"health_report" : {
-    			"Host/stale_config" : 1,
-    			"Host/maintenance_state" : 0,
-    			"Host/host_state/HEALTHY" : 3,
-    			"Host/host_state/UNHEALTHY" : 0,
-    			"Host/host_state/HEARTBEAT_LOST" : 0,
-    			"Host/host_state/INIT" : 0,
-    			"Host/host_status/HEALTHY" : 3,
-    			"Host/host_status/UNHEALTHY" : 0,
-    			"Host/host_status/UNKNOWN" : 0,
-    			"Host/host_status/ALERT" : 0
-    		},
-    		"provisioning_state" : "INIT",
-    		"total_hosts" : 3,
-    		"version" : "HDP-2.0",
-    		"desired_configs" : {
-    			"capacity-scheduler" : {
-    				"user" : "admin",
-    				"tag" : "version1408514705943"
-    			},
-    			"core-site" : {
-    				"user" : "admin",
-    				"tag" : "version1409806913314"
-    			},
-    			"global" : {
-    				"user" : "admin",
-    				"tag" : "version1409806913314"
-    			},
-    			"hdfs-log4j" : {
-    				"user" : "admin",
-    				"tag" : "version1"
-    			},
-    			"hdfs-site" : {
-    				"user" : "admin",
-    				"tag" : "version1407908591996"
-    			},
-    			"mapred-site" : {
-    				"user" : "admin",
-    				"tag" : "version1408514705943"
-    			},
-    			"mapreduce2-log4j" : {
-    				"user" : "admin",
-    				"tag" : "version1408514705943"
-    			},
-    			"yarn-log4j" : {
-    				"user" : "admin",
-    				"tag" : "version1408514705943"
-    			},
-    			"yarn-site" : {
-    				"user" : "admin",
-    				"tag" : "version1408514705943"
-    			},
-    			"zoo.cfg" : {
-    				"user" : "admin",
-    				"tag" : "version1"
-    			},
-    			"zookeeper-log4j" : {
-    				"user" : "admin",
-    				"tag" : "version1"
-    			}
-    		}
-    	},
-    	"alerts" : {
-    		"summary" : {
-    			"CRITICAL" : 1,
-    			"OK" : 2,
-    			"PASSIVE" : 0,
-    			"WARNING" : 0
-    		}
-    	},
-    	"requests" : [
-    		{
-    			"href" : "http://your.ambari.server/api/v1/clusters/cluster001/requests/304",
-    			"Requests" : {
-    			"cluster_name" : "cluster001",
-    			"id" : 304
-    			}
-    		},
-    		{
-    			"href" : "http://your.ambari.server/api/v1/clusters/cluster001/requests/305",
-    			"Requests" : {
-    			"cluster_name" : "cluster001",
-    			"id" : 305
-    			}
-    		}
-    		],
-    	"services" : [
-    	{
-    		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/GANGLIA",
-    		"ServiceInfo" : {
-    		"cluster_name" : "cluster001",
-    		"service_name" : "GANGLIA"
-    		}
-    	},
-    	{
-    		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/HDFS",
-    		"ServiceInfo" : {
-    		"cluster_name" : "cluster001",
-    		"service_name" : "HDFS"
-    		}
-    	},
-    	{
-    		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/MAPREDUCE2",
-    		"ServiceInfo" : {
-    		"cluster_name" : "cluster001",
-    		"service_name" : "MAPREDUCE2"
-    		}
-    	},
-    	{
-    		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/ZOOKEEPER",
-    		"ServiceInfo" : {
-    		"cluster_name" : "cluster001",
-    		"service_name" : "ZOOKEEPER"
-    		}
-    	}
+	"href" : "http://your.ambari.server/api/v1/clusters/cluster001",
+	"Clusters" : {
+		"cluster_id" : 9,
+		"cluster_name" : "cluster001",
+		"health_report" : {
+			"Host/stale_config" : 1,
+			"Host/maintenance_state" : 0,
+			"Host/host_state/HEALTHY" : 3,
+			"Host/host_state/UNHEALTHY" : 0,
+			"Host/host_state/HEARTBEAT_LOST" : 0,
+			"Host/host_state/INIT" : 0,
+			"Host/host_status/HEALTHY" : 3,
+			"Host/host_status/UNHEALTHY" : 0,
+			"Host/host_status/UNKNOWN" : 0,
+			"Host/host_status/ALERT" : 0
+		},
+		"provisioning_state" : "INIT",
+		"total_hosts" : 3,
+		"version" : "HDP-2.0",
+		"desired_configs" : {
+			"capacity-scheduler" : {
+				"user" : "admin",
+				"tag" : "version1408514705943"
+			},
+			"core-site" : {
+				"user" : "admin",
+				"tag" : "version1409806913314"
+			},
+			"global" : {
+				"user" : "admin",
+				"tag" : "version1409806913314"
+			},
+			"hdfs-log4j" : {
+				"user" : "admin",
+				"tag" : "version1"
+			},
+			"hdfs-site" : {
+				"user" : "admin",
+				"tag" : "version1407908591996"
+			},
+			"mapred-site" : {
+				"user" : "admin",
+				"tag" : "version1408514705943"
+			},
+			"mapreduce2-log4j" : {
+				"user" : "admin",
+				"tag" : "version1408514705943"
+			},
+			"yarn-log4j" : {
+				"user" : "admin",
+				"tag" : "version1408514705943"
+			},
+			"yarn-site" : {
+				"user" : "admin",
+				"tag" : "version1408514705943"
+			},
+			"zoo.cfg" : {
+				"user" : "admin",
+				"tag" : "version1"
+			},
+			"zookeeper-log4j" : {
+				"user" : "admin",
+				"tag" : "version1"
+			}
+		}
+	},
+	"alerts" : {
+		"summary" : {
+			"CRITICAL" : 1,
+			"OK" : 2,
+			"PASSIVE" : 0,
+			"WARNING" : 0
+		}
+	},
+	"requests" : [
+		{
+			"href" : "http://your.ambari.server/api/v1/clusters/cluster001/requests/304",
+			"Requests" : {
+			"cluster_name" : "cluster001",
+			"id" : 304
+			}
+		},
+		{
+			"href" : "http://your.ambari.server/api/v1/clusters/cluster001/requests/305",
+			"Requests" : {
+			"cluster_name" : "cluster001",
+			"id" : 305
+			}
+		}
+		],
+	"services" : [
+	{
+		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/GANGLIA",
+		"ServiceInfo" : {
+		"cluster_name" : "cluster001",
+		"service_name" : "GANGLIA"
+		}
+	},
+	{
+		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/HDFS",
+		"ServiceInfo" : {
+		"cluster_name" : "cluster001",
+		"service_name" : "HDFS"
+		}
+	},
+	{
+		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/MAPREDUCE2",
+		"ServiceInfo" : {
+		"cluster_name" : "cluster001",
+		"service_name" : "MAPREDUCE2"
+		}
+	},
+	{
+		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/ZOOKEEPER",
+		"ServiceInfo" : {
+		"cluster_name" : "cluster001",
+		"service_name" : "ZOOKEEPER"
+		}
+	}
     	],
-    	"config_groups" : [
-    	{
-    		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/config_groups/2",
-    		"ConfigGroup" : {
-    		 "cluster_name" : "cluster001",
-    		  "id" : 2
-    		}
-    	}
-    	],
-    	"workflows" : [ ],
-    	"hosts" : [
-    	{
-    		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/hosts/host1.domain.com",
-    		"Hosts" : {
-    		  "cluster_name" : "cluster001",
-    		  "host_name" : "host1.domain.com"
-    		}
-    	},
-    	{
-    		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/hosts/host2.domain.com",
-    		"Hosts" : {
-    		  "cluster_name" : "cluster001",
-    		  "host_name" : "host2.domain.com"
-    		}
-    	},
-    	{
-    		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/hosts/host3.domain.com",
-    		"Hosts" : {
-    		  "cluster_name" : "cluster001",
-    		  "host_name" : "host3.domain.com"
-    		}
-    	}
-    	],
-    	"configurations" : [
-    	{
-    		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=core-site&tag=version1",
-    		"tag" : "version1",
-    		"type" : "core-site",
-    		"Config" : {
-    		  "cluster_name" : "cluster001"
-    		}
-    	},
-    	{
-    		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=global&tag=version1",
-    		"tag" : "version1",
-    		"type" : "global",
-    		"Config" : {
-    		  "cluster_name" : "cluster001"
-    		}
-    	},
-    	{
-    		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=hdfs-site&tag=version1",
-    		"tag" : "version1",
-    		"type" : "hdfs-site",
-    		"Config" : {
-    		  "cluster_name" : "cluster001"
-    		}
-    	},
-    	{
-    		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=zoo.cfg&tag=version1",
-    		"tag" : "version1",
-    		"type" : "zoo.cfg",
-    		"Config" : {
-    		  "cluster_name" : "cluster001"
-    		}
-    	},
-    	]
+	"config_groups" : [
+	{
+		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/config_groups/2",
+		"ConfigGroup" : {
+		 "cluster_name" : "cluster001",
+		  "id" : 2
+		}
+	}
+	],
+	"workflows" : [ ],
+	"hosts" : [
+	{
+		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/hosts/host1.domain.com",
+		"Hosts" : {
+		  "cluster_name" : "cluster001",
+		  "host_name" : "host1.domain.com"
+		}
+	},
+	{
+		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/hosts/host2.domain.com",
+		"Hosts" : {
+		  "cluster_name" : "cluster001",
+		  "host_name" : "host2.domain.com"
+		}
+	},
+	{
+		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/hosts/host3.domain.com",
+		"Hosts" : {
+		  "cluster_name" : "cluster001",
+		  "host_name" : "host3.domain.com"
+		}
+	}
+	],
+	"configurations" : [
+	{
+		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=core-site&tag=version1",
+		"tag" : "version1",
+		"type" : "core-site",
+		"Config" : {
+		  "cluster_name" : "cluster001"
+		}
+	},
+	{
+		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=global&tag=version1",
+		"tag" : "version1",
+		"type" : "global",
+		"Config" : {
+		  "cluster_name" : "cluster001"
+		}
+	},
+	{
+		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=hdfs-site&tag=version1",
+		"tag" : "version1",
+		"type" : "hdfs-site",
+		"Config" : {
+		  "cluster_name" : "cluster001"
+		}
+	},
+	{
+		"href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=zoo.cfg&tag=version1",
+		"tag" : "version1",
+		"type" : "zoo.cfg",
+		"Config" : {
+		  "cluster_name" : "cluster001"
+		}
+	}
+	]
     }

+ 188 - 6
ambari-server/pom.xml

@@ -18,7 +18,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.ambari</groupId>
   <artifactId>ambari-server</artifactId>
-  <packaging>jar</packaging>
+  <packaging>${packagingFormat}</packaging>
   <name>Ambari Server</name>
   <version>1.3.0-SNAPSHOT</version>
   <description>Ambari Server</description>
@@ -105,11 +105,36 @@
         <artifactId>maven-compiler-plugin</artifactId>
         <version>3.0</version>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <version>1.7</version>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <configuration>
+              <tasks>
+                <jar destfile="target/DBConnectionVerification.jar">
+                  <fileset dir="${basedir}/target/classes/"
+                    includes="**/DBConnectionVerification.class" />
+                  <manifest>
+                    <attribute name="Main-Class"
+                      value="org.apache.ambari.server.DBConnectionVerification" />
+                  </manifest>
+                </jar>
+              </tasks>
+            </configuration>
+            <goals>
+              <goal>run</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
       <plugin>
         <artifactId>maven-assembly-plugin</artifactId>
         <configuration>
           <descriptors>
-            <descriptor>src/main/assemblies/server.xml</descriptor>
+            <descriptor>${assemblydescriptor}</descriptor>
           </descriptors>
           <tarLongFileMode>gnu</tarLongFileMode>
         </configuration>
@@ -139,15 +164,23 @@
             <exclude>src/main/resources/db/serial</exclude>
             <exclude>src/main/resources/db/index.txt</exclude>
             <exclude>src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2</exclude>
+            <exclude>src/main/windows/ambari-server.cmd</exclude>
+            <exclude>src/main/windows/ambari-server.ps1</exclude>
             <exclude>src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer-err.log</exclude>
             <exclude>src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log</exclude>
             <exclude>src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/balancer-emulator/balancer.log</exclude>
             <exclude>src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/balancer-emulator/balancer-err.log</exclude>
             <exclude>conf/unix/ca.config</exclude>
             <exclude>conf/unix/krb5JAASLogin.conf</exclude>
+            <exclude>conf/windows/ca.config</exclude>
+            <exclude>conf/windows/krb5JAASLogin.conf</exclude>
+            <exclude>**/*.iml</exclude>
             <exclude>**/*.json</exclude>
             <exclude>**/*.sql</exclude>
+            <exclude>**/*.wxs</exclude>
             <exclude>**/repo_suse_rhel.j2</exclude>
+            <exclude>**/repo_debian.j2</exclude>
+            <exclude>**/cluster.properties.j2</exclude>
             <exclude>**/repo_ubuntu.j2</exclude>
             <exclude>**/.pydev*</exclude>
 
@@ -156,6 +189,7 @@
 
             <!-- Stack definitions -->
             <exclude>src/main/resources/stacks/HDP/2.0._/services/HBASE/package/templates/regionservers.j2</exclude>
+            <exclude>src/main/resources/stacks/HDPWIN/2.1/services/*/configuration*/*</exclude>
 
             <!--test samples -->
             <exclude>src/test/resources/TestAmbaryServer.samples/**</exclude>
@@ -199,7 +233,6 @@
           </dependency>
         </dependencies>
       </plugin>
-
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>rpm-maven-plugin</artifactId>
@@ -423,6 +456,15 @@
                 <source>
                   <location>src/main/resources/Ambari-DDL-MySQL-DROP.sql</location>
                 </source>
+                <source>
+                  <location>target/classes/Ambari-DDL-SQLServer-CREATE.sql</location>
+                </source>
+                <source>
+                  <location>target/classes/Ambari-DDL-SQLServer-CREATELOCAL.sql</location>
+                </source>
+                <source>
+                  <location>src/main/resources/Ambari-DDL-SQLServer-DROP.sql</location>
+                </source>
                 <source>
                   <location>${project.build.directory}/DBConnectionVerification.jar</location>
                 </source>
@@ -1024,6 +1066,13 @@
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <skip>${skipSurefireTests}</skip>
+        </configuration>
+      </plugin>
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>exec-maven-plugin</artifactId>
@@ -1031,14 +1080,14 @@
         <executions>
           <execution>
             <configuration>
-              <executable>${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap</executable>
+              <executable>${executable.python}</executable>
               <workingDirectory>src/test/python</workingDirectory>
               <arguments>
                 <argument>unitTests.py</argument>
                 <argument>${custom.tests}</argument>
               </arguments>
               <environmentVariables>
-                  <PYTHONPATH>${project.basedir}/../ambari-common/src/main/python:${project.basedir}/../ambari-agent/src/main/python:${project.basedir}/../ambari-common/src/main/python/ambari_jinja2:${project.basedir}/../ambari-common/src/main/python/ambari_commons:${project.basedir}/../ambari-common/src/test/python:${project.basedir}/src/main/python:${project.basedir}/src/main/python/ambari-server-state:${project.basedir}/src/test/python:$PYTHONPATH</PYTHONPATH>
+                  <PYTHONPATH>${path.python.1}${pathsep}$PYTHONPATH</PYTHONPATH>
               </environmentVariables>
               <skip>${skipTests}</skip>
             </configuration>
@@ -1184,6 +1233,139 @@
         </plugins>
       </build>
     </profile>
+    <profile>
+      <id>windows</id>
+      <activation>
+        <os>
+          <family>win</family>
+        </os>
+      </activation>
+      <properties>
+        <envClassifier>win</envClassifier>
+        <dirsep>\</dirsep>
+        <pathsep>;</pathsep>
+        <executable.python>python</executable.python>
+        <executable.shell>cmd</executable.shell>
+        <fileextension.shell>cmd</fileextension.shell>
+        <fileextension.dot.shell-default>.cmd</fileextension.dot.shell-default>
+        <path.python.1>${project.basedir}\..\ambari-common\src\main\python;${project.basedir}\..\ambari-agent\src\main\python;${project.basedir}\..\ambari-common\src\main\python\ambari_jinja2;${project.basedir}\..\ambari-common\src\main\python\ambari_commons;${project.basedir}\..\ambari-common\src\test\python;${project.basedir}\src\main\python;${project.basedir}\src\main\python\ambari-server-state;${project.basedir}\src\main\resources\custom_actions;${project.basedir}\src\main\resources\scripts;${project.basedir}\src\test\python</path.python.1>
+        <assemblydescriptor>src/main/assemblies/server-windows.xml</assemblydescriptor>
+        <packagingFormat>jar</packagingFormat>
+      </properties>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
+            <version>1.2</version>
+            <executions>
+              <execution>
+                <id>run-heat</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>exec</goal>
+                </goals>
+                <configuration>
+                  <executable>heat.exe</executable>
+                  <arguments>
+                    <argument>dir</argument>
+                    <argument>"."</argument>
+                    <argument>-dr</argument>
+                    <argument>"AMBARI_SERVER_MSI"</argument>
+                    <argument>-platform</argument>
+                    <argument>Win64</argument>
+                    <argument>-cg</argument>
+                    <argument>"AmbariServerGroup"</argument>
+                    <argument>-gg</argument>
+                    <argument>-ke</argument>
+                    <argument>-srd</argument>
+                    <argument>-o</argument>
+                    <argument>".\..\..\ambari-server-files.wxs"</argument>
+                  </arguments>
+                  <workingDirectory>target/ambari-server-${project.version}-dist/ambari-server-${project.version}</workingDirectory>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.npanday.plugins</groupId>
+            <artifactId>wix-maven-plugin</artifactId>
+            <version>1.4.0-incubating</version>
+            <extensions>true</extensions>
+            <configuration>
+              <sourceFiles>
+                <sourceFile>target/ambari-server.wxs</sourceFile>
+                <sourceFile>target/ambari-server-files.wxs</sourceFile>
+              </sourceFiles>
+              <outputDirectory>target</outputDirectory>
+              <objectFiles>
+                <objectFile>target/ambari-server.wixobj</objectFile>
+                <objectFile>target/ambari-server-files.wixobj</objectFile>
+              </objectFiles>
+              <outputFile>target/ambari-server-${ambariVersion}.msi</outputFile>
+              <extensions>
+                <extension>WixUIExtension</extension>
+              </extensions>
+            </configuration>
+            <executions>
+              <execution>
+                <id>wix-candle</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>candle</goal>
+                </goals>
+                <configuration>
+                  <arguments>-arch x64</arguments>
+                </configuration>
+              </execution>
+              <execution>
+                <id>wix-light</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>light</goal>
+                </goals>
+                <configuration>
+                  <arguments>-b ${basedir}/target/ambari-server-${project.version}-dist/ambari-server-${project.version}</arguments>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.npanday.plugins</groupId>
+          <artifactId>wix-maven-plugin</artifactId>
+          <version>1.4.0-incubating</version>
+          <scope>test</scope>
+        </dependency>
+        <dependency>
+          <groupId>${pom.groupId}</groupId>
+          <artifactId>metrics-sink</artifactId>
+          <version>1.0.0</version>
+        </dependency>
+      </dependencies>
+    </profile>
+    <profile>
+      <id>linux</id>
+      <activation>
+        <os>
+          <family>unix</family>
+        </os>
+      </activation>
+      <properties>
+        <envClassifier>linux</envClassifier>
+        <dirsep>/</dirsep>
+        <pathsep>:</pathsep>
+        <executable.python>${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap</executable.python>
+        <executable.shell>sh</executable.shell>
+        <fileextension.shell>sh</fileextension.shell>
+        <fileextension.dot.shell-default></fileextension.dot.shell-default>
+        <path.python.1>${project.basedir}/../ambari-common/src/main/python:${project.basedir}/../ambari-agent/src/main/python:${project.basedir}/../ambari-common/src/main/python/ambari_jinja2:${project.basedir}/../ambari-common/src/main/python/ambari_commons:${project.basedir}/../ambari-common/src/test/python:${project.basedir}/src/main/python:${project.basedir}/src/main/python/ambari-server-state:${project.basedir}/src/main/resources/custom_actions:${project.basedir}/src/main/resources/scripts:${project.basedir}/src/test/python</path.python.1>
+        <assemblydescriptor>src/main/assemblies/server.xml</assemblydescriptor>
+        <packagingFormat>jar</packagingFormat>
+      </properties>
+    </profile>
     <profile>
       <id>suse11</id>
       <properties>
@@ -1493,7 +1675,7 @@
       <version>1.5.2</version>
     </dependency>
   </dependencies>
-  
+
   <pluginRepositories>
     <pluginRepository>
       <id>oss.sonatype.org</id>

+ 183 - 0
ambari-server/src/main/assemblies/server-windows.xml

@@ -0,0 +1,183 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+  <id>dist</id>
+  <formats>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <files>
+    <file>
+      <source>${project.build.directory}/${artifact.artifactId}-${artifact.version}.jar</source>
+      <outputDirectory>ambari-server-${project.version}/lib</outputDirectory>
+    </file>
+    <file>
+      <source>${project.build.directory}/DBConnectionVerification.jar</source>
+      <outputDirectory>ambari-server-${project.version}/resources</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/conf/windows/ambari.properties</source>
+      <outputDirectory>/ambari-server-${project.version}/conf</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/conf/windows/log4j.properties</source>
+      <outputDirectory>/ambari-server-${project.version}/conf</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/conf/windows/ca.config</source>
+      <outputDirectory>/ambari-server-${project.version}/keystore</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/src/main/python/ambari-server-windows.py</source>
+      <outputDirectory>/ambari-server-${project.version}/sbin</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/src/main/python/bootstrap.py</source>
+      <outputDirectory>/ambari-server-${project.version}/bootstrap</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/src/main/python/setupAgent.py</source>
+      <outputDirectory>/ambari-server-${project.version}/bootstrap</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/src/main/windows/ambari-server.cmd</source>
+      <outputDirectory>/ambari-server-${project.version}</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/src/main/windows/ambari-server.ps1</source>
+      <outputDirectory>/ambari-server-${project.version}</outputDirectory>
+    </file>
+    <file>
+      <source>${project.build.directory}/version</source>
+      <outputDirectory>ambari-server-${project.version}/</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/../contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATE.sql</source>
+      <outputDirectory>/ambari-server-${project.version}/resources</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/../contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATELOCAL.sql</source>
+      <outputDirectory>/ambari-server-${project.version}/resources</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/../contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-DROP.sql</source>
+      <outputDirectory>/ambari-server-${project.version}/resources</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/../contrib/ambari-scom/metrics-sink/target/metrics-sink-1.0.0.jar</source>
+      <outputDirectory>/ambari-server-${project.version}/resources</outputDirectory>
+    </file>
+    <file>
+      <source>${basedir}/src/main/package/msi/ambari-server.wxs</source>
+      <outputDirectory>../../</outputDirectory>
+      <filtered>true</filtered>
+    </file>
+  </files>
+  <fileSets>
+    <!-- Distro files, readme, licenses, etc -->
+    <fileSet>
+      <directory>${basedir}/../ambari-admin/target</directory>
+      <outputDirectory>/ambari-server-${project.version}/resources/views</outputDirectory>
+      <includes>
+        <include>*.jar</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/../</directory>
+      <outputDirectory>ambari-server-${project.version}/</outputDirectory>
+      <includes>
+        <include>*.txt</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/python/ambari_server</directory>
+      <outputDirectory>ambari-server-${project.version}/sbin/ambari_server</outputDirectory>
+      <includes>
+        <include>*.py</include>
+        <include>*.pyc</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/../ambari-common/src/main/python/ambari_commons</directory>
+      <outputDirectory>ambari-server-${project.version}/sbin/ambari_commons</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/../ambari-common/src/main/python/ambari_jinja2/ambari_jinja2</directory>
+      <outputDirectory>ambari-server-${project.version}/sbin/ambari_jinja2</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/../ambari-common/src/main/python/resource_management</directory>
+      <outputDirectory>ambari-server-${project.version}/sbin/resource_management</outputDirectory>
+    </fileSet>
+     <!--
+    <fileSet>
+      <directory>${project.build.directory}/web/</directory>
+      <outputDirectory>ambari-server-${project.version}/web/</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+    </fileSet>
+    -->
+    <!--
+    <fileSet>
+      <directory>${basedir}/src/main/bin</directory>
+      <outputDirectory>ambari-server-${project.version}/bin</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    -->
+    <fileSet>
+      <directory>${basedir}/src/main/resources/</directory>
+      <outputDirectory>/ambari-server-${project.version}/keystore</outputDirectory>
+      <includes>
+        <include>db/*</include>
+        <include>pass.txt</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/../ambari-web/public</directory>
+      <outputDirectory>ambari-server-${project.version}/web</outputDirectory>
+      <includes>
+        <include>**</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/resources</directory>
+      <outputDirectory>/ambari-server-${project.version}/resources/</outputDirectory>
+      <includes>
+        <include>Ambari-DDL-SQLServer-*.sql</include>
+        <include>custom_action_definitions/**</include>
+        <include>custom_actions/**</include>
+        <include>stacks/stack_advisor.py</include>
+        <include>scripts/**</include>
+        <include>stacks/HDPWIN/**</include>
+        <include>upgrade/**</include>
+      </includes>
+    </fileSet>
+  </fileSets>
+  <dependencySets>
+    <dependencySet>
+      <outputDirectory>ambari-server-${project.version}/lib</outputDirectory>
+      <unpack>false</unpack>
+      <scope>compile</scope>
+    </dependencySet>
+  </dependencySets>
+</assembly>

+ 7 - 1
ambari-server/src/main/java/org/apache/ambari/server/DBConnectionVerification.java

@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server;
 
+import org.apache.commons.lang.StringUtils;
+
 import java.sql.*;
 
 public class DBConnectionVerification {
@@ -30,7 +32,11 @@ public class DBConnectionVerification {
     Connection conn = null;
     try {
        Class.forName(driver);
-       conn = DriverManager.getConnection(url, username, password);
+       if (url.contains("integratedSecurity=true")) {
+         conn = DriverManager.getConnection(url);
+       } else {
+         conn = DriverManager.getConnection(url, username, password);
+       }
        System.out.println("Connected to DB Successfully!");
     } catch (Exception e) {
        System.out.println("ERROR: Unable to connect to the DB. Please check DB connection properties.");

+ 3 - 3
ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java

@@ -234,7 +234,7 @@ public class ComponentService extends BaseService {
     Response.ResponseBuilder rb = Response.status(Response.Status.OK);
     Configuration configs = new Configuration();
     String tmpDir = configs.getProperty(Configuration.SERVER_TMP_DIR_KEY);
-    File file = new File(tmpDir+File.separator+componentName+"-configs.tar.gz");
+    File file = new File(tmpDir + File.separator + componentName + "-configs" + Configuration.DEF_ARCHIVE_EXTENSION);
     InputStream resultInputStream = null;
     try {
       resultInputStream = new FileInputStream(file);
@@ -242,8 +242,8 @@ public class ComponentService extends BaseService {
       e.printStackTrace();
     }
 
-    String contentType = "application/x-ustar";
-    String outputFileName = componentName + "-configs.tar.gz";
+    String contentType = Configuration.DEF_ARCHIVE_CONTENT_TYPE;
+    String outputFileName = componentName + "-configs" + Configuration.DEF_ARCHIVE_EXTENSION;
     rb.header("Content-Disposition",  "attachment; filename=\"" + outputFileName + "\"");
     rb.entity(resultInputStream);
     return rb.type(contentType).build();

+ 9 - 4
ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRunner.java

@@ -37,7 +37,7 @@ public class StackAdvisorRunner {
 
   /**
    * Runs stack_advisor.py script in the specified {@code actionDirectory}.
-   * 
+   *
    * @param script stack advisor script
    * @param saCommandType {@link StackAdvisorCommandType} to run.
    * @param actionDirectory directory for the action
@@ -110,7 +110,7 @@ public class StackAdvisorRunner {
    * Gets an instance of a {@link ProcessBuilder} that's ready to execute the
    * shell command to run the stack advisor script. This will take the
    * environment variables from the current process.
-   * 
+   *
    * @param script
    * @param saCommandType
    * @param actionDirectory
@@ -126,8 +126,13 @@ public class StackAdvisorRunner {
 
     // includes the original command plus the arguments for it
     List<String> builderParameters = new ArrayList<String>();
-    builderParameters.add("sh");
-    builderParameters.add("-c");
+    if (System.getProperty("os.name").contains("Windows")) {
+      builderParameters.add("cmd");
+      builderParameters.add("/c");
+    } else {
+      builderParameters.add("sh");
+      builderParameters.add("-c");
+    }
 
     // for the 3rd argument, build a single parameter since we use -c
     // ProcessBuilder doesn't support output redirection until JDK 1.7
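
The same cmd /c versus sh -c fork appears wherever the server shells out to Python; a minimal sketch of the idea, using the ambari.python.wrap property introduced earlier in this commit as the interpreter name (paths and arguments are illustrative):

import platform
import subprocess

def run_stack_advisor(python_wrap, script, command_type, action_dir):
  # Build one shell string, mirroring the single third argument passed with -c / /c
  command = "%s %s %s %s" % (python_wrap, script, command_type, action_dir)
  if platform.system() == "Windows":
    prefix = ["cmd", "/c"]
  else:
    prefix = ["sh", "-c"]
  return subprocess.call(prefix + [command])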

+ 52 - 1
ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java

@@ -73,6 +73,8 @@ public class Configuration {
   public static final String RECOMMENDATIONS_DIR_DEFAULT = "/var/run/ambari-server/stack-recommendations";
   public static final String STACK_ADVISOR_SCRIPT = "stackadvisor.script";
   public static final String STACK_ADVISOR_SCRIPT_DEFAULT = "/var/lib/ambari-server/resources/scripts/stack_advisor.py";
+  public static final String AMBARI_PYTHON_WRAP_KEY = "ambari.python.wrap";
+  public static final String AMBARI_PYTHON_WRAP_DEFAULT = "ambari-python-wrap";
   public static final String API_AUTHENTICATE = "api.authenticate";
   public static final String API_USE_SSL = "api.ssl";
   public static final String API_CSRF_PREVENTION_KEY = "api.csrfPrevention.enabled";
@@ -167,6 +169,11 @@ public class Configuration {
   public static final String SERVER_JDBC_RCA_USER_PASSWD_KEY = "server.jdbc.rca.user.passwd";
   public static final String SERVER_JDBC_RCA_DRIVER_KEY = "server.jdbc.rca.driver";
   public static final String SERVER_JDBC_RCA_URL_KEY = "server.jdbc.rca.url";
+  public static final String SCOM_JDBC_SINK_USER_NAME_KEY = "scom.sink.db.username";
+  public static final String SCOM_JDBC_SINK_USER_PASSWD_KEY = "scom.sink.db.password";
+  public static final String SCOM_JDBC_SINK_DRIVER_KEY = "scom.sink.db.driver";
+  public static final String SCOM_JDBC_SINK_URL_KEY = "scom.sink.db.url";
+  public static final String SCOM_JDBC_SINK_INT_AUTH_KEY = "scom.sink.db.use.integrated.auth";
   public static final String SERVER_JDBC_GENERATE_TABLES_KEY = "server.jdbc.generateTables";
   public static final String JDBC_UNIT_NAME = "ambari-server";
   public static final String JDBC_LOCAL_URL = "jdbc:postgresql://localhost/";
@@ -236,6 +243,9 @@ public class Configuration {
   public static final String SERVER_TMP_DIR_DEFAULT = "/var/lib/ambari-server/tmp";
   public static final String EXTERNAL_SCRIPT_TIMEOUT_KEY = "server.script.timeout";
   public static final String EXTERNAL_SCRIPT_TIMEOUT_DEFAULT = "5000";
+  public static final String DEF_ARCHIVE_EXTENSION;
+  public static final String DEF_ARCHIVE_CONTENT_TYPE;
+
   /**
    * This key defines whether stages of parallel requests are executed in
    * parallel or sequentally. Only stages from different requests
@@ -257,6 +267,8 @@ public class Configuration {
   private static final String SERVER_JDBC_USER_PASSWD_DEFAULT = "bigdata";
   private static final String SERVER_JDBC_RCA_USER_NAME_DEFAULT = "mapred";
   private static final String SERVER_JDBC_RCA_USER_PASSWD_DEFAULT = "mapred";
+  private static final String SCOM_JDBC_SINK_USER_NAME_DEFAULT = "hadoop";
+  private static final String SCOM_JDBC_SINK_USER_PASSWD_DEFAULT = "hadoop";
   private static final String SRVR_TWO_WAY_SSL_DEFAULT = "false";
   private static final String SRVR_KSTR_DIR_DEFAULT = ".";
   private static final String API_CSRF_PREVENTION_DEFAULT = "true";
@@ -327,6 +339,17 @@ public class Configuration {
   private volatile boolean credentialProviderInitialized = false;
   private Map<String, String> customDbProperties = null;
 
+  static {
+    if (System.getProperty("os.name").contains("Windows")) {
+      DEF_ARCHIVE_EXTENSION = ".zip";
+      DEF_ARCHIVE_CONTENT_TYPE = "application/zip";
+    }
+    else {
+      DEF_ARCHIVE_EXTENSION = ".tar.gz";
+      DEF_ARCHIVE_CONTENT_TYPE = "application/x-ustar";
+    }
+  }
+
   public Configuration() {
     this(readConfigFile());
   }
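
The static block above fixes the archive convention once per process; it has to agree with what the agent produces and what ComponentService advertises. A tiny Python sketch of the shared convention, with the extensions and content types taken from the block:

import platform

if platform.system() == "Windows":
  ARCHIVE_EXTENSION, ARCHIVE_CONTENT_TYPE = ".zip", "application/zip"
else:
  ARCHIVE_EXTENSION, ARCHIVE_CONTENT_TYPE = ".tar.gz", "application/x-ustar"

def configs_download_name(component):
  # e.g. NAMENODE-configs.tar.gz on Linux, NAMENODE-configs.zip on Windows,
  # matching the outputFileName built in ComponentService above
  return component + "-configs" + ARCHIVE_EXTENSION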
@@ -341,6 +364,8 @@ public class Configuration {
     this.properties = properties;
 
     configsMap = new HashMap<String, String>();
+    configsMap.put(AMBARI_PYTHON_WRAP_KEY, properties.getProperty(
+        AMBARI_PYTHON_WRAP_KEY, AMBARI_PYTHON_WRAP_DEFAULT));
     configsMap.put(SRVR_TWO_WAY_SSL_KEY, properties.getProperty(
         SRVR_TWO_WAY_SSL_KEY, SRVR_TWO_WAY_SSL_DEFAULT));
     configsMap.put(SRVR_TWO_WAY_SSL_PORT_KEY, properties.getProperty(
@@ -765,6 +790,32 @@ public class Configuration {
     return readPasswordFromFile(passwdProp, SERVER_JDBC_RCA_USER_PASSWD_DEFAULT);
   }
 
+  public String getSinkDatabaseDriver() {
+    return properties.getProperty(SCOM_JDBC_SINK_DRIVER_KEY);
+  }
+
+  public String getSinkDatabaseUrl() {
+    return properties.getProperty(SCOM_JDBC_SINK_URL_KEY);
+  }
+
+  public boolean getSinkUseIntegratedAuth() {
+    return "true".equalsIgnoreCase(properties.getProperty(SCOM_JDBC_SINK_INT_AUTH_KEY));
+  }
+
+  public String getSinkDatabaseUser() {
+    return properties.getProperty(SCOM_JDBC_SINK_USER_NAME_KEY, SCOM_JDBC_SINK_USER_NAME_DEFAULT);
+  }
+
+  public String getSinkDatabasePassword() {
+    String passwdProp = properties.getProperty(SCOM_JDBC_SINK_USER_PASSWD_KEY);
+    if (passwdProp != null) {
+      String dbpasswd = readPasswordFromStore(passwdProp);
+      if (dbpasswd != null)
+        return dbpasswd;
+    }
+    return readPasswordFromFile(passwdProp, SCOM_JDBC_SINK_USER_PASSWD_DEFAULT);
+  }
+
   private String readPasswordFromFile(String filePath, String defaultPassword) {
     if (filePath == null) {
       LOG.debug("DB password file not specified - using default");
@@ -1090,7 +1141,7 @@ public class Configuration {
   public String getResourceDirPath() {
     return properties.getProperty(RESOURCES_DIR_KEY, RESOURCES_DIR_DEFAULT);
   }
-    
+
   public String getSharedResourcesDirPath(){
       return properties.getProperty(SHARED_RESOURCES_DIR_KEY, SHARED_RESOURCES_DIR_DEFAULT);
   }

+ 12 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java

@@ -303,6 +303,18 @@ public abstract class AbstractPropertyProvider extends BaseProvider implements P
     }
   }
 
+  protected PropertyInfo updatePropertyInfo(String propertyKey, String id, PropertyInfo propertyInfo) {
+    List<String> regexGroups = getRegexGroups(propertyKey, id);
+    String propertyId = propertyInfo.getPropertyId();
+    if(propertyId != null) {
+      for (String regexGroup : regexGroups) {
+        regexGroup = regexGroup.replace("/", ".");
+        propertyId = propertyId.replaceFirst(FIND_REGEX_IN_METRIC_REGEX, regexGroup);
+      }
+    }
+    return new PropertyInfo(propertyId, propertyInfo.isTemporal(), propertyInfo.isPointInTime());
+  }
+
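
updatePropertyInfo substitutes each regex group captured from the metric key into the templated property id, one placeholder at a time; a rough Python sketch of that loop (the placeholder pattern below is a hypothetical stand-in for FIND_REGEX_IN_METRIC_REGEX, which is defined elsewhere):

import re

# Hypothetical placeholder pattern standing in for FIND_REGEX_IN_METRIC_REGEX
PLACEHOLDER = re.compile(r"\$\d+")

def update_property_id(property_id, regex_groups):
  for group in regex_groups:
    group = group.replace("/", ".")                      # same normalization as the Java code
    property_id = PLACEHOLDER.sub(group, property_id, count=1)
  return property_id

print(update_property_id("metrics/$1/requests", ["api/v1"]))  # -> metrics/api.v1/requests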
   /**
    * Verify that the component metrics contains the property id.
    * @param componentName Name of the component

+ 227 - 122
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java

@@ -32,6 +32,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.configuration.ComponentSSLConfiguration;
+import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.AmbariServer;
 import org.apache.ambari.server.controller.HostRequest;
@@ -47,6 +48,9 @@ import org.apache.ambari.server.controller.jmx.JMXHostProvider;
 import org.apache.ambari.server.controller.jmx.JMXPropertyProvider;
 import org.apache.ambari.server.controller.metrics.MetricsHostProvider;
 import org.apache.ambari.server.controller.nagios.NagiosPropertyProvider;
+import org.apache.ambari.server.controller.sql.HostInfoProvider;
+import org.apache.ambari.server.controller.sql.SQLPropertyProvider;
+import org.apache.ambari.server.controller.sql.SinkConnectionFactory;
 import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
 import org.apache.ambari.server.controller.spi.NoSuchResourceException;
 import org.apache.ambari.server.controller.spi.Predicate;
@@ -68,12 +72,18 @@ import org.apache.ambari.server.state.State;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.net.InetAddress;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.lang.System;
+
 import com.google.inject.Inject;
 
 /**
  * An abstract provider module implementation.
  */
-public abstract class AbstractProviderModule implements ProviderModule, ResourceProviderObserver, JMXHostProvider, GangliaHostProvider, MetricsHostProvider {
+public abstract class AbstractProviderModule implements ProviderModule, ResourceProviderObserver, JMXHostProvider, GangliaHostProvider, HostInfoProvider, MetricsHostProvider {
 
   private static final int PROPERTY_REQUEST_CONNECT_TIMEOUT = 5000;
   private static final int PROPERTY_REQUEST_READ_TIMEOUT    = 10000;
@@ -89,7 +99,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
   private static final Map<Service.Type, Map<String, String[]>> serviceDesiredProperties = new EnumMap<Service.Type, Map<String, String[]>>(Service.Type.class);
   private static final Map<String, Service.Type> componentServiceMap = new HashMap<String, Service.Type>();
 
-  private static final Map<String, Map<String, String[]>> jmxDesiredProperties = new HashMap<String, Map<String,String[]>>();
+  private static final Map<String, Map<String, String[]>> jmxDesiredProperties = new HashMap<String, Map<String, String[]>>();
   private volatile Map<String, String> clusterCoreSiteConfigVersionMap = new HashMap<String, String>();
   private volatile Map<String, String> clusterJmxProtocolMap = new HashMap<String, String>();
 
@@ -110,30 +120,30 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
     componentServiceMap.put("HISTORYSERVER", Service.Type.MAPREDUCE2);
 
     Map<String, String[]> initPropMap = new HashMap<String, String[]>();
-    initPropMap.put("NAMENODE", new String[] {"dfs.http.address", "dfs.namenode.http-address"});
-    initPropMap.put("DATANODE", new String[] {"dfs.datanode.http.address"});
+    initPropMap.put("NAMENODE", new String[]{"dfs.http.address", "dfs.namenode.http-address"});
+    initPropMap.put("DATANODE", new String[]{"dfs.datanode.http.address"});
     serviceDesiredProperties.put(Service.Type.HDFS, initPropMap);
 
     initPropMap = new HashMap<String, String[]>();
-    initPropMap.put("JOBTRACKER", new String[] {"mapred.job.tracker.http.address"});
-    initPropMap.put("TASKTRACKER", new String[] {"mapred.task.tracker.http.address"});
+    initPropMap.put("JOBTRACKER", new String[]{"mapred.job.tracker.http.address"});
+    initPropMap.put("TASKTRACKER", new String[]{"mapred.task.tracker.http.address"});
     serviceDesiredProperties.put(Service.Type.MAPREDUCE, initPropMap);
 
     initPropMap = new HashMap<String, String[]>();
-    initPropMap.put("HBASE_MASTER", new String[] {"hbase.master.info.port"});
+    initPropMap.put("HBASE_MASTER", new String[]{"hbase.master.info.port"});
     serviceDesiredProperties.put(Service.Type.HBASE, initPropMap);
 
     initPropMap = new HashMap<String, String[]>();
-    initPropMap.put("RESOURCEMANAGER", new String[] {"yarn.resourcemanager.webapp.address"});
-    initPropMap.put("NODEMANAGER", new String[] {"yarn.nodemanager.webapp.address"});
+    initPropMap.put("RESOURCEMANAGER", new String[]{"yarn.resourcemanager.webapp.address"});
+    initPropMap.put("NODEMANAGER", new String[]{"yarn.nodemanager.webapp.address"});
     serviceDesiredProperties.put(Service.Type.YARN, initPropMap);
 
     initPropMap = new HashMap<String, String[]>();
-    initPropMap.put("HISTORYSERVER", new String[] {"mapreduce.jobhistory.webapp.address"});
+    initPropMap.put("HISTORYSERVER", new String[]{"mapreduce.jobhistory.webapp.address"});
     serviceDesiredProperties.put(Service.Type.MAPREDUCE2, initPropMap);
 
     initPropMap = new HashMap<String, String[]>();
-    initPropMap.put("NAMENODE", new String[] {"hadoop.ssl.enabled"});
+    initPropMap.put("NAMENODE", new String[]{"hadoop.ssl.enabled"});
     jmxDesiredProperties.put("NAMENODE", initPropMap);
   }
 
@@ -145,7 +155,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
   /**
    * The map of lists of property providers.
    */
-  private final Map<Resource.Type,List<PropertyProvider>> propertyProviders = new HashMap<Resource.Type, List<PropertyProvider>>();
+  private final Map<Resource.Type, List<PropertyProvider>> propertyProviders = new HashMap<Resource.Type, List<PropertyProvider>>();
 
   @Inject
   AmbariManagementController managementController;
@@ -164,7 +174,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
    * JMX ports read from the configs
    */
   private final Map<String, Map<String, String>> jmxPortMap =
-    new HashMap<String, Map<String, String>>();
+      new HashMap<String, Map<String, String>>();
 
   private volatile boolean initialized = false;
 
@@ -266,13 +276,13 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
         // performance with a ConcurrentHashMap and maybe get default/existing
         // ports for a few calls.
         if (!currVersion.equals(oldVersion) ||
-          !clusterJmxPorts.containsKey(componentName)) {
+            !clusterJmxPorts.containsKey(componentName)) {
 
           serviceConfigVersions.put(service, currVersion);
 
           Map<String, String> portMap = getDesiredConfigMap(clusterName,
-            currVersion, serviceConfigTypes.get(service),
-            serviceDesiredProperties.get(service));
+              currVersion, serviceConfigTypes.get(service),
+              serviceDesiredProperties.get(service));
 
           for (Entry<String, String> entry : portMap.entrySet()) {
             // portString will be null if the property defined for the component doesn't exist
@@ -292,43 +302,82 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
     return clusterJmxPorts.get(componentName);
   }
 
-  /**Post process property value. If value has one ore some substrings
-   * started with "${" and ended with "}" these substrings will replace
-   * with properties from current propertiesMap. It is doing recursively.
+  /**
+   * Post process property value. If the value has one or more substrings
+   * starting with "${" and ending with "}", these substrings will be replaced
+   * with properties from the current propertiesMap. This is done recursively.
-   * @param key - properties name
-   * @param value - properties value
+   *
+   * @param key        - properties name
+   * @param value      - properties value
    * @param properties - map with properties
    */
   private String postProcessPropertyValue(String key, String value, Map<String, String> properties, Set<String> prevProps) {
-      if (value != null && key != null && value.contains("${")){
-          if (prevProps == null) {
-            prevProps = new HashSet<String>();
-          }
-          if (prevProps.contains(key)){
-            return value;
-          }
-          prevProps.add(key);
-          String refValueString = value;
-          Map<String, String> refMap = new HashMap<String, String>();
-          while(refValueString.contains("${")) {
-              int startValueRef = refValueString.indexOf("${") + 2;
-              int endValueRef = refValueString.indexOf('}');
-              String valueRef = refValueString.substring(startValueRef, endValueRef);
-              refValueString = refValueString.substring(endValueRef+1);
-              String trueValue = postProcessPropertyValue(valueRef, properties.get(valueRef), properties, prevProps);
-              if (trueValue != null){
-               refMap.put("${"+valueRef+ '}', trueValue);
-              }
-          }
-          for (Entry<String, String> entry : refMap.entrySet()){
-            refValueString = entry.getValue();
-            value = value.replace(entry.getKey(), refValueString);
-          }
-          properties.put(key, value);
+    if (value != null && key != null && value.contains("${")) {
+      if (prevProps == null) {
+        prevProps = new HashSet<String>();
+      }
+      if (prevProps.contains(key)) {
+        return value;
+      }
+      prevProps.add(key);
+      String refValueString = value;
+      Map<String, String> refMap = new HashMap<String, String>();
+      while (refValueString.contains("${")) {
+        int startValueRef = refValueString.indexOf("${") + 2;
+        int endValueRef = refValueString.indexOf('}');
+        String valueRef = refValueString.substring(startValueRef, endValueRef);
+        refValueString = refValueString.substring(endValueRef + 1);
+        String trueValue = postProcessPropertyValue(valueRef, properties.get(valueRef), properties, prevProps);
+        if (trueValue != null) {
+          refMap.put("${" + valueRef + '}', trueValue);
+        }
+      }
+      for (Entry<String, String> entry : refMap.entrySet()) {
+        refValueString = entry.getValue();
+        value = value.replace(entry.getKey(), refValueString);
+      }
+      properties.put(key, value);
     }
     return value;
   }
 
+  // ----- HostInfoProvider -----------------------------------------------
+
+  @Override
+  public String getHostName(String id) throws SystemException {
+    return getClusterNodeName(id);
+  }
+
+  @Override
+  public String getHostAddress(String id) throws SystemException {
+    return getClusterHostAddress(id);
+  }
+
+
+  // get the hostname
+  private String getClusterNodeName(String hostname) throws SystemException {
+    try {
+      if (hostname.equalsIgnoreCase("localhost")) {
+        return InetAddress.getLocalHost().getCanonicalHostName();
+      }
+      return InetAddress.getByName(hostname).getCanonicalHostName();
+    } catch (Exception e) {
+      throw new SystemException("Error getting hostname.", e);
+    }
+  }
+
+  // get the host ip address
+  private String getClusterHostAddress(String hostname) throws SystemException {
+    try {
+      if (hostname.equalsIgnoreCase("localhost")) {
+        return InetAddress.getLocalHost().getHostAddress();
+      }
+      return InetAddress.getByName(hostname).getHostAddress();
+    } catch (Exception e) {
+      throw new SystemException("Error getting ip address.", e);
+    }
+  }
+
   // ----- GangliaHostProvider -----------------------------------------------
 
   @Override
@@ -376,9 +425,9 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
       final String gangliaCollectorHostName = getGangliaCollectorHostName(clusterName);
 
       ServiceComponentHostRequest componentRequest = new ServiceComponentHostRequest(clusterName, "GANGLIA",
-                                                                                     Role.GANGLIA_SERVER.name(),
-                                                                                     gangliaCollectorHostName,
-                                                                                     null);
+          Role.GANGLIA_SERVER.name(),
+          gangliaCollectorHostName,
+          null);
 
       Set<ServiceComponentHostResponse> hostComponents =
           managementController.getHostComponents(Collections.singleton(componentRequest));
@@ -403,7 +452,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
     ResourceProvider resourceProvider = createResourceProvider(type);
 
     if (resourceProvider instanceof ObservableResourceProvider) {
-      ((ObservableResourceProvider)resourceProvider).addObserver(this);
+      ((ObservableResourceProvider) resourceProvider).addObserver(this);
     }
 
     putResourceProvider(type, resourceProvider);
@@ -468,15 +517,23 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
               null,
               PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"),
               PropertyHelper.getPropertyId("ServiceComponentInfo", "state"));
-
-          PropertyProvider gpp = createGangliaComponentPropertyProvider(
-              type,
-              streamProvider,
-              ComponentSSLConfiguration.instance(),
-              this,
-              PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
-              PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"));
-
+          PropertyProvider gpp = null;
+          if (System.getProperty("os.name").contains("Windows")) {
+            gpp = createSQLComponentPropertyProvider(
+                type,
+                this,
+                PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
+                PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"),
+                PropertyHelper.getPropertyId("ServiceComponentInfo", "service_name"));
+          } else {
+            gpp = createGangliaComponentPropertyProvider(
+                type,
+                streamProvider,
+                ComponentSSLConfiguration.instance(),
+                this,
+                PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
+                PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"));
+          }
           providers.add(new StackDefinedPropertyProvider(
               type,
               this,
@@ -489,8 +546,8 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
               PropertyHelper.getPropertyId("ServiceComponentInfo", "state"),
               jpp,
               gpp));
-          }
-          break;
+        }
+        break;
         case HostComponent: {
           // TODO as we fill out stack metric definitions, these can be phased out
           PropertyProvider jpp = createJMXPropertyProvider(
@@ -502,16 +559,25 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
               PropertyHelper.getPropertyId("HostRoles", "host_name"),
               PropertyHelper.getPropertyId("HostRoles", "component_name"),
               PropertyHelper.getPropertyId("HostRoles", "state"));
-
-          PropertyProvider gpp = createGangliaHostComponentPropertyProvider(
-              type,
-              streamProvider,
-              ComponentSSLConfiguration.instance(),
-              this,
-              PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
-              PropertyHelper.getPropertyId("HostRoles", "host_name"),
-              PropertyHelper.getPropertyId("HostRoles", "component_name"));
-
+          PropertyProvider gpp = null;
+          if (System.getProperty("os.name").contains("Windows")) {
+            gpp = createSQLHostComponentPropertyProvider(
+                type,
+                this,
+                PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
+                PropertyHelper.getPropertyId("HostRoles", "host_name"),
+                PropertyHelper.getPropertyId("HostRoles", "component_name"),
+                PropertyHelper.getPropertyId("HostRoles", "service_name"));
+          } else {
+            gpp = createGangliaHostComponentPropertyProvider(
+                type,
+                streamProvider,
+                ComponentSSLConfiguration.instance(),
+                this,
+                PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
+                PropertyHelper.getPropertyId("HostRoles", "host_name"),
+                PropertyHelper.getPropertyId("HostRoles", "component_name"));
+          }
           providers.add(new StackDefinedPropertyProvider(
               type,
               this,
@@ -524,8 +590,8 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
               PropertyHelper.getPropertyId("HostRoles", "state"),
               jpp,
               gpp));
-          }
-          break;
+        }
+        break;
         default:
           break;
       }
@@ -533,7 +599,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
     putPropertyProviders(type, providers);
   }
 
-  private void checkInit() throws SystemException{
+  private void checkInit() throws SystemException {
     if (!initialized) {
       synchronized (this) {
         if (!initialized) {
@@ -568,7 +634,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
       jmxPortMap.clear();
       Set<Resource> clusters = provider.getResources(request, null);
 
-      clusterHostComponentMap    = new HashMap<String, Map<String, String>>();
+      clusterHostComponentMap = new HashMap<String, Map<String, String>>();
       clusterGangliaCollectorMap = new HashMap<String, String>();
 
       for (Resource cluster : clusters) {
@@ -582,9 +648,9 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
             HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
 
         Predicate predicate = new PredicateBuilder().property(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID).
-          equals(clusterName).toPredicate();
+            equals(clusterName).toPredicate();
 
-        Set<Resource>       hostComponents   = provider.getResources(request, predicate);
+        Set<Resource> hostComponents = provider.getResources(request, predicate);
         Map<String, String> hostComponentMap = clusterHostComponentMap.get(clusterName);
 
         if (hostComponentMap == null) {
@@ -594,7 +660,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
 
         for (Resource hostComponent : hostComponents) {
           String componentName = (String) hostComponent.getPropertyValue(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
-          String hostName      = (String) hostComponent.getPropertyValue(HOST_COMPONENT_HOST_NAME_PROPERTY_ID);
+          String hostName = (String) hostComponent.getPropertyValue(HOST_COMPONENT_HOST_NAME_PROPERTY_ID);
 
           hostComponentMap.put(componentName, hostName);
 
@@ -624,7 +690,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
 
   private String getPortString(String value) {
     return value != null && value.contains(":") ? value.substring
-      (value.lastIndexOf(":") + 1, value.length()) : value;
+        (value.lastIndexOf(":") + 1, value.length()) : value;
   }
 
   private String getDesiredConfigVersion(String clusterName,
@@ -634,10 +700,10 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
 
     // Get config version tag
     ResourceProvider clusterResourceProvider = getResourceProvider(Resource
-      .Type.Cluster);
+        .Type.Cluster);
     Predicate basePredicate = new PredicateBuilder().property
-      (ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID).equals(clusterName)
-      .toPredicate();
+        (ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID).equals(clusterName)
+        .toPredicate();
 
     Set<Resource> clusterResource = null;
     try {
@@ -661,8 +727,8 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
     if (clusterResource != null) {
       for (Resource resource : clusterResource) {
         Map<String, Object> configs =
-        resource.getPropertiesMap().get(ClusterResourceProvider
-          .CLUSTER_DESIRED_CONFIGS_PROPERTY_ID);
+            resource.getPropertiesMap().get(ClusterResourceProvider
+                .CLUSTER_DESIRED_CONFIGS_PROPERTY_ID);
         if (configs != null) {
           DesiredConfig config = (DesiredConfig) configs.get(configType);
           if (config != null) {
@@ -675,21 +741,21 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
   }
 
   private Map<String, String> getDesiredConfigMap(String clusterName,
-      String versionTag, String configType, Map<String, String[]> keys) throws
-        NoSuchParentResourceException, UnsupportedPropertyException,
-        SystemException {
+                                                  String versionTag, String configType, Map<String, String[]> keys) throws
+      NoSuchParentResourceException, UnsupportedPropertyException,
+      SystemException {
     // Get desired configs based on the tag
     ResourceProvider configResourceProvider = getResourceProvider(Resource.Type.Configuration);
     Predicate configPredicate = new PredicateBuilder().property
-      (ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID).equals(clusterName).and()
-      .property(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID).equals(configType).and()
-      .property(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID).equals(versionTag).toPredicate();
+        (ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID).equals(clusterName).and()
+        .property(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID).equals(configType).and()
+        .property(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID).equals(versionTag).toPredicate();
     Set<Resource> configResources;
     try {
       configResources = configResourceProvider.getResources
-        (PropertyHelper.getReadRequest(ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID,
-          ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
-          ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID), configPredicate);
+          (PropertyHelper.getReadRequest(ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID,
+              ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
+              ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID), configPredicate);
     } catch (NoSuchResourceException e) {
       LOG.info("Resource for the desired config not found. " + e);
       return Collections.emptyMap();
@@ -698,8 +764,8 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
     Map<String, String> mConfigs = new HashMap<String, String>();
     if (configResources != null) {
       for (Resource res : configResources) {
-       Map<String, String> evaluatedProperties = null;
-        for (Entry<String,String[]> entry : keys.entrySet()) {
+        Map<String, String> evaluatedProperties = null;
+        for (Entry<String, String[]> entry : keys.entrySet()) {
           String propName = null;
           String value = null;
 
@@ -713,15 +779,15 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
           }
 
           if (value != null && value.contains("${")) {
-            if (evaluatedProperties == null){
+            if (evaluatedProperties == null) {
               evaluatedProperties = new HashMap<String, String>();
               Map<String, Object> properties = res.getPropertiesMap().get(PROPERTIES_CATEGORY);
               for (Map.Entry<String, Object> subentry : properties.entrySet()) {
                 String keyString = subentry.getKey();
                 Object object = subentry.getValue();
                 String valueString;
-                if (object != null && object instanceof String){
-                  valueString = (String)object;
+                if (object != null && object instanceof String) {
+                  valueString = (String) object;
                   evaluatedProperties.put(keyString, valueString);
                   postProcessPropertyValue(keyString, valueString, evaluatedProperties, null);
                 }
@@ -751,58 +817,97 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
 
     return new JMXPropertyProvider(PropertyHelper.getJMXPropertyIds(type), streamProvider,
         jmxHostProvider, metricsHostProvider, clusterNamePropertyId, hostNamePropertyId,
-                    componentNamePropertyId, statePropertyId);
+        componentNamePropertyId, statePropertyId);
   }
 
   /**
    * Create the Ganglia report property provider for the given type.
    */
-  private PropertyProvider createGangliaReportPropertyProvider( Resource.Type type, StreamProvider streamProvider,
-                                                                ComponentSSLConfiguration configuration,
-                                                                GangliaHostProvider hostProvider,
-                                                                String clusterNamePropertyId) {
+  private PropertyProvider createGangliaReportPropertyProvider(Resource.Type type, StreamProvider streamProvider,
+                                                               ComponentSSLConfiguration configuration,
+                                                               GangliaHostProvider hostProvider,
+                                                               String clusterNamePropertyId) {
 
     return new GangliaReportPropertyProvider(PropertyHelper.getGangliaPropertyIds(type), streamProvider,
-          configuration, hostProvider, clusterNamePropertyId);
+        configuration, hostProvider, clusterNamePropertyId);
   }
 
   /**
    * Create the Ganglia host property provider for the given type.
    */
-  private PropertyProvider createGangliaHostPropertyProvider( Resource.Type type, StreamProvider streamProvider,
-                                                              ComponentSSLConfiguration configuration,
-                                                              GangliaHostProvider hostProvider,
-                                                              String clusterNamePropertyId,
-                                                              String hostNamePropertyId) {
+  private PropertyProvider createGangliaHostPropertyProvider(Resource.Type type, StreamProvider streamProvider,
+                                                             ComponentSSLConfiguration configuration,
+                                                             GangliaHostProvider hostProvider,
+                                                             String clusterNamePropertyId,
+                                                             String hostNamePropertyId) {
     return new GangliaHostPropertyProvider(PropertyHelper.getGangliaPropertyIds(type), streamProvider,
-          configuration, hostProvider, clusterNamePropertyId, hostNamePropertyId);
+        configuration, hostProvider, clusterNamePropertyId, hostNamePropertyId);
   }
 
   /**
    * Create the Ganglia component property provider for the given type.
    */
-  private PropertyProvider createGangliaComponentPropertyProvider( Resource.Type type, StreamProvider streamProvider,
-                                                                   ComponentSSLConfiguration configuration,
-                                                                   GangliaHostProvider hostProvider,
-                                                                   String clusterNamePropertyId,
-                                                                   String componentNamePropertyId) {
+  private PropertyProvider createGangliaComponentPropertyProvider(Resource.Type type, StreamProvider streamProvider,
+                                                                  ComponentSSLConfiguration configuration,
+                                                                  GangliaHostProvider hostProvider,
+                                                                  String clusterNamePropertyId,
+                                                                  String componentNamePropertyId) {
     return new GangliaComponentPropertyProvider(PropertyHelper.getGangliaPropertyIds(type),
-              streamProvider, configuration, hostProvider, clusterNamePropertyId, componentNamePropertyId);
+        streamProvider, configuration, hostProvider, clusterNamePropertyId, componentNamePropertyId);
   }
 
 
   /**
    * Create the Ganglia host component property provider for the given type.
    */
-  private PropertyProvider createGangliaHostComponentPropertyProvider( Resource.Type type, StreamProvider streamProvider,
-                                                                       ComponentSSLConfiguration configuration,
-                                                                       GangliaHostProvider hostProvider,
-                                                                       String clusterNamePropertyId,
-                                                                       String hostNamePropertyId,
-                                                                       String componentNamePropertyId) {
+  private PropertyProvider createGangliaHostComponentPropertyProvider(Resource.Type type, StreamProvider streamProvider,
+                                                                      ComponentSSLConfiguration configuration,
+                                                                      GangliaHostProvider hostProvider,
+                                                                      String clusterNamePropertyId,
+                                                                      String hostNamePropertyId,
+                                                                      String componentNamePropertyId) {
 
     return new GangliaHostComponentPropertyProvider(PropertyHelper.getGangliaPropertyIds(type), streamProvider,
-          configuration, hostProvider, clusterNamePropertyId, hostNamePropertyId, componentNamePropertyId);
+        configuration, hostProvider, clusterNamePropertyId, hostNamePropertyId, componentNamePropertyId);
+  }
+
+  /**
+   * Create the SQL component property provider for the given type.
+   */
+  private PropertyProvider createSQLComponentPropertyProvider(Resource.Type type,
+                                                              HostInfoProvider hostProvider,
+                                                              String clusterNamePropertyId,
+                                                              String componentNamePropertyId,
+                                                              String serviceNamePropertyId) {
+    return new SQLPropertyProvider(
+        PropertyHelper.getSQLServerPropertyIds(type),
+        hostProvider,
+        clusterNamePropertyId,
+        null,
+        componentNamePropertyId,
+        serviceNamePropertyId,
+        SinkConnectionFactory.instance());
+  }
+
+
+  /**
+   * Create the SQL host component property provider for the given type.
+   */
+  private PropertyProvider createSQLHostComponentPropertyProvider(Resource.Type type,
+                                                                  HostInfoProvider hostProvider,
+                                                                  String clusterNamePropertyId,
+                                                                  String hostNamePropertyId,
+                                                                  String componentNamePropertyId,
+                                                                  String serviceNamePropertyId) {
+
+    return new SQLPropertyProvider(
+        PropertyHelper.getSQLServerPropertyIds(type),
+        hostProvider,
+        clusterNamePropertyId,
+        hostNamePropertyId,
+        componentNamePropertyId,
+        serviceNamePropertyId,
+        SinkConnectionFactory.instance());
   }
 
   @Override
@@ -825,7 +930,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
 
     } catch (Exception e) {
       LOG.info("Exception while detecting JMX protocol for clusterName = " + clusterName +
-          ", componentName = " + componentName,  e);
+          ", componentName = " + componentName, e);
       LOG.info("Defaulting JMX to HTTP protocol for  for clusterName = " + clusterName +
           ", componentName = " + componentName +
           componentName);

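Both provider-creation branches above hinge on the same runtime check: on Windows the metrics come from the SQL Server sink (SQLPropertyProvider wired to SinkConnectionFactory), everywhere else from Ganglia. A self-contained restatement of just that switch:

public class MetricsProviderSwitch {
  public static void main(String[] args) {
    // Same condition as in the branches above: the server JVM's os.name
    // decides which family of property providers gets wired in.
    boolean onWindows = System.getProperty("os.name").contains("Windows");
    System.out.println(onWindows
        ? "would create SQLPropertyProvider backed by SinkConnectionFactory"
        : "would create Ganglia property providers over the stream provider");
  }
}

Since the check reads os.name of the Ambari server process itself, a Windows server always selects the SQL sink regardless of where the cluster hosts run.
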
+ 3 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java

@@ -123,6 +123,7 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
 
     Configuration configs = new Configuration();
     String TMP_PATH = configs.getProperty(Configuration.SERVER_TMP_DIR_KEY);
+    String pythonCmd = configs.getProperty(Configuration.AMBARI_PYTHON_WRAP_KEY);
     AmbariManagementController managementController = getManagementController();
     ConfigHelper configHelper = managementController.getConfigHelper();
     Cluster cluster = null;
@@ -285,7 +286,7 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
       commandParams.put("xml_configs_list", xmlConfigs);
       commandParams.put("env_configs_list", envConfigs);
       commandParams.put("properties_configs_list", propertiesConfigs);
-      commandParams.put("output_file", componentName + "-configs.tar.gz");
+      commandParams.put("output_file", componentName + "-configs" + Configuration.DEF_ARCHIVE_EXTENSION);
 
       Map<String, Object> jsonContent = new TreeMap<String, Object>();
       jsonContent.put("configurations", configurations);
@@ -316,7 +317,7 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
         throw new SystemException("Failed to write configurations to json file ", e);
       }
 
-      String cmd = "ambari-python-wrap " + commandScriptAbsolute + " generate_configs " + jsonFileName.getAbsolutePath() + " " +
+      String cmd = pythonCmd + " " + commandScriptAbsolute + " generate_configs " + jsonFileName.getAbsolutePath() + " " +
               packageFolderAbsolute + " " + TMP_PATH + File.separator + "structured-out.json" + " INFO " + TMP_PATH;
 
       try {

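With the python wrapper and archive extension now read from Configuration, the generated client-config command keeps the same shape on every platform. An illustration of the assembled command (all paths and the resolved wrapper value below are hypothetical examples, not values from the code):

public class ClientConfigCmdExample {
  public static void main(String[] args) {
    // Hypothetical values; only the concatenation shape mirrors the code above.
    String pythonCmd = "ambari-python-wrap";  // assumed resolution of AMBARI_PYTHON_WRAP_KEY on Linux
    String script    = "/var/lib/ambari-server/resources/stacks/HDP/2.1/services/HDFS/package/scripts/hdfs_client.py";
    String jsonFile  = "/var/lib/ambari-server/tmp/HDFS_CLIENT-configuration.json";
    String pkgDir    = "/var/lib/ambari-server/resources/stacks/HDP/2.1/services/HDFS/package";
    String tmpPath   = "/var/lib/ambari-server/tmp";
    String cmd = pythonCmd + " " + script + " generate_configs " + jsonFile + " "
        + pkgDir + " " + tmpPath + java.io.File.separator + "structured-out.json INFO " + tmpPath;
    System.out.println(cmd);
  }
}
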
+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java

@@ -481,7 +481,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
         }
         if (serviceName == null
             || serviceName.isEmpty()) {
-          throw new AmbariException("Could not find service for component"
+          throw new ObjectNotFoundException("Could not find service for component"
               + ", componentName=" + request.getComponentName()
               + ", clusterName=" + cluster.getClusterName()
               + ", stackInfo=" + stackId.getStackId());

+ 63 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/sql/HostInfoProvider.java

@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.sql;
+
+import org.apache.ambari.server.controller.spi.SystemException;
+
+/**
+ * Provider of host information.
+ */
+public interface HostInfoProvider {
+
+  /**
+   * Get the host name for the given cluster name and component name.
+   *
+   * @param clusterName    the cluster name
+   * @param componentName  the component name
+   *
+   * @return the host name
+   *
+   * @throws SystemException if unable to get the host name
+   */
+  public String getHostName(String clusterName, String componentName)
+      throws SystemException;
+
+  /**
+   * Get the host name.
+   *
+   * @param id  the host identifier
+   *
+   * @return the host name
+   *
+   * @throws SystemException if unable to get the host name
+   */
+  public String getHostName(String id)
+      throws SystemException;
+
+  /**
+   * Get the host ip address.
+   *
+   * @param id  the host identifier
+   *
+   * @return the host ip address
+   *
+   * @throws SystemException if unable to get the host address
+   */
+  public String getHostAddress(String id)
+      throws SystemException;
+}

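AbstractProviderModule (above) satisfies the two id-based methods with DNS lookups. For reference, a minimal sketch of an implementor in the same spirit, with the cluster/component variant left unimplemented because it needs cluster topology:

import org.apache.ambari.server.controller.spi.SystemException;

// Assumes the same package as HostInfoProvider; a DNS-only sketch, not the real implementation.
public class DnsHostInfoProvider implements HostInfoProvider {

  @Override
  public String getHostName(String clusterName, String componentName) throws SystemException {
    // Real implementations resolve this from cluster topology; out of scope for this sketch.
    throw new SystemException("cluster/component lookup not implemented in this sketch", null);
  }

  @Override
  public String getHostName(String id) throws SystemException {
    try {
      return java.net.InetAddress.getByName(id).getCanonicalHostName();
    } catch (Exception e) {
      throw new SystemException("Error getting hostname.", e);
    }
  }

  @Override
  public String getHostAddress(String id) throws SystemException {
    try {
      return java.net.InetAddress.getByName(id).getHostAddress();
    } catch (Exception e) {
      throw new SystemException("Error getting ip address.", e);
    }
  }
}
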
+ 572 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/sql/SQLPropertyProvider.java

@@ -0,0 +1,572 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.sql;
+
+import org.apache.ambari.server.controller.internal.AbstractPropertyProvider;
+import org.apache.ambari.server.controller.internal.PropertyInfo;
+import org.apache.ambari.server.controller.jdbc.ConnectionFactory;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.controller.spi.TemporalInfo;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Serializable;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.text.NumberFormat;
+import java.text.ParsePosition;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * SQL based property/metrics provider required for ambari-scom.
+ */
+public class SQLPropertyProvider extends AbstractPropertyProvider {
+
+  private final HostInfoProvider hostProvider;
+
+  private final String clusterNamePropertyId;
+
+  private final String hostNamePropertyId;
+
+  private final String componentNamePropertyId;
+
+  private final String serviceNamePropertyId;
+
+  private final ConnectionFactory connectionFactory;
+
+
+  // ----- Constants ---------------------------------------------------------
+
+  private static final String GET_METRICS_STATEMENT =
+    "SELECT  s.RecordTypeContext, s.RecordTypeName, s.TagPairs, s.NodeName, s.ServiceName, mn.Name AS MetricName, s.RecordTimeStamp, mp.MetricValue\n" +
+      "FROM HadoopMetrics.dbo.MetricPair mp\n" +
+      "     INNER JOIN (\n" +
+      "         SELECT mr.RecordID AS RecordID, mr.RecordTimeStamp AS RecordTimeStamp, rt.Context AS RecordTypeContext, rt.Name AS RecordTypeName, ts.TagPairs AS TagPairs, nd.Name AS NodeName, sr.Name AS ServiceName\n" +
+      "         FROM HadoopMetrics.dbo.MetricRecord mr\n" +
+      "              INNER JOIN HadoopMetrics.dbo.RecordType rt ON (mr.RecordTypeId = rt.RecordTypeId)\n" +
+      "              INNER JOIN HadoopMetrics.dbo.TagSet ts ON (mr.TagSetID = ts.TagSetID)\n" +
+      "              INNER JOIN HadoopMetrics.dbo.Node nd ON (mr.NodeID = nd.NodeID)\n" +
+      "              INNER JOIN HadoopMetrics.dbo.Service sr ON (mr.ServiceID = sr.ServiceID)\n" +
+      "         WHERE rt.Context in (%s)\n" +
+      "               AND rt.Name in (%s)\n" +
+      "               AND (ts.TagPairs LIKE %s)\n" +
+      "               AND (nd.Name in (%s))\n" +
+      "               AND (sr.Name in (%s))\n" +
+      "               AND mr.RecordTimestamp >= %d\n" +
+      "               AND mr.RecordTimestamp <= %d\n" +
+      "     ) s ON (mp.RecordID = s.RecordID)\n" +
+      "     INNER JOIN HadoopMetrics.dbo.MetricName mn ON (mp.MetricID = mn.MetricID)\n" +
+      "WHERE (mn.Name in (%s))";
+
+  protected final static Logger LOG = LoggerFactory.getLogger(SQLPropertyProvider.class);
+
+
+  // ----- Constructors ------------------------------------------------------
+
+  public SQLPropertyProvider(
+    Map<String, Map<String, PropertyInfo>> componentPropertyInfoMap,
+    HostInfoProvider hostProvider,
+    String clusterNamePropertyId,
+    String hostNamePropertyId,
+    String componentNamePropertyId,
+    String serviceNamePropertyId,
+    ConnectionFactory connectionFactory) {
+    super(componentPropertyInfoMap);
+    this.hostProvider = hostProvider;
+    this.clusterNamePropertyId = clusterNamePropertyId;
+    this.hostNamePropertyId = hostNamePropertyId;
+    this.componentNamePropertyId = componentNamePropertyId;
+    this.serviceNamePropertyId = serviceNamePropertyId;
+    this.connectionFactory = connectionFactory;
+  }
+
+
+  // ----- PropertyProvider --------------------------------------------------
+
+  @Override
+  public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate)
+    throws SystemException {
+    Set<Resource> keepers = new HashSet<Resource>();
+    try {
+      Connection connection = connectionFactory.getConnection();
+      try {
+        Statement statement = connection.createStatement();
+        try {
+          for (Resource resource : resources) {
+            if (populateResource(resource, request, predicate, statement)) {
+              keepers.add(resource);
+            }
+          }
+        } finally {
+          statement.close();
+        }
+      } finally {
+        connection.close();
+      }
+    } catch (SQLException e) {
+      if (LOG.isErrorEnabled()) {
+        LOG.error("Error during populateResources call.");
+        LOG.debug("Error during populateResources call : caught exception", e);
+      }
+    }
+    return keepers;
+  }
+
+
+  // ----- helper methods ----------------------------------------------------
+
+  // Populate the given resource
+  private boolean populateResource(Resource resource, Request request, Predicate predicate, Statement statement) throws SystemException {
+
+    Set<String> ids = getRequestPropertyIds(request, predicate);
+    if (ids.isEmpty()) {
+      // no properties requested ... nothing to do.
+      return true;
+    }
+
+    String componentName = (String) resource.getPropertyValue(componentNamePropertyId);
+    String serviceName = (String) resource.getPropertyValue(serviceNamePropertyId);
+
+    if (getComponentMetrics().get(componentName) == null) {
+      // no metrics defined for the given component ... nothing to do.
+      return true;
+    }
+
+    String clusterName = (String) resource.getPropertyValue(clusterNamePropertyId);
+    String hostName = getHost(resource, clusterName, componentName);
+
+    if (hostName == null) {
+      throw new SystemException(
+        "Unable to get metrics.  No host name for " + componentName, null);
+    }
+
+    Set<MetricDefinition> metricsDefinitionSet = new HashSet<MetricDefinition>();
+    for (String id : ids) {
+      Map<String, PropertyInfo> propertyInfoMap = getPropertyInfoMap(componentName, id);
+
+      for (Map.Entry<String, PropertyInfo> entry : propertyInfoMap.entrySet()) {
+        String propertyKey = entry.getKey();
+        PropertyInfo propertyInfo = entry.getValue();
+        if (containsArguments(propertyKey)) {
+          propertyInfo = updatePropertyInfo(propertyKey, id, propertyInfo);
+        }
+
+        String propertyId = propertyInfo.getPropertyId();
+        TemporalInfo temporalInfo = request.getTemporalInfo(id);
+
+        if ((propertyInfo.isPointInTime() && temporalInfo == null) ||
+          (propertyInfo.isTemporal() && temporalInfo != null)) {
+
+          long startTime;
+          long endTime;
+
+          if (temporalInfo != null) {
+            Long endTimeSeconds = temporalInfo.getEndTime();
+
+            endTime = endTimeSeconds != -1 ? endTimeSeconds * 1000 : Long.MAX_VALUE;
+            startTime = temporalInfo.getStartTime() * 1000;
+          } else {
+            startTime = 0L;
+            endTime = Long.MAX_VALUE;
+          }
+
+          String category = "";
+          String recordTypeContext = "";
+          String recordTypeName = "";
+          String metricName = "";
+          String tagPairsPattern = ",";
+          int dotIndex = propertyId.lastIndexOf('.');
+          if (dotIndex != -1) {
+            category = propertyId.substring(0, dotIndex);
+            metricName = propertyId.substring(dotIndex + 1);
+          }
+          String[] parts = category.split("\\.");
+          if (parts.length >= 2) {
+            recordTypeContext = parts[0];
+            recordTypeName = parts[1];
+            if (containsArguments(propertyKey) && parts.length > 2) {
+              tagPairsPattern = StringUtils.join(Arrays.copyOfRange(parts, 2, parts.length), ".");
+            }
+            metricsDefinitionSet.add(
+              new MetricDefinition(
+                startTime,
+                endTime,
+                recordTypeContext,
+                recordTypeName,
+                tagPairsPattern,
+                metricName,
+                serviceName != null && serviceName.toLowerCase().equals("hbase") ? serviceName.toLowerCase() : componentName.toLowerCase(),
+                hostName,
+                propertyKey,
+                id,
+                temporalInfo)
+            );
+          } else {
+            if (LOG.isWarnEnabled()) {
+              LOG.warn("Can't get metrics for " + id + " : " + propertyId);
+            }
+          }
+        }
+      }
+    }
+
+    Map<MetricDefinition, List<DataPoint>> results = getMetric(metricsDefinitionSet, statement);
+
+    for (MetricDefinition metricDefinition : metricsDefinitionSet) {
+      List<DataPoint> dataPoints = results.containsKey(metricDefinition) ? results.get(metricDefinition) : new ArrayList<DataPoint>();
+      TemporalInfo temporalInfo = metricDefinition.getTemporalInfo();
+      String propertyKey = metricDefinition.getPropertyKey();
+      String requestedPropertyKey = metricDefinition.getRequestedPropertyKey();
+      if (dataPoints != null) {
+        if (temporalInfo == null) {
+          // return the value of the last data point
+          int length = dataPoints.size();
+          Serializable value = length > 0 ? dataPoints.get(length - 1).getValue() : 0;
+          resource.setProperty(propertyKey, value);
+        } else {
+          Number[][] dp = new Number[dataPoints.size()][2];
+          for (int i = 0; i < dp.length; i++) {
+            dp[i][0] = dataPoints.get(i).getValue();
+            dp[i][1] = dataPoints.get(i).getTimestamp() / 1000;
+          }
+          if (containsArguments(propertyKey)) {
+            resource.setProperty(requestedPropertyKey, dp);
+          } else {
+            resource.setProperty(propertyKey, dp);
+          }
+        }
+      }
+    }
+
+    return true;
+  }
+
+  // get a metric from a sql connection
+  private Map<MetricDefinition, List<DataPoint>> getMetric(Set<MetricDefinition> metricDefinitionSet, Statement statement) throws SystemException {
+    Map<MetricDefinition, List<DataPoint>> results = new HashMap<MetricDefinition, List<DataPoint>>();
+    try {
+      StringBuilder query = new StringBuilder();
+      Set<String> recordTypeContexts = new HashSet<String>();
+      Set<String> recordTypeNamess = new HashSet<String>();
+      Set<String> tagPairsPatterns = new HashSet<String>();
+      Set<String> nodeNames = new HashSet<String>();
+      Set<String> serviceNames = new HashSet<String>();
+      Set<String> metricNames = new HashSet<String>();
+      long startTime = 0, endTime = 0;
+      for (MetricDefinition metricDefinition : metricDefinitionSet) {
+        if (metricDefinition.getRecordTypeContext() == null || metricDefinition.getRecordTypeName() == null || metricDefinition.getNodeName() == null) {
+          continue;
+        }
+
+        recordTypeContexts.add(metricDefinition.getRecordTypeContext());
+        recordTypeNames.add(metricDefinition.getRecordTypeName());
+        tagPairsPatterns.add(metricDefinition.getTagPairsPattern());
+        nodeNames.add(metricDefinition.getNodeName());
+        serviceNames.add(metricDefinition.getServiceName());
+        metricNames.add(metricDefinition.getMetricName());
+        startTime = metricDefinition.getStartTime();
+        endTime = metricDefinition.getEndTime();
+      }
+
+      for (String tagPairsPattern : tagPairsPatterns) {
+        if (query.length() != 0) {
+          query.append("\nUNION\n");
+        }
+        query.append(String.format(GET_METRICS_STATEMENT,
+          "'" + StringUtils.join(recordTypeContexts, "','") + "'",
+          "'" + StringUtils.join(recordTypeNamess, "','") + "'",
+          "'%" + tagPairsPattern + "%'",
+          "'" + StringUtils.join(nodeNames, "','") + "'",
+          "'" + StringUtils.join(serviceNames, "','") + "'",
+          startTime,
+          endTime,
+          "'" + StringUtils.join(metricNames, "','") + "'"
+        ));
+      }
+
+      ResultSet rs = null;
+      if (query.length() != 0) {
+        rs = statement.executeQuery(query.toString());
+      }
+
+      if (rs != null) {
+        //(RecordTimeStamp bigint, MetricValue NVARCHAR(512))
+        while (rs.next()) {
+          MetricDefinition metricDefinition = null;
+          for (MetricDefinition md : metricDefinitionSet) {
+            if (md.getRecordTypeContext().equalsIgnoreCase(rs.getString("RecordTypeContext"))
+              && md.getRecordTypeName().equalsIgnoreCase(rs.getString("RecordTypeName"))
+              && md.getMetricName().equalsIgnoreCase(rs.getString("MetricName"))
+              && md.getServiceName().equalsIgnoreCase(rs.getString("ServiceName"))
+              && md.getNodeName().equalsIgnoreCase(rs.getString("NodeName"))
+              && rs.getString("TagPairs").contains(md.getTagPairsPattern())) {
+              metricDefinition = md;
+              break;
+            }
+          }
+          if (metricDefinition == null) {
+            LOG.error("Error during getMetric call : No metricdefinition found for  result");
+            continue;
+          }
+          ParsePosition parsePosition = new ParsePosition(0);
+          NumberFormat numberFormat = NumberFormat.getInstance();
+          Number parsedNumber = numberFormat.parse(rs.getString("MetricValue"), parsePosition);
+          if (results.containsKey(metricDefinition)) {
+            results.get(metricDefinition).add(new DataPoint(rs.getLong("RecordTimeStamp"), parsedNumber));
+          } else {
+            List<DataPoint> dataPoints = new ArrayList<DataPoint>();
+            dataPoints.add(new DataPoint(rs.getLong("RecordTimeStamp"), parsedNumber));
+            results.put(metricDefinition, dataPoints);
+          }
+        }
+      }
+    } catch (SQLException e) {
+      throw new SystemException("Error during getMetric call : caught exception - ", e);
+    }
+    return results;
+  }
+
+  // get the hostname for a given resource
+  private String getHost(Resource resource, String clusterName, String componentName) throws SystemException {
+    return hostNamePropertyId == null ?
+      hostProvider.getHostName(clusterName, componentName) :
+      hostProvider.getHostName((String) resource.getPropertyValue(hostNamePropertyId));
+  }
+
+
+  // ----- inner class : DataPoint -------------------------------------------
+
+  /**
+   * Structure to hold a single datapoint (value/timestamp pair) retrieved from the db.
+   */
+  private static class DataPoint {
+    private final long timestamp;
+    private final Number value;
+
+    // ----- Constructor -------------------------------------------------
+
+    /**
+     * Construct a data point from the given value and timestamp.
+     *
+     * @param timestamp the timestamp
+     * @param value     the value
+     */
+    private DataPoint(long timestamp, Number value) {
+      this.timestamp = timestamp;
+      this.value = value;
+    }
+
+    // ----- DataPoint ---------------------------------------------------
+
+    /**
+     * Get the timestamp value.
+     *
+     * @return the timestamp
+     */
+    public long getTimestamp() {
+      return timestamp;
+    }
+
+    /**
+     * Get the value.
+     *
+     * @return the value
+     */
+    public Number getValue() {
+      return value;
+    }
+
+    // ----- Object overrides --------------------------------------------
+
+    @Override
+    public String toString() {
+      return "{" + value + " : " + timestamp + "}";
+    }
+  }
+
+  private class MetricDefinition {
+    long startTime;
+    long endTime;
+
+    String recordTypeContext;
+    String recordTypeName;
+    String tagPairsPattern;
+    String metricName;
+    String serviceName;
+    String nodeName;
+
+    String propertyKey;
+    String requestedPropertyKey;
+    TemporalInfo temporalInfo;
+
+    private MetricDefinition(long startTime, long endTime, String recordTypeContext, String recordTypeName, String tagPairsPattern, String metricName, String serviceName, String nodeName, String propertyKey, String requestedPropertyKey, TemporalInfo temporalInfo) {
+      this.startTime = startTime;
+      this.endTime = endTime;
+      this.recordTypeContext = recordTypeContext;
+      this.recordTypeName = recordTypeName;
+      this.tagPairsPattern = tagPairsPattern;
+      this.metricName = metricName;
+      this.serviceName = serviceName;
+      this.nodeName = nodeName;
+      this.propertyKey = propertyKey;
+      this.requestedPropertyKey = requestedPropertyKey;
+      this.temporalInfo = temporalInfo;
+    }
+
+    private MetricDefinition(String recordTypeContext, String recordTypeName, String tagPairsPattern, String metricName, String serviceName, String nodeName) {
+      this.recordTypeContext = recordTypeContext;
+      this.recordTypeName = recordTypeName;
+      this.tagPairsPattern = tagPairsPattern;
+      this.metricName = metricName;
+      this.serviceName = serviceName;
+      this.nodeName = nodeName;
+    }
+
+    public long getStartTime() {
+      return startTime;
+    }
+
+    public void setStartTime(long startTime) {
+      this.startTime = startTime;
+    }
+
+    public long getEndTime() {
+      return endTime;
+    }
+
+    public void setEndTime(long endTime) {
+      this.endTime = endTime;
+    }
+
+    public String getRecordTypeContext() {
+      return recordTypeContext;
+    }
+
+    public void setRecordTypeContext(String recordTypeContext) {
+      this.recordTypeContext = recordTypeContext;
+    }
+
+    public String getRecordTypeName() {
+      return recordTypeName;
+    }
+
+    public void setRecordTypeName(String recordTypeName) {
+      this.recordTypeName = recordTypeName;
+    }
+
+    public String getTagPairsPattern() {
+      return tagPairsPattern;
+    }
+
+    public void setTagPairsPattern(String tagPairsPattern) {
+      this.tagPairsPattern = tagPairsPattern;
+    }
+
+    public String getMetricName() {
+      return metricName;
+    }
+
+    public void setMetricName(String metricName) {
+      this.metricName = metricName;
+    }
+
+    public String getServiceName() {
+      return serviceName;
+    }
+
+    public void setServiceName(String serviceName) {
+      this.serviceName = serviceName;
+    }
+
+    public String getNodeName() {
+      return nodeName;
+    }
+
+    public void setNodeName(String nodeName) {
+      this.nodeName = nodeName;
+    }
+
+    public String getPropertyKey() {
+      return propertyKey;
+    }
+
+    public void setPropertyKey(String propertyKey) {
+      this.propertyKey = propertyKey;
+    }
+
+    public String getRequestedPropertyKey() {
+      return requestedPropertyKey;
+    }
+
+    public void setRequestedPropertyKey(String requestedPropertyKey) {
+      this.requestedPropertyKey = requestedPropertyKey;
+    }
+
+    public TemporalInfo getTemporalInfo() {
+      return temporalInfo;
+    }
+
+    public void setTemporalInfo(TemporalInfo temporalInfo) {
+      this.temporalInfo = temporalInfo;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      MetricDefinition that = (MetricDefinition) o;
+
+      if (metricName != null ? !metricName.equals(that.metricName) : that.metricName != null) return false;
+      if (nodeName != null ? !nodeName.equalsIgnoreCase(that.nodeName) : that.nodeName != null) return false;
+      if (recordTypeContext != null ? !recordTypeContext.equals(that.recordTypeContext) : that.recordTypeContext != null)
+        return false;
+      if (recordTypeName != null ? !recordTypeName.equals(that.recordTypeName) : that.recordTypeName != null)
+        return false;
+      if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
+      // Patterns match when either contains the other; null on one side only is a mismatch.
+      if (tagPairsPattern != null ? that.tagPairsPattern == null ||
+        !(tagPairsPattern.contains(that.tagPairsPattern) ||
+          that.tagPairsPattern.contains(tagPairsPattern)) : that.tagPairsPattern != null)
+        return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = recordTypeContext != null ? recordTypeContext.hashCode() : 0;
+      result = 31 * result + (recordTypeName != null ? recordTypeName.hashCode() : 0);
+      result = 31 * result + (metricName != null ? metricName.hashCode() : 0);
+      result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
+      result = 31 * result + (nodeName != null ? nodeName.toLowerCase().hashCode() : 0);
+      return result;
+    }
+  }
+}
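Note: equals() above treats two tagPairsPattern values as matching when either string contains the other, which is why hashCode() deliberately leaves tagPairsPattern out — two definitions with different but overlapping patterns compare equal and must therefore hash identically. A minimal standalone sketch of that constraint (PatternKey and its field are illustrative, not part of this patch):

    // Illustrative only: shows why a contains-based equals() forces the
    // compared field out of hashCode(). Patterns are assumed non-null here.
    final class PatternKey {
      private final String pattern; // hypothetical stand-in for tagPairsPattern

      PatternKey(String pattern) {
        this.pattern = pattern;
      }

      @Override
      public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof PatternKey)) return false;
        PatternKey that = (PatternKey) o;
        // "cluster=c1,host=h1" matches "host=h1" because one contains the other.
        return pattern.contains(that.pattern) || that.pattern.contains(pattern);
      }

      @Override
      public int hashCode() {
        // The pattern must not contribute: equal keys with different pattern
        // strings would otherwise land in different hash buckets.
        return 0;
      }
    }

With a HashSet or HashMap, two such keys then land in the same bucket and deduplicate as the equals() semantics intend.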

+ 132 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/sql/SinkConnectionFactory.java

@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.sql;
+
+import com.mchange.v2.c3p0.ComboPooledDataSource;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.jdbc.ConnectionFactory;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+
+/**
+ * Factory for the sink database connection.
+ */
+public class SinkConnectionFactory implements ConnectionFactory {
+
+  /**
+   * The database URL.
+   */
+  private String databaseUrl;
+
+  /**
+   * The database driver.
+   */
+  private String databaseDriver;
+
+  private String databaseUser;
+
+  private String databasePassword;
+
+  private boolean useIntegratedAuth;
+
+  /**
+   * Indicates whether the driver and connection pool have been initialized.
+   */
+  private boolean connectionInitialized = false;
+
+  private ComboPooledDataSource cpds;
+
+  /**
+   * The singleton.
+   */
+  private static SinkConnectionFactory singleton = new SinkConnectionFactory();
+
+  // ----- Constructor -------------------------------------------------------
+
+  protected SinkConnectionFactory() {
+    Configuration config = new Configuration();
+    this.databaseUrl    = config.getSinkDatabaseUrl();
+    this.databaseDriver = config.getSinkDatabaseDriver();
+    this.useIntegratedAuth = config.getSinkUseIntegratedAuth();
+    this.databaseUser = config.getSinkDatabaseUser();
+    this.databasePassword = config.getSinkDatabasePassword();
+  }
+
+
+  // ----- SinkConnectionFactory ---------------------------------------------
+
+  /**
+   * Initialize the pooled data source.
+   */
+  public void init() {
+    this.cpds = new ComboPooledDataSource();
+    this.cpds.setJdbcUrl(this.databaseUrl);
+    if (!useIntegratedAuth) {
+      this.cpds.setUser(this.databaseUser);
+      this.cpds.setPassword(this.databasePassword);
+    }
+    this.cpds.setMaxPoolSize(5);
+  }
+
+  /**
+   * Get the singleton instance.
+   *
+   * @return the singleton instance
+   */
+  public static SinkConnectionFactory instance() {
+    return singleton;
+  }
+
+  /**
+   * Get the database URL.
+   *
+   * @return the database URL
+   */
+  public String getDatabaseUrl() {
+    return databaseUrl;
+  }
+
+  /**
+   * Get the database driver.
+   *
+   * @return the database driver
+   */
+  public String getDatabaseDriver() {
+    return databaseDriver;
+  }
+
+  // ----- ConnectionFactory -------------------------------------------------
+
+  @Override
+  public Connection getConnection() throws SQLException {
+    synchronized (this) {
+      // Load the JDBC driver and build the pool on first use only.
+      if (!connectionInitialized) {
+        try {
+          Class.forName(databaseDriver);
+        } catch (Exception e) {
+          throw new SQLException("Can't load the driver class.", e);
+        }
+        init();
+        connectionInitialized = true;
+      }
+    }
+    return this.cpds.getConnection();
+  }
+}
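Since the factory hands out pooled c3p0 connections (capped at five) and loads the JDBC driver lazily on the first request, callers only need the singleton. A hedged usage sketch — the query and "metrics" table name are invented for illustration:

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.Statement;

    // Illustrative caller; the table and query are hypothetical.
    public class SinkQueryExample {
      public static void main(String[] args) throws Exception {
        // close() on a pooled connection returns it to c3p0 rather than closing it
        try (Connection conn = SinkConnectionFactory.instance().getConnection();
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM metrics")) {
          if (rs.next()) {
            System.out.println("rows: " + rs.getLong(1));
          }
        }
      }
    }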

+ 6 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java

@@ -44,6 +44,7 @@ public class PropertyHelper {
 
   private static final String PROPERTIES_FILE = "properties.json";
   private static final String GANGLIA_PROPERTIES_FILE = "ganglia_properties.json";
+  private static final String SQLSERVER_PROPERTIES_FILE = "sqlserver_properties.json";
   private static final String JMX_PROPERTIES_FILE = "jmx_properties.json";
   private static final String KEY_PROPERTIES_FILE = "key_properties.json";
   private static final char EXTERNAL_PATH_SEP = '/';
@@ -51,6 +52,7 @@ public class PropertyHelper {
   private static final Map<Resource.InternalType, Set<String>> PROPERTY_IDS = readPropertyIds(PROPERTIES_FILE);
   private static final Map<Resource.InternalType, Map<String, Map<String, PropertyInfo>>> JMX_PROPERTY_IDS = readPropertyProviderIds(JMX_PROPERTIES_FILE);
   private static final Map<Resource.InternalType, Map<String, Map<String, PropertyInfo>>> GANGLIA_PROPERTY_IDS = readPropertyProviderIds(GANGLIA_PROPERTIES_FILE);
+  private static final Map<Resource.InternalType, Map<String, Map<String, PropertyInfo>>> SQLSERVER_PROPERTY_IDS = readPropertyProviderIds(SQLSERVER_PROPERTIES_FILE);
   private static final Map<Resource.InternalType, Map<Resource.Type, String>> KEY_PROPERTY_IDS = readKeyPropertyIds(KEY_PROPERTIES_FILE);
 
   /**
@@ -114,6 +116,10 @@ public class PropertyHelper {
     return GANGLIA_PROPERTY_IDS.get(resourceType.getInternalType());
   }
 
+  public static Map<String, Map<String, PropertyInfo>> getSQLServerPropertyIds(Resource.Type resourceType) {
+    return SQLSERVER_PROPERTY_IDS.get(resourceType.getInternalType());
+  }
+
   public static Map<String, Map<String, PropertyInfo>> getJMXPropertyIds(Resource.Type resourceType) {
     return JMX_PROPERTY_IDS.get(resourceType.getInternalType());
   }
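The SQL Server accessor follows the same pattern as the Ganglia and JMX lookups: the JSON resource is parsed once at class-load time and the resulting map is served per resource type. A hedged sketch of a consumer (package paths for PropertyInfo and Resource are assumed from the surrounding code; the consumer class is illustrative):

    import java.util.Map;

    import org.apache.ambari.server.controller.internal.PropertyInfo; // package assumed
    import org.apache.ambari.server.controller.spi.Resource;
    import org.apache.ambari.server.controller.utilities.PropertyHelper;

    // Illustrative consumer; a real metrics provider would iterate these ids
    // to build SQL Server queries for each component's metrics.
    public class SqlServerPropertyIdsExample {
      public static void main(String[] args) {
        Map<String, Map<String, PropertyInfo>> ids =
            PropertyHelper.getSQLServerPropertyIds(Resource.Type.HostComponent);
        System.out.println("components with SQL Server metrics: "
            + (ids == null ? 0 : ids.size()));
      }
    }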

Some files were not shown because too many files changed in this diff