
Merging AMBARI-666 to trunk.

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/trunk@1421062 13f79535-47bb-0310-9956-ffa450edef68
Mahadev Konar · 12 years ago
parent commit b87dc45ed3
100 changed files with 3393 additions and 2070 deletions
  1. CHANGES.txt (+2, -1326)
  2. ambari-agent/.project (+29, -0)
  3. ambari-agent/.pydevproject (+7, -0)
  4. ambari-agent/ambari-agent.iml (+14, -0)
  5. ambari-agent/conf/unix/ambari-agent (+138, -6)
  6. ambari-agent/conf/unix/ambari-agent.ini (+32, -0)
  7. ambari-agent/conf/unix/ambari-env.sh (+3, -0)
  8. ambari-agent/conf/unix/ambari.ini (+0, -28)
  9. ambari-agent/pom.xml (+119, -10)
  10. ambari-agent/src/main/package/rpm/postinstall.sh (+1, -0)
  11. ambari-agent/src/main/package/rpm/preinstall.sh (+2, -0)
  12. ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp (+2, -1)
  13. ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb (+1, -1)
  14. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py (+138, -0)
  15. ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp (+26, -16)
  16. ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp (+43, -3)
  17. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp (+1, -1)
  18. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp (+36, -0)
  19. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp (+42, -0)
  20. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp (+14, -0)
  21. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp (+5, -5)
  22. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp (+8, -0)
  23. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp (+1, -1)
  24. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb (+3, -0)
  25. ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb (+16, -0)
  26. ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2.sql (+23, -0)
  27. ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2Smoke.sh (+31, -0)
  28. ambari-agent/src/main/puppet/modules/hdp-hive/files/startHiveserver2.sh (+22, -0)
  29. ambari-agent/src/main/puppet/modules/hdp-hive/files/startMetastore.sh (+22, -0)
  30. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/hive/service_check.pp (+39, -17)
  31. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/metastore.pp (+61, -0)
  32. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/mysql-connector.pp (+2, -2)
  33. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/params.pp (+15, -1)
  34. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/server.pp (+2, -1)
  35. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/service.pp (+47, -11)
  36. ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/init.pp (+48, -6)
  37. ambari-agent/src/main/puppet/modules/hdp-mysql/files/startMysql.sh (+3, -3)
  38. ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/server.pp (+26, -7)
  39. ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_puppet_agent_status.php (+0, -59)
  40. ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/params.pp (+7, -2)
  41. ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp (+107, -5)
  42. ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp (+0, -1)
  43. ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/packages.pp (+3, -10)
  44. ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb (+0, -5)
  45. ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-servicegroups.cfg.erb (+0, -4)
  46. ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb (+7, -18)
  47. ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.cfg.erb (+2, -2)
  48. ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke.sh (+1, -1)
  49. ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp (+1, -15)
  50. ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp (+9, -6)
  51. ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-env.sh.erb (+1, -0)
  52. ambari-agent/src/main/puppet/modules/hdp-repos/templates/repo.erb (+2, -1)
  53. ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/mysql-connector.pp (+4, -7)
  54. ambari-agent/src/main/puppet/modules/hdp-sqoop/templates/sqoop-env.sh.erb (+3, -0)
  55. ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp (+9, -9)
  56. ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp (+9, -9)
  57. ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp (+10, -10)
  58. ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/params.pp (+8, -8)
  59. ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp (+17, -17)
  60. ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/service.pp (+2, -2)
  61. ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/templeton/service_check.pp (+1, -1)
  62. ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb (+9, -6)
  63. ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/service.pp (+1, -1)
  64. ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp (+2, -2)
  65. ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp (+31, -11)
  66. ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp (+1, -2)
  67. ambari-agent/src/main/puppet/modules/hdp/manifests/package.pp (+48, -29)
  68. ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp (+192, -16)
  69. ambari-agent/src/main/python/ambari_agent/ActionQueue.py (+102, -23)
  70. ambari-agent/src/main/python/ambari_agent/Controller.py (+93, -32)
  71. ambari-agent/src/main/python/ambari_agent/Grep.py (+22, -8)
  72. ambari-agent/src/main/python/ambari_agent/Hardware.py (+26, -6)
  73. ambari-agent/src/main/python/ambari_agent/Heartbeat.py (+5, -35)
  74. ambari-agent/src/main/python/ambari_agent/LiveStatus.py (+134, -0)
  75. ambari-agent/src/main/python/ambari_agent/NetUtil.py (+22, -9)
  76. ambari-agent/src/main/python/ambari_agent/ProcessHelper.py (+51, -0)
  77. ambari-agent/src/main/python/ambari_agent/Register.py (+63, -4)
  78. ambari-agent/src/main/python/ambari_agent/RepoInstaller.py (+24, -5)
  79. ambari-agent/src/main/python/ambari_agent/StatusCheck.py (+8, -3)
  80. ambari-agent/src/main/python/ambari_agent/main.py (+39, -38)
  81. ambari-agent/src/main/python/ambari_agent/manifestGenerator.py (+37, -19)
  82. ambari-agent/src/main/python/ambari_agent/puppetExecutor.py (+146, -76)
  83. ambari-agent/src/main/python/ambari_agent/rolesToClass.dict (+4, -4)
  84. ambari-agent/src/main/python/ambari_agent/security.py (+53, -15)
  85. ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict (+14, -4)
  86. ambari-agent/src/main/python/ambari_agent/site.pp (+28, -26)
  87. ambari-agent/src/main/python/ambari_agent/test.json (+8, -8)
  88. ambari-agent/src/test/python/TestActionQueue.py (+87, -1)
  89. ambari-agent/src/test/python/TestController.py (+325, -0)
  90. ambari-agent/src/test/python/TestGrep.py (+8, -1)
  91. ambari-agent/src/test/python/TestHardware.py (+2, -0)
  92. ambari-agent/src/test/python/TestHeartbeat.py (+94, -4)
  93. ambari-agent/src/test/python/TestLiveStatus.py (+37, -0)
  94. ambari-agent/src/test/python/TestNetUtil.py (+20, -39)
  95. ambari-agent/src/test/python/TestPuppetExecutor.py (+141, -5)
  96. ambari-agent/src/test/python/TestPuppetExecutorManually.py (+57, -0)
  97. ambari-agent/src/test/python/TestRegistration.py (+37, -0)
  98. ambari-agent/src/test/python/dummy_puppet_output_error2.txt (+40, -0)
  99. ambari-agent/src/test/python/dummy_puppet_output_error3.txt (+76, -0)
  100. ambari-agent/src/test/python/examples/debug_testcase_example.py (+79, -0)

CHANGES.txt (+2, -1326)

@@ -6,51 +6,13 @@ should be listed by their full name.
 - Please keep the file to a max of 80 characters wide.
 - Put latest commits first in each section.

-Trunk (unreleased changes)
+  Merging AMBARI-666 to trunk.

-Changes merged from AMBARI-666
+AMBARI-666 branch (unreleased changes)
 
  INCOMPATIBLE CHANGES

  NEW FEATURES
-  
-  AMBARI-1054. Implement retrying of bootstrap on confirm host page.
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-1048. Integrate slave configuration parameters with respective
-  service on step7 of installer wizard. (Jaimin Jetly via yusaku)
-
-  AMBARI-1031. Check for host registration at step3 of installer wizard 
-  and retrieve information for RAM and no. of cores. (Jaimin Jetly via
-  yusaku)
-
-  AMBARI-1022. Integrate Heatmap UI to backend API. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1015. Create HBase summary section in Dashboard & Service
-  pages. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1014. Hook service summary sections in service pages to API.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1008. Populate dashboard>MapReduce section with API data.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1006. Populate dashboard>HDFS section with API data.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1004. Allow properties entered in custom config
-  (ex: hdfs-site.xml) to override existing or create new properties.
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-1002. Integrate Installer with config APIs. (Jaimin Jetly
-  via yusaku)
-
-  AMBARI-989. Show task logs for each host in the Deploy step of the
-  wizard. (yusaku)
-
-  AMBARI-976.  Hook HDFS/MapReduce/HBase/Host graphs to backend API
-  (Srimanth Gunturi via yusaku)

  AMBARI-964. Implement summary page of installer wizard. (Jaimin Jetly
  via yusaku)
@@ -431,60 +393,6 @@ Changes merged from AMBARI-666
  AMBARI-676. Seperate directory for ambari-server. (jitendra)

  IMPROVEMENTS
-  
-  AMBARI-1053. Dashboard page loads very slow due to hosts?fields=* API call
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1051. Dashboard page takes long time to load. (Srimanth Gunturi via
-  yusaku)
-
-  AMBARI-1041. Additional metrics need to be added to Heatmap UI. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1040. Cluster heatmap: green should always mean "good". (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1039. Improve Nagios alerts time display. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1036. Service Info/Quick Links do not display external hostnames.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1035. Aggregate creation of multiple services and assignment of host
-  to cluster. (Jaimin Jetly via yusaku)
-
-  AMBARI-1034. Metric Charts - display local time rather than UTC.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1033. Nagios and Ganglia links should use public host names in URLs.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1030. Metrics links in web ui should link to Ganglia UI. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1025. Display total install and start services time on summary page
-  and polish summary page ui. (Jaimin Jetly via yusaku)
-
-  AMBARI-1023. Dashboard page should handle API sending JSON as strings and
-  object. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1011. Create 2 missing HDFS service graphs. (Srimanth Gunturi via
-  yusaku)
-
-  AMBARI-1003. Nagios sections should use backend API to populate. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1062. Convert Apache license header comment style in Handlebars files
-  to Handlebars comments rather than JavaScript comments. (yusaku)
-
-  AMBARI-1061. Data loading refactoring for cluster management. (yusaku)
-
-  AMBARI-1060. Data loading for App Browser. (yusaku)
-
-  AMBARI-993. Hook up login with server authentication. (yusaku)
-
-  AMBARI-1059. Refactor cluster management. (yusaku)
-
-  AMBARI-1058. Implement data loading. (yusaku)

  AMBARI-956. On unavailability of non-master components, host with least
  number of master components should install all slave and client components.
@@ -525,36 +433,6 @@ Changes merged from AMBARI-666
  OPTIMIZATIONS

  BUG FIXES
- 
-  AMBARI-1046. Heatmap with no numbers on the hover. (Srimanth Gunturi via
-  yusaku)
-
-  AMBARI-1045. Service summary sections have incorrect values displayed.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1042. Heatmap UI fluctuates between white and green colors
-  intermittently. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1032. Host hover in cluster heatmap showing mock data. (Srimanth
-  Gunturi via yusaku)
- 
-  AMBARI-1028. MapReduce & HDFS summaries should use ServiceComponentInfo
-  values. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1017. Alerts not showing up in Ambari UI due to model refactoring.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1013. Host metrics charts should use live data. (Srimanth Gunturi
-  via yusaku)
-
-  AMBARI-1009. Cluster level graphs need to use API for data. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1064. App Browser fixes. (yusaku)
-
-  AMBARI-995. Deploy logs not shown for failed tasks. (yusaku)
-
-  AMBARI-992. Logout does not clean application state properly. (yusaku)

  AMBARI-957. Adding a host whose hostname is the same as the one the user
  is accessing Ambari Web with breaks the Installer. (yusaku)
@@ -633,1205 +511,3 @@ Changes merged from AMBARI-666
  AMBARI-684. Remove non-required dependencies from pom files (hitesh via jitendra)

  AMBARI-680. Fix pom structure. (hitesh)
-
-Pre-AMBARI-666 changes.
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-    AMBARI-766. Add privacy policy in web site. (Olivier Lamy via yusaku)
-
-    AMBARI-765. Update project site documentation to point to the 0.9
-    release. Also update the style. (yusaku) 
-
-    AMBARI-700. Add a link to the Confluence Wiki in the upper-right nav on
-    the Ambari project webpages (yusaku) 
-
-    AMBARI-679. Have Monitoring Dashboard display Puppet Agent related alerts
-    (yusaku)
-
-    AMBARI-678. Enable nagios add-on to send back information for puppet agents
-    being down (hitesh)
-  
-    AMBARI-656. Separate keytab for HTTP principal. (jitendra)
-
-    AMBARI-655. Move hbase directory creation in hdfs to namenode puppet 
-    module. (jitendra)
-
-    AMBARI-648. Keytab generation for hbase, hive. (jitendra)
-
-    AMBARI-642. Require jce policy file. (jitendra)
-
-    AMBARI-641. change the nagios status.data file location according to
-    platform (vgogate)
-
-    AMBARI-579. Support secure hadoop installation. (jitendra)
-
-    AMBARI-640. Add entrypoint to fetch info pertaining to Ganglia Clusters
-    (reznor)
-
-    AMBARI-616. Enable support for configuration of heapsize for TaskTracker
-    (hitesh)
-
-    AMBARI-636. Support for Hadoop Security (front-end changes)
-    (Jaimin Jetly via yusaku)
-
-    AMBARI-634. Force redirect to the "upgrade progress" page to prevent any
-    other actions during Hadoop stack upgrade (yusaku)
-
-    AMBARI-628. hdp-nagios and hdp-monitoring has wrong configuration file
-    location, also owner:group permissions are wrong.
-
-    AMBARI-631. Set the new Hadoop stack version in the database upon successful
-    upgrade (yusaku) 
-
-    AMBARI-623. Streamline UI flow for Hadoop stack upgrade (yusaku)
-
-    AMBARI-622. Upgrade DB only when Ambari Config table exists. (mahadev)
-
-    AMBARI-621. On Cluster Summary page, show Hadoop stack version information
-    and "Upgrade available" link if a newer version of the stack is available
-    (yusaku) 
-
-    AMBARI-612. Allow upgrading the database and add versions for ambari and
-    hadoop stacks. (mahadev)
-
-    AMBARI-605. Add UI flow/groundwork for handling Ambari / Hadoop stack version
-    upgrades (yusaku) 
-
-  IMPROVEMENTS
-    
-    AMBARI-686. Update documentation for RHEL/CentOS 6 install steps and a more
-    streamlined procedure in general (yusaku)
-
-    AMBARI-635. Add Nodes Progress: for partial failure that lets the user
-    continue, display an orange bar rather than a red bar in the progress popup
-    (yusaku)
-
-    AMBARI-620. Put common Javascript utility functions into appropriate
-    namespace/packages (yusaku)
-
-    AMBARI-615. Eliminate redundant and unused definition for the columns in the
-    table ConfigProperties (yusaku) 
-
-    AMBARI-613. Do not force Hadoop stack upgrade when a new version is available.
-    Show Hadoop stack version info (currently installed and latest available).
-    (yusaku)
-
-    AMBARI-607. Increase puppet timeouts to handle single-node installs timing
-    out (hitesh)
-
-    AMBARI-609. Modify router to handle hierarchical directory structure for
-    front-facing PHP files (yusaku)
-
-    AMBARI-606. Refactor "review and deploy" Javascript code for reuse (yusaku)
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
-    AMBARI-782. Make RPM package builds honor Ruby's sitelibdir
-    (Jos Backus via mahadev)
-
-    AMBARI-701. Ambari does not handle a pre-setup user-supplied Hive Metastore. (hitesh)
-
-    AMBARI-690. Monitoring Dashboard - update the RPM version and documentation
-    (yusaku)
-
-    AMBARI-689. Fix ambari agent init.d scripts and the bootstrapping.
-    (mahadev)
-
-    AMBARI-688. Documentation typo: jce_policy-6.zip needs to be uploaded to
-    Ambari master, not jce_policy-6.jar (yusaku)
-
-    AMBARI-687. Monitoring Dashboard is not showing alerts for Templeton 
-    (yusaku)
-
-    AMBARI-674. Nagios uninstall should cleanup temporary directories. (jitendra)
-
-    AMBARI-673. Going back to step 3 from step 5 in UI breaks DB
-    (Jaimin Jetly via yusaku)
-
-    AMBARI-671. Mapred child java opts set to a too large of a value for 
-    heapsize. (jitendra)
-
-    AMBARI-664. Fix mapred io sort mb and heap size for map/reduce. (mahadev)
-
-    AMBARI-661. Deploy cluster fails during Hive/HCatalog test (mahadev via yusaku)
-
-    AMBARI-654. Enter the value of the service properties that are not meant to
-    be displayed on UI in ServiceConfig table (Jaimin Jetly via yusaku) 
-
-    AMBARI-602. Fix install docs for 64-bit only support and how to pre-setup
-    cluster for ambari-agent for testing (hitesh)
-
-    AMBARI-653. Log txn-id when reporting completion of nodes for puppet kicks in
-    orchestrator (hitesh)
-
-    AMBARI-652. Large number of spurious logs due to undefined variables (yusaku)
-    
-    AMBARI-651. Fix issues with the footer overlapping page content due to
-    uncleared floats (yusaku)
-
-    AMBARI-650. Issues with Reconfigure Service (yusaku)
-
-    AMBARI-647. Change back log level to INFO from DEBUG (hitesh)
-
-    AMBARI-644. Fix various markup issues (yusaku) 
-
-    AMBARI-639. Monitoring Dashboard does not show the Help Link in the top nav
-    and a link to Apache 2.0 license and NOTICE file in the footer (yusaku) 
-
-    AMBARI-638. Weirdness with Custom Config page when the user goes back to 
-    previous stages (yusaku)
-
-    AMBARI-637. Cluster Install Wizard: the 7-step nav goes out of sync if the
-    user goes back to Step 3 and moves forward (yusaku) 
-
-    AMBARI-586. Add validation checks for the HBase config services value
-    entered by the user (Jaimin Jetly via yusaku)
-
-    AMBARI-632. Ensure that mysql connector jar is available to sqoop (hitesh)
-
-    AMBARI-633. Fix invalid HTML markup on Monitoring Dashboard (yusaku)
-
-    AMBARI-630. lzo needs arch-specific hadoop-lzo-native rpm to also be installed
-    (hitesh)
-
-    AMBARI-627. AMBARI-400 messed up the hbase region server opts (ddas via hitesh)
-
-    AMBARI-629. Upgrading Hadoop stack - uninstall for upgrade should preserve
-    cluster information (yusaku) 
-
-    AMBARI-624. Make hadoop-client/templeton/pig/sqoop also use 64-bit only packages
-    (hitesh)
-
-    AMBARI-625. Fix undefined variable warning in puppet php layer (hitesh)
-
-    AMBARI-614. The database set up script has a duplicate definition of
-    AmbariConfig so install fails (yusaku) 
-
-    AMBARI-681. Stopping of hive server fails. (jitendra)
-
-    AMBARI-682. Fix log location for gc logs. (jitendra)
-
-    AMBARI-683. ooziedb.sh should be invoked after setup. (jitendra)
-
-Release 0.9.0
-
-  AMBARI-675. Make puppet generate more logs on command failures (hitesh)
-
-  AMBARI-672. Hardcoded -Xmn value for hbase causes region servers to fail to start in
-  machines with less memory (hitesh)
-
-  AMBARI-667. After reboots, namenode start fails as it thinks dfs is not formatted (hitesh)
-
-  AMBARI-668. Ambari should install yum priorities plugin on all nodes to ensure repo
-  priorities are adhered to. (hitesh)
-
-  AMBARI-618. Fix RAT warnings on docs directory (vinodkv via vikram)
-
-  AMBARI-617. Missing some files in rat-excludes (vikram via vinodkv)
-
-  AMBARI-600. Fix lzo installs to work correctly on RHEL6 (hitesh)
-
-  AMBARI-598. Set state to UNKNOWN and not 0 to avoid issues when using === matches
-  (hitesh)  
-
-  AMBARI-599. Welcome page - revive introductory message (yusaku) 
-
-  AMBARI-589. Refactor progress popup (TxnProgressWidget) Javascript code
-  (yusaku)
-
-  AMBARI-597. Remove /usr/bin/php dependency from the rpm's. (mahadev)
-
-  AMBARI-596. Remove unused images in images/ directory. (vinodkv)
-
-  AMBARI-594. Move documentation into a sub-directory. (vinodkv)
-
-  AMBARI-595. Fix NOTICE file to link to the images borrowed from Iconic. (vikram)
-
-  AMBARI-495. HMC master node not shown in topology and node assignments 
-  if the HMC server is not assigned to any other role (yusaku)
-
-  AMBARI-593. Update README file for consistency (yusaku)
-
-  AMBARI-592. Add a link to NOTICE file on every page (yusaku)
-
-  More notices added for jqgrid etc. (vikram)
-
-  AMBARI-591. License header for PHP files should use PHP comments,
-  not HTML comments (yusaku)
-
-  Added notices where we use compatibly licensed third party libraries (vikram)
-
-  Rat tool compliance on special files. (hitesh and vikram)
-
-  AMBARI-588. Externalize the manager service name and point the Help link to
-  a valid URL (yusaku)
-
-  AMBARI-587. Rat compliance patch. (vikram)
-
-  AMBARI-583. UI allows io_sort_spill_percent value to be set to over 1.0
-  (Jaimin Jetly via hitesh)
-
-  AMBARI-580. Run datanodes/tasktrackers in 64-bit mode to get around rpm issues
-  (hitesh)
-
-  AMBARI-585. Remove hardcoded dependency on mysql-connector-java package
-  version 5.0.8-1 (hitesh)
-
-  AMBARI-581. Strip carriage-return related control-chars from hosts files
-  (Jaimin Jetly via hitesh)
-
-  AMBARI-582. Update the installation guide - monitoring dashboard install
-  instructions (yusaku) 
-
-  AMBARI-569. Nagios install fails on RHEL6 due to php-pecl-json dep (hitesh)
-
-  AMBARI-546. Puppet fails to install 32-bit JDK properly on RHEL6 (hitesh)
-
-  AMBARI-548. Puppet agent install script should use correct epel repo (hitesh)
-
-  AMBARI-547. Change os type check during node bootstrap to allow for
-  CentOS6/RHEL6 nodes (hitesh)
-
-  AMBARI-549. rpm should have a dependency on php-posix (hitesh)
-
-  AMBARI-578. Custom Config page: don't allow form submission if there are
-  client-side validation errors (yusaku)
-
-  AMBARI-411. The HBase puppet templates could include some more config knobs
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-577. Document the steps to build and install monitoring RPMs for Ambari 
-              (vgogate)
-
-  AMBARI-576. In Custom config for Nagios: emails with multiple periods
-  before the '@' fails validation (Jaimin Jetly via yusaku)
- 
-  AMBARI-574. Service Configuration: tabify settings for each service
-  (yusaku) 
-
-  AMBARI-573. Puppet error: Cannot reassign variable zookeeper_hosts at
-  modules/hdp/manifests/params.pp (hitesh)
-
-  AMBARI-571. Hive Server text should be replaced with Hive Metastore
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-570. Consolidate head tags for organization and combine CSS files
-  for faster load (yusaku)
-
-  AMBARI-565. Remove YUI source files from SVN (yusaku)
-
-  AMBARI-566. Update documentation (yusaku)
-
-  AMBARI-564. Check in generated docs into svn to allow automatic updates
-  for doc web server. (hitesh)
-
-  AMBARI-559. Top nav - specify a static height for the logo (yusaku)
-
-  AMBARI-558. Update Installation Guide (yusaku)
-
-  AMBARI-556. Update version number for documentation (hitesh via yusaku)
-  
-  AMBARI-551. Missing font images (yusaku)
-
-  AMBARI-550. Add support to jump to a specified state in the wizard for
-  development purposes (yusaku)
-
-  AMBARI-538. Puppet layer assumes net-snmp* to be installed on all boxes
-  (ramya via yusaku)
-
-  AMBARI-536. Duplicate hosts not recognized due to case sensitive matching
-  (hitesh via yusaku)
-
-  AMBARI-535. On Reconfigure Service popup, enable webhdfs and enable lzo
-  check boxes are not selected when they should be (yusaku)
-
-  AMBARI-534. Duplicate package names passed in the manifest (jitendra via
-  yusaku)
-
-  AMBARI-557. Update project website documentation (yusaku via hitesh)
-
-  AMBARI-554. Update documentation to account for ambari-186 merge to trunk
-  (Yusaku via hitesh)
-
-  AMBARI-555. Update README for additional documentation to get around errors
-  related to missing php-posix module (ViVek Raghuwanshi via hitesh)
-
-  AMBARI-552. Update README to point to trunk (vinodkv via vikram)
-
-  AMBARI-543. Rpm naming needs to be corrected. (vikram via reznor)
-
-  AMBARI-541. Update README for installing/running code off
-  AMBARI-186. (vinodkv via reznor)
-
-  AMBARI-542. Rename HMC to Ambari in user-facing init.d scripts
-  (reznor via vikram)
-
-  AMBARI-540. Naming cleanup required for management console. (vikram
-  via reznor)
-
-  AMBARI-539. Create a spec file with less dependencies for
-  HMC. (hitesh via reznor)
-
-  AMBARI-537. Cleaning up references. (vikram via hitesh)
-
-  AMBARI-528. Fix oozie smoke test failure (ramya via vikram)
-
-  AMBARI-525. Javascript should not pass flag in case of post install
-  add nodes. (vikram)
-
-  AMBARI-524. Add nodes restore yum default on post install add nodes
-  page (vikram)
-
-  AMBARI-523. Need to persist yum repo for add nodes. (vikram)
-
-  AMBARI-522. Fix version of the server rpm. (mahadev via vikram)
-
-  AMBARI-521. Lzo Install with RPM's. (ramya via vikram)
-
-  AMBARI-520. Disable all debug console logging on the browser. (Yusaku Sako
-  via vikram)
-
-  AMBARI-517. Dashboard shows HDFS is down though it's still running.
-  (vgogate via vikram)
-
-  AMBARI-516. Fix epel install to be using curl calls. (mahadev via vikram)
-
-  AMBARI-515. Modules tar size increases. (jitendra via vikram)
-
-  AMBARI-514. Fix parsing error in puppet manifests. (mahadev via vikram)
-
-  AMBARI-513. Download mysql rpm for sqoop (ramya via vikram)
-
-  AMBARI-512. Fix puppet manifests for tarball downloads via
-  rpms. (mahadev via vikram)
-
-  AMBARI-511. Support rpms for mysql connector and other
-  tars. (jitendra via vikram)
-
-  AMBARI-510. Modify the router to force redirection to "Add Nodes
-  Progress" popup (Yusaku Sako via vikram)
-
-  AMBARI-508. Support Resume For Add Nodes (reznor via vikram)
-
-  AMBARI-506. Do not use epel in local yum repo installs (hitesh via vikram)
-
-  AMBARI-507. Install rpms instead of tar.gz downloads (ramya via vikram)
-
-  AMBARI-505. Messaging Update (Yusaku Sako via vikram)
-
-  AMBARI-503. Make sure epel rep is installed when installing the
-  agent. (mahadev via vikram)
-
-  AMBARI-527. Increase number of puppet retries to recover from
-  intermittent network issues. (jitendra via vikram)
-
-  AMBARI-502. X button missing from popup (Yusaku Sako via vikram)
-
-  AMBARI-501. Speed up page load/reload times (Yusaku Sako via vikram)
-
-  AMBARI-500. Fix versions in rpm spec file. (mahadev via vikram)
-
-  AMBARI-499. Add "Help" link to the top nav (Yusaku Sako via vikram)
-
-  AMBARI-498. Make service directories editable (but not
-  reconfigurable) (Yusaku Sako via vikram)
-
-  AMBARI-497. Messaging clean up (Yusaku Sako via vikram)
-
-  AMBARI-496. Ganglia graphs have labels/content that are not
-  meaningful (vgogate via vikram)
-
-  AMBARI-494. Fix node assignments not not allow slaves on
-  master. (mahadev via vikram)
-
-  AMBARI-493. Add rack_info as column in Hosts table (hitesh via vikram)
-
-  AMBARI-492. make support for os check a bit more robust (hitesh via vikram)
-
-  AMBARI-491. Service Reconfiguration screens should respect the
-  "reconfigurable" attributes set in ConfigProperties table (Yusaku Sako
-  via vikram)
-
-  AMBARI-490. Highlight the required parameters in Step 6 "Custom
-  Config" of the Cluster Init Wizard (Yusaku Sako via vikram)
-
-  AMBARI-489. Call out HMC master node in the topology and node
-  assignments (Yusaku Sako via vikram)
-
-  AMBARI-488. Manage service needs a way to recover from terminated
-  browser sessions (Yusaku Sako via vikram)
-
-  AMBARI-487. Add nodes with external name on AWS causes issues. (vikram)
-
-  AMBARI-486. Add Node installs MySQL Server for Hive (Yusaku Sako via vikram)
-
-  AMBARI-485. Make Firebug debugger work again with fileCombinator.php
-  (Yusaku Sako via vikram)
-
-  AMBARI-484. Reconfigure option for Nagios service does not work (vikram)
-
-  AMBARI-483. Start Making Responses From fileCombinator.php Suitably
-  Cacheable (reznor via vikram)
-
-  AMBARI-482. Show the same welcome page to the user if the user
-  starts configuring a cluster but has not started deploy yet (Yusaku Sako
-  via vikram)
-
-  AMBARI-481. Units of various config values not displayed on review
-  and deploy page (Yusaku Sako via vikram)
-
-  AMBARI-480. Reduce Page Load Time By Combining HMC JS Files (reznor
-  via vikram)
-
-  AMBARI-479. Add nodes after install does not allow re-bootstrap if
-  user closes browser after bootstrap and before starting services
-  (vikram)
-
-  AMBARI-477. Spec file for using installer with php-5.3 (hitesh via vikram)
-
-  AMBARI-478. Checkbox in the review and deploy page does not show
-  proper state (vikram)
-
-  AMBARI-476. Undefined offset: 0 in
-  /usr/share/hmc/php/db/HMCDBAccessor.php on line 1030 (hitesh via
-  vikram)
-
-  AMBARI-475. Add missing JS file for making post cluster install Add
-  Nodes work (Yusaku Sako via vikram)
-
-  AMBARI-473. Secondary namenode checkpoint dir doesnt get created if
-  its on the same host as NN. (mahadev via vikram)
-
-  AMBARI-471. hadoop-metrics2.properties not updated (ramya via vikram)
-
-  AMBARI-468. Post-Install Add Nodes - update progress title and
-  success/error messages to reflect what it's actually doing/has done
-  (Yusaku Sako via vikram)
-
-  AMBARI-472. Add api to find all unassigned hosts in a cluster
-  (hitesh via vikram)
-
-  AMBARI-470. Fix conflicting dashbord conf files in installer and
-  dashboard. (mahadev via vikram)
-
-  AMBARI-469. Cap DataNode heap size. (mahadev via vikram)
-
-  AMBARI-467. Fix hive stop to escape $. (mahadev via vikram)
-
-  AMBARI-446. Support Resume For Manage Services (reznor via vikram)
-  
-  AMBARI-466. Add nodes page alerts removed in case of adding
-  duplicate nodes (vikram)
-
-  AMBARI-465. Fix suggestions for Map Red Child java opts. (mahadev via vikram)
-
-  AMBARI-461. Uninstall shoud stop nagios service first before
-  stopping other services (hitesh via vikram)
-  
-  AMBARI-464. Auto refresh should be applicable to all the service
-  tabs (vgogate via vikram)
-
-  AMBARI-463. Redesign cluster management pages and navigation (Yusaku Sako
-  via vikram)
-
-  AMBARI-462. when hive is being stopped, it stops the hive nagios
-  checks (ramya via vikram)
-
-  AMBARI-460. Ganglia shows four hosts on a single node installation
-  (vgogate via vikram)
-
-  AMBARI-459. Race conditions in fetch transaction progress (vikram)
-
-  AMBARI-447. First pass at Info level logging (cleanup of
-  logging). (mahadev via vikram)
-
-  AMBARI-458. Support configuration of checkpointing-related variables
-  (hitesh via vikram)
-
-  AMBARI-457. Create template for SNN checkpoint dir (ramya via vikram)
-
-  AMBARI-456. Add more logging for ganglia (ramya via vikram)
-
-  AMBARI-455. nagios shows service status critical if hbase is not
-  installed (vgogate via vikram)
-
-  AMBARI-453. Remove the puppet kick --ping during bootstrap instead
-  use simple network ping for puppet agent. (vgogate via vikram)
-
-  AMBARI-454. Fix Hive tarball issue download for templeton. (mahadev
-  via vikram)
-
-  AMBARI-452. Create cluster should wipe out entire db (vikram)
-
-  AMBARI-451. Add nodes has incorrect check on returned status (vikram)
-
-  AMBARI-450. Boldify/Redify restart HMC message when nagios/ganglia
-  is on the hmc host (Yusaku Sako via vikram)
-
-  AMBARI-449. Post cluster install/deploy the URL
-  hmc/html/initializeCluster.php should be disabled (Yusaku Sako via
-  vikram)
-
-  AMBARI-448. Redesign progress popups. (Yusaku Sako via vikram)
-
-  AMBARI-444, 445. Nagios checks send kicks to the agent that trigger
-  a run. Configure nagios to send email notifications when slaves go
-  down (vgogate via vikram)
-
-  AMBARI-443. Nagios start fails on reinstall (ramya via vikram)
-
-  AMBARI-442 Duplicate definition:
-  Class[Hdp-hbase::Regionserver::Enable-ganglia] (ramya via vikram)
-
-  AMBARI-441. Add ganglia monitor to all the hosts including
-  collector. (mahadev via vikram)
-
-  AMBARI-440. Keep the touch file for NN format in /var/run rather
-  than /etc/conf. (mahadev via vikram)
-
-  AMBARI-439. Gmetad start fails sometimes. (ramya via vikram)
-
-  AMBARI-438. Add retry if kick fails immediately. (jitendra via vikram)
-
-  AMBARI-437. Update router to handle resuming uninstall progress and
-  uninstall failed pages (Yusaku Sako via vikram)
-
-  AMBARI-436. Support Resume For Uninstall (reznor via vikram)
-
-  AMBARI-435. Uninstall needs to update status for failure. (vikram)
-
-  AMBARI-434. fix display name in smoke test progress description
-  (hitesh via vikram)
-
-  AMBARI-433. Using service stop instead of killall for uninstall (vikram)
-
-  AMBARI-432. Templeton should not install templeton.i386 (ramya via vikram)
-
-  AMBARI-431. Fix orchestrator to use correct display names for
-  descriptions of stages (hitesh via vikram)
-
-  AMBARI-430. set service state to failed if cluster monitoring
-  reconfiguration fails (hitesh via vikram)
-
-  AMBARI-429. Fix bug with jmx parsing on HBase. (mahadev via vikram)
-
-  AMBARI-428. changes to templeton setup for 0.1.4 (ramya via vikram)
-
-  AMBARI-425. Oozie start fails with "Not managing symlink mode"
-  (ramya via vikram)
-
-  AMBARI-424. change "reconfiguremonitoring" message to a better
-  worded action (hitesh via vikram)
-
-  AMBARI-421. Ganglia uninstall does not remove libganglia or gmond
-  (Richard Pelavin via vikram)
-
-  AMBARI-423. Uninstall cluster can't be automated by Selenium due to
-  the internal confirmation window (Yusaku Sako via vikram)
-
-  AMBARI-422. Increase Threshold For Number Of Successive Backend
-  Connection Failures (reznor via vikram)
-
-  AMBARI-420. Improve style on error log popups (Yusaku Sako via vikram)
-
-  AMBARI-419. Add Basic Keyboard Action Support For HMC UI (reznor via vikram)
-
-  AMBARI-418. Remove Redundant Titles From Reconfigure Panel (reznor
-  via vikram)
-
-  AMBARI-416. Fix Inconsistent Validation Error Messages (reznor via vikram)
-
-  AMBARI-417. Typo fix in uninstall path. (vikram)
-
-  AMBARI-415. Reset service back to original state after
-  reconfiguration (hitesh via vikram)
-
-  AMBARI-414. Add rpm spec for hmc agent. (mahadev via vikram)
-
-  AMBARI-409. Uninstall does not get full list of hosts. (vikram)
-
-  AMBARI-410. Need to move the creation of cluster directory for
-  hosting the key file and the nodes file to add nodes. (vikram)
-
-  AMBARI-408. HDPJobTracker cluster in ganglia has multiple nodes
-  (vgogate via vikram)
-
-  AMBARI-426. Reinstall of cluster after failure to install results in
-  failure (ramya via vikram)
-
-  AMBARI-427. Class not found Class['hdp-zookeeper::service'] during
-  uninstall (ramya via vikram)
-
-  AMBARI-531. Remove/disable reconfigure option for Sqoop, Pig,
-  Ganglia and Templeton (Yusaku Sako via vikram)
-
-  AMBARI-529. Fix Advanced Config: HDFS reserved space is in
-  bytes. Too many bytes to count. (hitesh via vikram)
-
-  AMBARI-530. HMC UI shows undefined for nodes after single node
-  install (Yusaku Sako via vikram)
-
-  AMBARI-532. add ganglia monitor to all masters (mahadev via vikram)
-
-  AMBARI-407. add more logging and timing info for various actions
-  (hitesh via vikram)
-
-  AMBARI-406. Monitoring dashboard does not show ZK service state
-  correctly (ramya via vikram)
-
-  AMBARI-321. Multiple ZK nodes not displayed on review-page (Yusaku Sako
-  via vikram)
-
-  AMBARI-405. Clean up messages for service management reconfiguration
-  popup (Yusaku Sako via vikram)
-
-  AMBARI-404. Unify the top nav for both Monitoring and Cluster
-  Management (Yusaku Sako via vikram)
-
-  AMBARI-403. Show fixed count of total nodes during all bootstrap
-  phases (hitesh via vikram)
-
-  AMBARI-325. MR vmem config options are useless without an option to
-  enable/disable memory-monitoring (vinodkv via vikram)
-
-  AMBARI-402. Completing successful add node takes one to initialize
-  cluster page starting from scratch (reznor via vikram)
-
-  AMBARI-401. Manual config changes for nn get reset on stop/start
-  from hmc (jitendra via vikram)
-
-  AMBARI-399. Cannot uninstall - the page hangs with the spinning icon
-  (Yusaku Sako via vikram)
-
-  AMBARI-398. if hbase is not installed, nagios sends alerts for
-  "ganglia collector for hbasemaster" being down (vgogate via vikram)
-
-  AMBARI-397. Clean up descriptions and titles of mapreduce memory
-  related configs (vinodkv via vikram)
-
-  AMBARI-396. Add nodes fails in assign masters because of closure
-  issues (vikram)
-
-  AMBARI-395. Ganglia server should not run gmond. (mahadev via vikram)
-
-  AMBARI-394. Add nodes fails to find node in db (vikram)
-
-  AMBARI-393. ZooKeeper myid files not existent on ZK
-  install. (mahadev via vikram)
-
-  AMBARI-392. Add ID attributes to HTML tags to help test automation
-  (Yusaku Sako via vikram)
-
-  AMBARI-358. Make index.php always accessible, rather than
-  automatically forwarding to the action, even if there's only one
-  action that the user can take (Yusaku Sako via vikram)
-
-  AMBARI-390. Handle multiple ZooKeeper service masters in Assign
-  Masters page (Yusaku Sako via vikram)
-
-  AMBARI-389. Do not allow invalid chars for database name and user
-  name for hive (hitesh via vikram)
-
-  AMBARI-388. Prevent the user from assigning NameNode and Secondary
-  NameNode services on the same host (Yusaku Sako via vikram)
-
-  AMBARI-386. On Single Node install when install all the components
-  the recommended num for Map/Reduce Tasks is too high (hitesh via
-  vikram)
-
-  AMBARI-279. On the mount points page show info on what the mount
-  points are being used for (Yusaku Sako via vikram)
-
-  AMBARI-387. Fine tune node assignment and propagate ZK host
-  assignments to the frontend. (mahadev via vikram)
-
-  AMBARI-381. Restarting Templeton should not run PIG smoke tests
-  (hitesh via vikram)
-
-  AMBARI-384. Fix the position of the deploy error logs popup (Yusaku Sako
-  via vikram)
-
-  AMBARI-385. Namenode format should not be passed as true in the
-  manifest unless from user input. (vikram via jitendra)
-
-  AMBARI-383. Do not force the user to look at the error logs before
-  given choices to go back/continue, etc (Yusaku Sako via vikram)
-
-  AMBARI-382. Make sure install/uninstall/reinstall preserves data on
-  HDFS/ZK/others. (ramya via vikram)
-
-  AMBARI-377. Uninstall does not handle component
-  dependencies. (jitendra via vikram)
-
-  AMBARI-380. Clean up messages for Add Nodes and Deploy progress
-  result (Yusaku Sako via vikram)
-
-  AMBARI-379. Remove puppet start stub on uninstall. (mahadev via vikram)
-
-  AMBARI-378. Getting a 404 after clicking on reinstall when the
-  install fails (Yusaku Sako via vikram)
-
-  AMBARI-376. Show welcome page when no clusters are set up (Yusaku Sako via vikram)
-
-  AMBARI-374. Modify the cluster info page to show host-to-service
-  mapping for both master services and client services using
-  color-coding (Yusaku Sako via vikram)
-
-  AMBARI-372. Hive metastore nagios check is broken. (jitendra via vikram)
-
-  AMBARI-373. Create RPM fails as fonts files are not copied
-  over. (mahadev via vikram)
-
-  AMBARI-371. Mysql packages not being sent during install and
-  uninstall (jitendra via vikram)
-
-  AMBARI-369. Improve Service Management page and general popup
-  styling (Yusaku Sako via vikram)
-
-  AMBARI-367. Make users enter hive configs instead of using defaults
-  (hitesh via vikram)
-
-  AMBARI-364. Retry puppet kick --ping on bootstrap to handle
-  intermittent failures (hitesh via vikram)
-
-  AMBARI-370. Uninstall needs to clear Puppet directories (vikram)
-
-  AMBARI-368. Server restart kills puppet agent. (mahadev via vikram)
-
-  AMBARI-366. Package up the fonts/ subdirectory in the HMC RPM (reznor
-  via vikram)
-
-  AMBARI-365. Uninstall/reinstall complains about OS Type as
-  unsupported (ramya via vikram)
-
-  AMBARI-363. Nagios should monitor puppet agents (vgogate via vikram)  
-
-  AMBARI-362. Create lock file as part of rpm install (vikram)
-
-  AMBARI-361. Display client nodes as part of cluster topology display
-  (Yusaku Sako via vikram)
-
-  AMBARI-360. Adding nodes that already part of the cluster should be
-  avoidable (vinodkv via vikram)
-
-  AMBARI-286. Make TxnProgressWidget Immune To Re-Fetch Race
-  Conditions (reznor via vikram)
-
-  AMBARI-526. Display client nodes as part of cluster topology
-  display. (reznor via vikram)
-
-  AMBARI-265. Reconfig page close button (x) is not visible (vinodkv
-  via vikram)
-
-  AMBARI-357. Redesign master service assignment page so that it takes
-  up less vertical space (Yusaku Sako via vikram)
-
-  AMBARI-356. Log output to console slows puppet run. (jitendra via vikram)
-
-  AMBARI-359. invalid parameter java_needed during uninstall (ramya
-  via vikram)
-
-  AMBARI-354. hmc rpm on install must backup the database (suresh via vikram)
-
-  AMBARI-353. Passing packages in hdp class for package install
-  optimization. (jitendra via vikram)
-
-  AMBARI-344. Fix TxnProgressWidget To Not Hide Previously Pending
-  States (reznor via vikram)
-
-  AMBARI-352. Add flow control - force redirects to appropriate pages
-  based on cluster configuration status for better usability (Yusaku Sako
-  via vikram)
-
-  AMBARI-351.  Monitoring dashboard should auto refresh as regular
-  interval (vgogate via vikram)
-
-  AMBARI-349. Logging in case of error during uninstall needs to be
-  fixed. (vikram)
-
-  AMBARI-317. Select-all + unselect HBASE removes Zookeeper
-  incorrectly (vinodkv via vikram)
-
-  AMBARI-348. Select all services by default (vinodkv via vikram)
-
-  AMBARI-247. Replace index.php with clusters.php (reznor via vikram)
-
-  AMBARI-347. Redo master service assignment page (Yusaku Sako via vikram)
-
-  AMBARI-339. Making transitionToNextStage more robust (vikram)
-
-  AMBARI-345. Make TxnProgressWidget More Robust In The Face Of
-  Un-Ready Txn Stages (reznor via vikram)
-
-  AMBARI-346. user should not be allowed to change the paths to
-  various directories on the advance config page (hitesh via vikram)
-
-  AMBARI-316. Grid mount points page doesn't let one pass with only a
-  custom mount point (vinodkv via vikram)
-
-  AMBARI-343. add option to enable webhdfs (hitesh via vikram)
-
-  AMBARI-342. Reconfiguration process kicks in even when the user
-  submits without new configs changes (vinodkv via vikram)
-
-  AMBARI-341. Batch yum commands (ramya via vikram)
-
-  AMBARI-338. Cluster status update needs to happen for all stages of
-  installation wizard. (vikram)
-
-  AMBARI-330. Provide a way to resume if browser crashes/is closed
-  during the deploy-in-progress (reznor via vikram)
-
-  AMBARI-320. Reconfiguring a stopped service starts it incorrectly
-  (hitesh via vikram)
-
-  AMBARI-340. Info logs for PuppetInvoker (jitendra via vikram)
-
-  AMBARI-337. Parallelize puppet kick --ping during bootstrap (hitesh
-  via vikram)
-
-  AMBARI-335. Redundant downloads even though the artifacts are
-  already installed (ramya via vikram)
-
-  AMBARI-519. update to fix the ganglia monitor_and_server anchor
-  problem (Richard Pelavin via vikram)
-
-  AMBARI-333. Update messaging resources for initialization wizard
-  (Yusaku Sako via vikram)
-
-  AMBARI-332. Modify nav to easily switch between cluster management
-  and monitoring. (Yusaku Sako via vikram)
-
-  AMBARI-518. Junk code in manifestloader site.pp. (jitendra via
-  vikram)
-
-  AMBARI-331. Make txnUtils immune to backend race conditions (reznor
-  via vikram)
-
-  AMBARI-327. Fix syntax error in monitoring modules (ramya via vikram)
-
-  AMBARI-326. Dependencies should be added only during install phase
-  (jitendra via vikram)
-
-  AMBARI-324. Welcome page missing. (Yusaku Sako via vikram)
-
-  AMBARI-323. During any process in the cluster initialization wizard,
-  if the user goes back to the "1 Create Cluster" tab, the user is
-  stuck. (vikram)
-
-  AMBARI-319. Scale puppet master to large number of nodes. (jitendra
-  via vikram)
-
-  AMBARI-318. Do not install the packages that install init.d scripts.
-  (ramya via vikram)
-
-  AMBARI-315. reconfig a service should a list of dependent services
-  that will be restarted as a result. (vinodkv via vikram)
-
-  AMBARI-314. Uninstall Wizard prevents the user from proceeding.
-  (Yusaku Sako via vikram)
-
-  AMBARI-311. Update HBase configuration description (suresh via vikram)
-
-  AMBARI-313. Provide a DB cleanup script. (hitesh via vikram)
-
-  AMBARI-312. Uninstall's wipe flag should be correctly passed to
-  puppet. (hitesh via vikram)
-
-  AMBARI-307. Ensure recommended memory is never below 256 M. (hitesh
-  via vikram)
-
-  AMBARI-310. Externalize message resources for the welcome
-  page. Update styles on various pages. (Yusaku Sako via vikram)
-
-  AMBARI-309. Make ManageServices Show Only One Action Per Service
-  (reznor via vgogate)
-
-  AMBARI-194. Avoid TxnProgressWidget Getting Stuck In An Infinite
-  Loop (reznor via vgogate)
-
-  AMBARI-308. Externalize message resources; Update styles/messaging
-  on Uninstall Wizard and Add Nodes Wizard (Yusaku Sako via vgogate)
-
-  AMBARI-306. Ignore client components when calculating
-  memory. (hitesh via jitendra)
-
-  AMBARI-305. Combine Hive and HCat into a single service. (hitesh via
-  jitendra)
-
-  AMBARI-278. Update MapReduce parameter configuration
-  description. (suresh via jitendra)
-
-  AMBARI-276. Update HDFS parameter configuration description. (suresh
-  via jitendra)
-
-  AMBARI-304. Upgrade to yui-3.5.1. (vinodkv via jitendra)
-
-  AMBARI-302. regionservers config in the hbase only has localhost in it. 
-  (ramya via jitendra)
-
-  AMBARI-275. Remove the configuration variable fs.inmemory.size.mb.
-  (suresh via jitendra)
-
-  AMBARI-303. Cleanup testing code for uninstall failure (simulation). 
-  (vikram via jitendra)
-
-  AMBARI-301. Uninstall should not stop HMC. (vikram via jitendra)
-  
-  AMBARI-273. Fix TxnProgressWidget To Show Failed States When No
-  SubTxn Is In Progress (reznor via vgogate)
-
-  AMBARI-294. Add Nodes page - incorrect field label (Yusaku Sako via vgogate)
-
-  AMBARI-293. Invoking browser "back" action on any step after the Add
-  Hosts step in the Cluster Init Wizard launches the host discovery
-  process again (Yusaku Sako via vgogate)
-
-  AMBARI-289. Hive dependency on hcat (jitendra via vgogate)
-
-  AMBARI-288. Add description for Nagios config (vgogate)
-
-  AMBARI-400. Fixes the regionserver opts for GC (ddas)
-
-  AMBARI-287. Add link to uninstall on index page. (vikram via hitesh)
-
-  AMBARI-285. Clean up Add Hosts page. (Yusaku Sako via hitesh)
-
-  AMBARI-284. Define service groups in nagios such that users can more easily
-  enable/disable the related alerts. (vgogate via hitesh)
-
-  AMBARI-283. Fixup review and deploy rendering. (vinodkv via hitesh)
-
-  AMBARI-282. Make fetchTxnProgress post processing easier to
-  debug. (vikram via hitesh)
-
-  AMBARI-281. Uninstall should hide loading image at startup. (vikram
-  via hitesh)
-
-  AMBARI-280. Cleanup of utilities. (vikram via hitesh)
-
-  AMBARI-249. Uninstall support from UI. (vikram via hitesh)
-
-  AMBARI-277. API for getting cluster status. (vikram via hitesh)
-
-  AMBARI-274. Templeton data on hdfs needs to be readable by all users
-  (ramya via hitesh)
-
-  AMBARI-272. Remove occurrences of repo_url to support local yum repo
-  (ramya via hitesh)
-
-  AMBARI-271. Support for local yum mirror (hitesh via ramya)
- 
-  AMBARI-270. Puppet cleanup to define all the users in a common 
-  location (ramya)
-
-  AMBARI-269. Specifiy the notification intervals and options for Alerts 
-  (vgogate via ramya)
-
-  AMBARI-300. Change the status message (success/error) location so that it 
-  shows below the page summary box, rather than above, more better visibility 
-  (Yusaku Sako via ramya)
-
-  AMBARI-255. Rename/Relocate files as appropriate (reznor via ramya)
-
-  AMBARI-252. Remove Playground files from HMC (reznor via ramya)
-
-  AMBARI-266. add select/unselect all buttons to the select services page 
-  (vinodkv via ramya)
-
-  AMBARI-256. Update hive config to enable authorization (ramya)
- 
-  AMBARI-254. Parameterize zookeeper configs (ramya)
-
-  AMBARI-257. Manage services section will have any empty section when no 
-  client only components installed (vinodkv via ramya)
-
-  AMBARI-253. Support uninstall state in mysql modules (ramya)
-
-  AMBARI-258. Start/Stop service show services that are not dependent on the 
-  service being worked on (vinodkv via ramya)
-
-  AMBARI-251. Oozie link is not displayed even when Oozie is installed 
-  (vgogate via ramya)
-
-  AMBARI-298. The current stage and the next stage are shown at the same time
-  during state transition (Yusaku Sako via ramya)
-
-  AMBARI-245. Support data cleanup if installation fails (jitendra via ramya)
-
-  AMBARI-248. Add yuiCombinator.php to rpm (jitendra via ramya)
-
-  AMBARI-297. Modal dialog box for showing deploy progress looks broken
-  in Safari (Yusaku Sako via ramya)
-
-  AMBARI-244. Implement wipeoff state in puppet (ramya)
-
-  AMBARI-246. Add support for parsing yum repo files to understand how to
-  bootstrap nodes for local yum mirror (hitesh via ramya)
-
-  AMBARI-218. Install Combo-Handler On HMC Webserver To Drastically Speed Up
-  Page Load Times (reznor via ramya)
-
-  AMBARI-259. add nodes to a cluster gives an option for ganglia and dashboard,
-  these should be on by default (vinodkv via ramya)
-
-  AMBARI-262. Init Wizard: Advanced Config validation errors can be bypassed 
-  (vinodkv via ramya)
-
-  AMBARI-263. Initialization Wizard: Select Disk Mount Points allows the user 
-  to continue without any mount points selected (vinodkv via ramya) 
-
-  AMBARI-250. Cluster name validation (vikram via ramya)
-
-  AMBARI-243. Templeton setup fails due to hive download error. (ramya)
-
-  AMBARI-296. Update styles on Service Management page (Yusaku Sako via ramya)
-
-  AMBARI-264. Nagios Admin Contact should be checked to ensure it is always an 
-  email address (vinodkv via ramya)
-
-  AMBARI-242. Change code layout to ensure unit tests are not part of final
-  built rpm. (hitesh via ramya)
-
-  AMBARI-295. Improve the overall look and feel (Yusaku Sako via ramya)
-
-  AMBARI-241. Support cluster wipeout in orchestrator. (hitesh via jitendra)
-
-  AMBARI-292. HTML being spewed in the Review+Deploy page. (reznor
-  via jitendra)
-
-  AMBARI-291. Fix yui packaging in the rpm. (jitendra)
-
-  AMBARI-290. Comment in addNodesWizardInit.js. (reznor via jitendra)
-
-  AMBARI-240. Make All FE Entry Points Hide The Loading Image When
-  They're Ready To Take Input.  (reznor via jitendra)
-
-  AMBARI-197. Templatize Standard Helper Components Of Every HTML
-  Page. (reznor via jitendra)
-
-  AMBARI-229. Remove Navigation Bar (W/ Clusters Link) From
-  InstallationWizard (And Other Entry Points).  (reznor via
-  jitendra)
-
-  AMBARI-214. Make HMC Use Local YUI Copy. (reznor via jitendra)
-
-  AMBARI-239. HDFS utilization pie chart shows gray / HDFS down while
-  HDFS is up and running.  (vgogate via jitendra)
-
-  AMBARI-238. When namenode process is down info shown is not correct
-  for both HDFS and MR (vgogate)
-
-  AMBARI-237. Refactor puppet kick loop to easily change retries and timeouts.
-  (jitendra)
-
-  AMBARI-236. Increase puppet agent timeout. (jitendra via vgogate)
-
-  AMBARI-235. Ordering problem when using
-  hdp-ganglia::monitor_and_server (Richard Pelavin via vgogate)
-
-  AMBARI-234. Typo in javascript (vikram via vgogate)
-
-  AMBARI-233. Add Oozie link to HMC dashboard (vgogate)
-
-  AMBARI-232. Enable LZO should show checkbox instead of text (
-  vikram via vgogate)
-
-  AMBARI-231. Support hadoop cleanup (ramya via vgogate)
-
-  AMBARI-228. Ganglia reports on host types not present (Richard
-  Pelavin via vgogate)
-
-  AMBARI-227. Invalid parameter ensure in ganglia manifests (ramya via vgogate)
-
-  AMBARI-226. Make the daemon names and other field names consistent
-  (suresh via vgogate)
-
-  AMBARI-225. Currently we dont have any monitoring alerts setup for
-  secondary namenode (vgogate)
-
-  AMBARI-224. sequentialScriptRunner.php logging in a tight
-  loop. (jitendra via vgogate)
-
-  AMBARI-223. Add troubleshoot link in the Alerts table to provide
-  documentation for debugging/resolving the alerts (vgogate)
-
-  AMBARI-222. Remove the word alert from all the Nagios alerts
-  descriptions. (vgogate)
-
-  AMBARI-221. Service fails to set its state to failed if a component
-  fails to be acted upon (hitesh via vgogate)
-
-  AMBARI-220. Alerts table semantic difference at different levels (vgogate)
-
-  AMBARI-217. Alert table needs to display service name for context.
-  (vgogate via omalley)
-
-  AMBARI-216. Remove sleeps to speed simulations of installs. (vikram
-  via omalley)
-
-  AMBARI-215. Fix description for dfs_data_dir. (hitesh via omalley)
-
-  AMBARI-209. Node selection should ignore failed nodes. (hitesh via omalley)
-
-  AMBARI-213. Fix directory permissions so that Ganglia graphs render.
-  (ramya via omalley)
-
-  AMBARI-210. Remove link for wiping clusters from the hope page. (vikram
-  via omalley)
-
-  AMBARI-212. Fix templeton configurations. (ramya via omalley)
-
-  AMBARI-206. Fix undefined variable in orchestrator. (hitesh via omalley)
-
-  AMBARI-208. Support filtering hosts based on discovery status. (hitesh
-  via omalley)
-
-  AMBARI-207. Fix for undefined variable manifest. (jitendra via omalley)
-
-  AMBARI-204. Use the host that runs Ambari for running slaves & masters.
-  (mahadev via omalley)
-
-  AMBARI-196. Support capturing os information correctly during node 
-  discovery. (hitesh via omalley)
-
-  AMBARI-203. Fix for duplicate jdk definition (ramya via omalley)
-
-  AMBARI-202. Add check to verify jdk path after install (ramya via vgogate)
-
-  AMBARI-201. reduce db query logging (hitesh via vgogate)
-
-  AMBARI-200. External hostnames should be used for links on dashboard
-  UI (ramya via vgogate)
-
-  AMBARI-199. Remove import of mysql puppet module from manifest
-  (jitendra via vgogate)
-
-  AMBARI-198. Dependency of templeton on hcat client (jitendra via vgogate)
-
-  AMBARI-192. Check for NN safemode during restarts (ramya via vgogate)
-
-  AMBARI-191. Implement hive server stopped state (ramya via vgogate)
-
-  AMBARI-195. Fix typo in java license question (hitesh via vgogate)
-
-  AMBARI-187. Syntax error in the puppet manifest in reconfiguration (jitendra
-  via vgogate)
-
-  AMBARI-193. Track nodes that timed out for puppet kicks (hitesh via
-  vgogate)
-
-  AMBARI-190. On a Single Node install, Nagios alerts do not take effect until
-  HMC is restarted (hitesh via vgogate)
-
-  AMBARI-189. Make getAllHostsInfo API support optional params
-  (hitesh via vgogate)
-
-  AMBARI-188. Disable jdk location validation as filter apis not supported in 
-  PHP 5.1. (hitesh via vgogate)
-

+ 29 - 0
ambari-agent/.project

@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>ambari-agent</name>
+	<comment>Apache Ambari Project POM. NO_M2ECLIPSE_SUPPORT: Project files created with the maven-eclipse-plugin are not supported in M2Eclipse.</comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.rubypeople.rdt.core.rubybuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.python.pydev.PyDevBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.jdt.core.javabuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.jdt.core.javanature</nature>
+		<nature>org.python.pydev.pythonNature</nature>
+		<nature>org.rubypeople.rdt.core.rubynature</nature>
+	</natures>
+</projectDescription>

+ 7 - 0
ambari-agent/.pydevproject

@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?eclipse-pydev version="1.0"?>
+
+<pydev_project>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
+</pydev_project>

+ 14 - 0
ambari-agent/ambari-agent.iml

@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module org.jetbrains.idea.maven.project.MavenProjectsManager.isMavenModule="true" type="JAVA_MODULE" version="4">
+  <component name="NewModuleRootManager" LANGUAGE_LEVEL="JDK_1_6" inherit-compiler-output="false">
+    <output url="file://$MODULE_DIR$/target/classes" />
+    <output-test url="file://$MODULE_DIR$/target/test-classes" />
+    <exclude-output />
+    <content url="file://$MODULE_DIR$">
+      <excludeFolder url="file://$MODULE_DIR$/target" />
+    </content>
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
+

+ 138 - 6
ambari-agent/conf/unix/ambari-agent

@@ -1,24 +1,156 @@
+#!/usr/bin/env bash
 # description: ambari-agent daemon
 # processname: ambari-agent
 
 # /etc/init.d/ambari-agent
 
+export PATH=/usr/lib/ambari-server/*:$PATH
+export AMBARI_CONF_DIR=/etc/ambari-server/conf
+
+AMBARI_AGENT=ambari-agent
+PIDFILE=/var/run/ambari-agent/$AMBARI_AGENT.pid
+LOGFILE=/var/log/ambari-agent/ambari-agent.out
+AGENT_SCRIPT=/usr/lib/python2.6/site-packages/ambari_agent/main.py
+OK=1
+NOTOK=0
+
+
+if [ -a /usr/bin/python2.6 ]; then
+  PYTHON=/usr/bin/python2.6
+fi
+
+if [ "x$PYTHON" == "x" ]; then
+  PYTHON=/usr/bin/python
+fi
+
+# Try to read the passphrase from the environment
+if [ ! -z $AMBARI_PASSPHRASE ]; then
+  RESOLVED_AMBARI_PASSPHRASE=$AMBARI_PASSPHRASE
+fi
+
+# Reading the environment file
+if [ -a /var/lib/ambari-agent/ambari-env.sh ]; then
+  . /var/lib/ambari-agent/ambari-env.sh
+fi
+
+if [ -z $RESOLVED_AMBARI_PASSPHRASE ] &&  [ ! -z $AMBARI_PASSPHRASE ]; then
+  RESOLVED_AMBARI_PASSPHRASE=$AMBARI_PASSPHRASE
+  # If the passphrase is not defined yet, use the value from the env file
+elif [ -z $RESOLVED_AMBARI_PASSPHRASE ]; then
+  # Passphrase is not defined anywhere, set the default value
+  RESOLVED_AMBARI_PASSPHRASE="DEV"
+fi
+
+export AMBARI_PASSPHRASE=$RESOLVED_AMBARI_PASSPHRASE
+
+#echo $AMBARI_PASSPHRASE
+
+# check for version
+check_python_version ()
+{
+  echo "Verifying Python version compatibility..."
+  majversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f1`
+  minversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f2`
+  numversion=$(( 10 * $majversion + $minversion))
+  if (( $numversion < 26 )); then
+    echo "ERROR: Found Python version $majversion.$minversion. Ambari Agent requires Python version > 2.6"
+    return $NOTOK
+  fi
+  echo "Using python " $PYTHON
+  return $OK
+}
+
 case "$1" in
 case "$1" in
   start)
   start)
-        echo -e "Starting ambari-agent"
-        python /usr/lib/python2.6/site-packages/ambari_agent/main.py
+        check_python_version
+        if [ "$?" -eq "$NOTOK" ]; then
+          exit -1
+        fi
+        echo "Checking for previously running Ambari Agent..."
+        if [ -f $PIDFILE ]; then
+          PID=`cat $PIDFILE`
+          if [ -z "`ps ax -o pid | grep $PID`" ]; then
+            echo "$PIDFILE found with no process. Removing $PID..."
+            rm -f $PIDFILE
+          else
+            tput bold
+            echo "ERROR: $AMBARI_AGENT already running"
+            tput sgr0
+            echo "Check $PIDFILE for PID."
+            exit -1
+          fi
+        fi
+        echo "Starting ambari-agent"
+        nohup $PYTHON $AGENT_SCRIPT > $LOGFILE 2>&1 &
+        sleep 2
+        PID=$!
+        echo "Verifying $AMBARI_AGENT process status..."
+        if [ -z "`ps ax -o pid | grep $PID`" ]; then
+          echo "ERROR: $AMBARI_AGENT start failed for unknown reason"
+          exit -1
+        fi
+        tput bold
+        echo "Ambari Agent successfully started"
+        tput sgr0
+        echo "Agent PID at: $PIDFILE"
+        echo "Agent log at: $LOGFILE"
+        ;;
+  status)
+        if [ -f $PIDFILE ]; then
+          PID=`cat $PIDFILE`
+          echo "Found $AMBARI_AGENT PID: $PID"
+          if [ -z "`ps ax -o pid | grep $PID`" ]; then
+            echo "$AMBARI_AGENT not running. Stale PID File at: $PIDFILE"
+          else
+            tput bold
+            echo "$AMBARI_AGENT running."
+            tput sgr0
+            echo "Agent PID at: $PIDFILE"
+            echo "Agent log at: $LOGFILE"
+          fi
+        else
+          tput bold
+          echo "$AMBARI_AGENT currently not running"
+          tput sgr0
+          echo "Usage: /usr/sbin/ambari-agent {start|stop|restart|status}"
+        fi
         ;;
   stop)
-        echo -e "Stopping ambari-agent"
-        python /usr/lib/python2.6/site-packages/ambari_agent/main.py stop
+        check_python_version
+        if [ "$?" -eq "$NOTOK" ]; then
+          exit -1
+        fi
+        if [ -f $PIDFILE ]; then
+          PID=`cat $PIDFILE`
+          echo "Found $AMBARI_AGENT PID: $PID"
+          if [ -z "`ps ax -o pid | grep $PID`" ]; then
+            tput bold
+            echo "ERROR: $AMBARI_AGENT not running. Stale PID File at: $PIDFILE"
+            tput sgr0
+          else
+            echo "Stopping $AMBARI_AGENT"
+            $PYTHON $AGENT_SCRIPT stop
+          fi
+          echo "Removing PID file at $PIDFILE"
+          rm -f $PIDFILE
+          tput bold
+          echo "$AMBARI_AGENT successfully stopped"
+          tput sgr0
+        else
+          tput bold
+          echo "$AMBARI_AGENT is not running. No PID found at $PIDFILE"
+          tput sgr0
+        fi
         ;;
   restart)
-        echo -e "Restarting ambari-agent"
+        echo -e "Restarting $AMBARI_AGENT"
         $0 stop
         $0 start
         ;;
   *)
-        echo "Usage: /usr/sbin/ambari-agent {start|stop|restart}"
+        tput bold
+        echo "Usage: /usr/sbin/ambari-agent {start|stop|restart|status}"
+        tput sgr0
         exit 1
 esac
 

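The rewritten init script is a conventional pid-file wrapper around the agent's main.py. Typical invocations, assuming the RPM has placed it at /usr/sbin/ambari-agent as mapped in pom.xml below:

  /usr/sbin/ambari-agent start    # checks Python >= 2.6, refuses to start over a live PID
  /usr/sbin/ambari-agent status   # reports the recorded PID and whether it is still alive
  /usr/sbin/ambari-agent stop     # runs main.py stop, then removes the pid file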
+ 32 - 0
ambari-agent/conf/unix/ambari-agent.ini

@@ -0,0 +1,32 @@
+[server]
+hostname=localhost
+url_port=4080
+secured_url_port=8443
+
+[agent]
+prefix=/var/lib/ambari-agent/data
+
+[stack]
+installprefix=/var/ambari-agent/
+
+[puppet]
+puppetmodules=/var/lib/ambari-agent/puppet
+ruby_home=/usr/lib/ambari-agent/lib/ruby-1.8.7-p370
+puppet_home=/usr/lib/ambari-agent/lib/puppet-2.7.9
+facter_home=/usr/lib/ambari-agent/lib/facter-1.6.10
+imports_file=/usr/lib/python2.6/site-packages/ambari_agent/imports.txt
+roles_to_class=/usr/lib/python2.6/site-packages/ambari_agent/rolesToClass.dict
+service_states=/usr/lib/python2.6/site-packages/ambari_agent/serviceStates.dict
+
+[command]
+maxretries=2
+sleepBetweenRetries=1
+
+[security]
+keysdir=/var/lib/ambari-agent/keys
+server_crt=ca.crt
+passphrase_env_var_name=AMBARI_PASSPHRASE
+
+[services]
+serviceToPidMapFile=/usr/lib/python2.6/site-packages/ambari_agent/servicesToPidNames.dict
+pidLookupPath=/var/run/

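A quick post-install sanity check of the new config (the RPM maps this file into /etc/ambari-agent; hostname defaults to localhost and normally needs to be pointed at the real server):

  grep -A3 '^\[server\]' /etc/ambari-agent/ambari-agent.ini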
+ 3 - 0
ambari-agent/conf/unix/ambari-env.sh

@@ -0,0 +1,3 @@
+# To change the passphrase used by the agent, adjust the line below. This value is used when no
+# passphrase is given through the AMBARI_PASSPHRASE environment variable.
+AMBARI_PASSPHRASE="DEV"

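The init script resolves the passphrase in this order: AMBARI_PASSPHRASE from the caller's environment, then this file, then the hard-coded default "DEV". To override it for a single start (value illustrative):

  AMBARI_PASSPHRASE='cluster-secret' /usr/sbin/ambari-agent start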
+ 0 - 28
ambari-agent/conf/unix/ambari.ini

@@ -1,28 +0,0 @@
-[server]
-hostname=ambari-rpm.cybervisiontech.com.ua
-url_port=4080
-secured_url_port=8443
-
-[agent]
-prefix=/tmp/ambari-agent
-
-[stack]
-installprefix=/var/ambari/
-
-[puppet]
-puppetmodules=/var/lib/ambari-agent/puppet
-puppet_home=/usr/bin/puppet
-facter_home=/usr/bin/facter
-
-[command]
-maxretries=2
-sleepBetweenRetries=1
-
-[security]
-keysdir=/var/lib/ambari-agent/keys
-server_crt=ca.crt
-passphrase_env_var_name=AMBARI_PASSPHRASE
-
-[services]
-serviceToPidMapFile=/usr/lib/python2.6/site-packages/ambari_agent/servicesToPidNames.dict
-pidLookupPath=/var/run/

+ 119 - 10
ambari-agent/pom.xml

@@ -33,11 +33,30 @@
     <final.name>${project.artifactId}-${project.version}</final.name>
     <package.release>1</package.release>
     <package.prefix>/usr</package.prefix>
-    <package.conf.dir>/etc/ambari</package.conf.dir>
-    <package.log.dir>/var/log/ambari</package.log.dir>
-    <package.pid.dir>/var/run/ambari</package.pid.dir>
+    <package.conf.dir>/etc/ambari-agent</package.conf.dir>
+    <package.log.dir>/var/log/ambari-agent</package.log.dir>
+    <package.pid.dir>/var/run/ambari-agent</package.pid.dir>
     <skipTests>false</skipTests>
+    <facter.tar>http://downloads.puppetlabs.com/facter/facter-1.6.10.tar.gz</facter.tar>
+    <puppet.tar>http://www.puppetlabs.com/downloads/puppet/puppet-2.7.9.tar.gz</puppet.tar>
+    <install.dir>/usr/lib/python2.6/site-packages/ambari_agent</install.dir>
+    <ruby.tar>http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6/ruby-1.8.7-p370.tar.gz</ruby.tar>
+    <lib.dir>/usr/lib/ambari-agent/lib</lib.dir>
   </properties>
+  <profiles>
+    <profile>
+      <id>suse11</id>
+      <properties>
+        <ruby.tar>http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11/ruby-1.8.7-p370.tar.gz</ruby.tar>
+      </properties>
+    </profile>
+    <profile>
+      <id>centos5</id>
+      <properties>
+        <ruby.tar>http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5/ruby-1.8.7-p370.tar.gz</ruby.tar>
+      </properties>
+    </profile>
+  </profiles>
   <build>
     <plugins>
       <plugin>
@@ -119,19 +138,42 @@
           <group>Development</group>
           <description>Maven Recipe: RPM Package.</description>
           <requires>
-            <require>puppet = 2.7.9</require>
+            <require>openssl</require>
+            <require>zlib</require>
+            <require>${python.ver}</require>
           </requires>
+           <postinstallScriptlet>
+            <scriptFile>src/main/package/rpm/postinstall.sh</scriptFile>
+            <fileEncoding>utf-8</fileEncoding>
+          </postinstallScriptlet>
+          <preinstallScriptlet>
+            <scriptFile>src/main/package/rpm/preinstall.sh</scriptFile>
+            <fileEncoding>utf-8</fileEncoding>
+          </preinstallScriptlet>
+          <needarch>x86_64</needarch>
+          <autoRequires>false</autoRequires>
           <mappings>
             <mapping>
-              <directory>/usr/lib/python2.6/site-packages/ambari_agent</directory>
+              <directory>${install.dir}</directory>
               <sources>
                 <source>
                   <location>${project.build.directory}/${project.artifactId}-${project.version}/ambari_agent</location>
                 </source>
               </sources>
             </mapping>
+            <mapping>
+              <directory>${lib.dir}</directory>
+              <sources>
+                <source>
+                  <location>${project.build.directory}/lib</location>
+                </source>
+              </sources>
+            </mapping>
             <mapping>
               <directory>/var/lib/${project.artifactId}/puppet</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
               <sources>
                 <source>
                   <location>src/main/puppet</location>
@@ -139,16 +181,21 @@
               </sources>
             </mapping>
             <mapping>
-              <directory>/etc/ambari</directory>
+              <directory>/etc/ambari-agent</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
               <sources>
                 <source>
-                  <location>conf/unix/ambari.ini</location>
+                  <location>conf/unix/ambari-agent.ini</location>
                 </source>
               </sources>
             </mapping>
             <mapping>
               <directory>/usr/sbin</directory>
               <filemode>744</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
               <sources>
                 <source>
                   <location>conf/unix/ambari-agent</location>
@@ -156,21 +203,83 @@
               </sources>
             </mapping>
             <mapping>
-              <directory>/var/run/ambari</directory>
+              <directory>/var/lib/ambari-agent</directory>
+              <filemode>700</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>conf/unix/ambari-env.sh</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/var/run/ambari-agent</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+            </mapping>
+            <mapping>
+              <directory>/var/lib/${project.artifactId}/data</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
             </mapping>
             <mapping>
               <directory>/var/lib/${project.artifactId}/keys</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
             </mapping>
             <mapping>
-              <directory>/var/log/ambari</directory>
+              <directory>/var/log/ambari-agent</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
             </mapping>
             <mapping>
-              <directory>/var/ambari</directory>
+              <directory>/var/ambari-agent</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
             </mapping>
             <!-- -->
           </mappings>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>com.github.goldin</groupId>
+        <artifactId>copy-maven-plugin</artifactId>
+        <version>0.2.5</version>
+        <executions>
+          <execution>
+            <id>create-archive</id>
+            <phase>package</phase>
+            <goals>
+              <goal>copy</goal>
+            </goals>
+            <configuration>
+              <resources>
+                <resource>
+                  <targetPath>${project.build.directory}/lib</targetPath>
+                  <file>${ruby.tar}</file>
+                  <unpack>true</unpack>
+                </resource>
+                <resource>
+                  <targetPath>${project.build.directory}/lib</targetPath>
+                  <file>${facter.tar}</file>
+                  <unpack>true</unpack>
+                </resource>
+                <resource>
+                  <targetPath>${project.build.directory}/lib</targetPath>
+                  <file>${puppet.tar}</file>
+                  <unpack>true</unpack>
+                </resource>
+              </resources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
     <extensions>
       <extension>

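The suse11 and centos5 profiles only swap the ruby.tar download URL; the rest of the RPM recipe is shared. A hypothetical platform-specific build:

  mvn -P centos5 clean package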
+ 1 - 0
ambari-agent/src/main/package/rpm/postinstall.sh

@@ -0,0 +1 @@
+chmod 755 /usr/lib/ambari-agent/lib/facter-1.6.10/bin/facter /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/filebucket /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/pi /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppet /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppetdoc /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/ralsh /usr/lib/ambari-agent/lib/ruby-1.8.7-p370/bin/*

+ 2 - 0
ambari-agent/src/main/package/rpm/preinstall.sh

@@ -0,0 +1,2 @@
+getent group puppet >/dev/null || groupadd -r puppet
+getent passwd puppet >/dev/null || /usr/sbin/useradd -g puppet puppet

+ 2 - 1
ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp

@@ -44,7 +44,8 @@
 #
 
 define configgenerator::configfile ($modulespath='/etc/puppet/modules', $filename, $module, $configuration) {
-  $configcontent = inline_template('<configuration>
+  $configcontent = inline_template('<!--<%=Time.now.asctime %>-->
+  <configuration>
   <% configuration.each do |key,value| -%>
   <property>
     <name><%=key %></name>

+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb

@@ -80,7 +80,7 @@
       ],
       "TEMPLETON" : [
         {
-          "installed": <%=not scope.function_hdp_no_hosts("public_templeton_server_host")%>,
+          "installed": <%=not scope.function_hdp_no_hosts("public_webhcat_server_host")%>,
           "name": "TEMPLETON"
           "name": "TEMPLETON"
         }
         }
       ],
       ],

+ 138 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py

@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import cgi
+#import cgitb
+import os
+import rrdtool
+import sys
+
+# place this script in /var/www/cgi-bin of the Ganglia collector
+# requires 'yum install rrdtool-python' on the Ganglia collector
+
+#cgitb.enable()
+
+def printMetric(clusterName, hostName, metricName, file, cf, start, end, resolution):
+  if clusterName.endswith("rrds"):
+    clusterName = ""
+
+  args = [file, cf]
+
+  if start is not None:
+   args.extend(["-s", start])
+
+  if end is not None:
+   args.extend(["-e", end])
+
+  if resolution is not None:
+   args.extend(["-r", resolution])
+
+  rrdMetric = rrdtool.fetch(args)
+
+  time = rrdMetric[0][0]
+  step = rrdMetric[0][2]
+
+  sys.stdout.write("  {\n    \"ds_name\":\"" + rrdMetric[1][0] +\
+                   "\",\n    \"cluster_name\":\"" + clusterName +\
+                   "\",\n    \"host_name\":\"" + hostName +\
+                   "\",\n    \"metric_name\":\"" + metricName + "\",\n")
+
+  firstDP = True
+  sys.stdout.write("    \"datapoints\":[\n")
+  for tuple in rrdMetric[2]:
+    if tuple[0] is not None:
+      if not firstDP:
+        sys.stdout.write(",\n")
+      firstDP = False
+      sys.stdout.write("      [")
+      sys.stdout.write(str(tuple[0]))
+      sys.stdout.write(",")
+      sys.stdout.write(str(time))
+      sys.stdout.write("]")
+    time = time + step
+  sys.stdout.write("\n    ]\n  }")
+  return
+
+def stripList(l):
+  return([x.strip() for x in l])
+
+sys.stdout.write("Content-type: application/json\n\n")
+
+queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']))
+
+sys.stdout.write("[\n")
+
+firstMetric = True
+
+if "m" in queryString:
+  metricParts = queryString["m"].split(",")
+else:
+  metricParts = [""]
+metricParts = stripList(metricParts)
+
+hostParts = []
+if "h" in queryString:
+  hostParts = queryString["h"].split(",")
+hostParts = stripList(hostParts)
+
+if "c" in queryString:
+  clusterParts = queryString["c"].split(",")
+else:
+  clusterParts = [""]
+clusterParts = stripList(clusterParts)
+
+if "p" in queryString:
+  rrdPath = queryString["p"]
+else:
+  rrdPath = "/var/lib/ganglia/rrds/"
+
+start = None
+if "s" in queryString:
+  start = queryString["s"]
+
+end = None
+if "e" in queryString:
+  end = queryString["e"]
+
+resolution = None
+if "r" in queryString:
+  resolution = queryString["r"]
+
+if "cf" in queryString:
+  cf = queryString["cf"]
+else:
+  cf = "AVERAGE"
+
+for cluster in clusterParts:
+  for path, dirs, files in os.walk(rrdPath + cluster):
+    pathParts = path.split("/")
+    if len(hostParts) == 0 or pathParts[-1] in hostParts:
+      for file in files:
+        for metric in metricParts:
+          if file.endswith(metric + ".rrd"):
+            if not firstMetric:
+              sys.stdout.write(",\n")
+
+            printMetric(pathParts[-2], pathParts[-1], file[:-4], os.path.join(path, file), cf, start, end, resolution)
+
+            firstMetric = False
+
+sys.stdout.write("\n]\n")
+sys.stdout.flush()

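Deployed to /var/www/cgi-bin on the Ganglia collector (per the comment above), the script answers HTTP queries whose parameters match the names it parses: c=cluster, h=host, m=metric, p=rrd path, s/e=start/end, r=resolution, cf=consolidation function. A sketch of a probe, with host and metric names invented for illustration:

  curl 'http://ganglia-collector.example.com/cgi-bin/rrd.py?c=HDPSlaves&h=host1.example.com&m=load_one&cf=AVERAGE'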
+ 26 - 16
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp

@@ -45,9 +45,9 @@ class hdp-ganglia::monitor(
       class { 'hdp-ganglia::config': ganglia_server_host => $ganglia_server_host}
     }
 
-#    if (($hdp::params::service_exists['hdp-hadoop::datanode'] == true) or ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::tasktracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::client'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)) {
-#     class { 'hdp-hadoop::enable-ganglia': }
-#   }
+    if (($hdp::params::service_exists['hdp-hadoop::datanode'] == true) or ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::tasktracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::client'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)) {
+     class { 'hdp-hadoop::enable-ganglia': }
+   }
 
 
     if ($service_exists['hdp-hbase::master'] == true) {
       class { 'hdp-hbase::master::enable-ganglia': }
@@ -76,18 +76,29 @@ class hdp-ganglia::monitor::config-gen()
 
 
   $service_exists = $hdp::params::service_exists
 
 
-  if ($service_exists['hdp-hadoop::namenode'] == true) {
-    hdp-ganglia::config::generate_monitor { 'HDPNameNode':}
-  }
-  if ($service_exists['hdp-hadoop::jobtracker'] == true){
-    hdp-ganglia::config::generate_monitor { 'HDPJobTracker':}
-  }
-  if ($service_exists['hdp-hbase::master'] == true) {
-    hdp-ganglia::config::generate_monitor { 'HDPHBaseMaster':}
-  }
-  if ($service_exists['hdp-hadoop::datanode'] == true) {
-    hdp-ganglia::config::generate_monitor { 'HDPSlaves':}
-  }
+   #FIXME currently hacking this to make it work
+
+#  if ($service_exists['hdp-hadoop::namenode'] == true) {
+#    hdp-ganglia::config::generate_monitor { 'HDPNameNode':}
+#  }
+#  if ($service_exists['hdp-hadoop::jobtracker'] == true){
+#    hdp-ganglia::config::generate_monitor { 'HDPJobTracker':}
+#  }
+#  if ($service_exists['hdp-hbase::master'] == true) {
+#    hdp-ganglia::config::generate_monitor { 'HDPHBaseMaster':}
+#  }
+#  if ($service_exists['hdp-hadoop::datanode'] == true) {
+#    hdp-ganglia::config::generate_monitor { 'HDPSlaves':}
+#  }
+
+  # FIXME
+  # this will enable gmond for all clusters on the node
+  # should be selective based on roles present
+  hdp-ganglia::config::generate_monitor { 'HDPNameNode':}
+  hdp-ganglia::config::generate_monitor { 'HDPJobTracker':}
+  hdp-ganglia::config::generate_monitor { 'HDPHBaseMaster':}
+  hdp-ganglia::config::generate_monitor { 'HDPSlaves':}
+
   Hdp-ganglia::Config::Generate_monitor<||>{
     ganglia_service => 'gmond',
     role => 'monitor'
@@ -108,7 +119,6 @@ class hdp-ganglia::monitor::gmond(
   if ($ensure == 'running' or $ensure == 'stopped') {
     hdp::exec { "hdp-gmond service" :
       command => "$command",
-      unless => "/bin/ps auwx | /bin/grep [g]mond",
       path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
     }
   }

+ 43 - 3
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp

@@ -32,6 +32,10 @@ class hdp-ganglia::server(
       ensure => 'uninstalled'
    }
 
+   class { 'hdp-ganglia::server::files':
+      ensure => 'absent'
+   }
+
   } else {
   class { 'hdp-ganglia':
     service_state => $service_state
@@ -56,10 +60,23 @@ class hdp-ganglia::server(
   class { 'hdp-ganglia::server::gmetad': ensure => $service_state}
 
   class { 'hdp-ganglia::service::change_permission': ensure => $service_state }
+  
+  if ($service_state == 'installed_and_configured') {
+    $webserver_state = 'restart'
+  } else {
+    $webserver_state = $service_state
+  }
+
+  class { 'hdp-monitor-webserver': service_state => $webserver_state}
+
+   class { 'hdp-ganglia::server::files':
+      ensure => 'present'
+   }
 
 
   #top level does not need anchors
-  Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> Class['hdp-ganglia::config'] -> 
-    Hdp-ganglia::Config::Generate_server<||> -> Class['hdp-ganglia::server::gmetad'] -> Class['hdp-ganglia::service::change_permission']
+  Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> Class['hdp-ganglia::config'] ->
+ Hdp-ganglia::Config::Generate_server<||> ->
+ Class['hdp-ganglia::server::gmetad'] -> Class['hdp-ganglia::service::change_permission'] -> Class['hdp-ganglia::server::files'] -> Class['hdp-monitor-webserver']
  }
 }
 
@@ -71,6 +88,30 @@ class hdp-ganglia::server::packages(
     ensure      => $ensure,
     java_needed => false
   }
+
+  hdp::package { ['rrdtool-python']: 
+    ensure      => $ensure,
+    java_needed => false  
+  } 
+}
+
+class hdp-ganglia::server::files(
+  $ensure = present 
+)
+{
+
+
+  $rrd_py_path = $hdp::params::rrd_py_path
+  hdp::directory_recursive_create{$rrd_py_path:
+    ensure => "directory"  
+  }
+
+  file{'/var/www/cgi-bin/rrd.py' : 
+    ensure => $ensure,
+    source => "puppet:///modules/hdp-ganglia/rrd.py",
+    mode   => '0755',
+    require => Hdp::Directory_recursive_create[$rrd_py_path]
+  }
 }
 
 
@@ -97,7 +138,6 @@ class hdp-ganglia::server::gmetad(
   if ($ensure == 'running' or $ensure == 'stopped') {
     hdp::exec { "hdp-gmetad service" :
       command => "$command",
-      unless => "/bin/ps auwx | /bin/grep [g]metad",
       path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
     }
   }

+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp

@@ -91,7 +91,7 @@ class hdp-hadoop::datanode(
 define hdp-hadoop::datanode::create_data_dirs($service_state)
 {
   $dirs = hdp_array_from_comma_list($name)
-  hdp::directory_recursive_create { $dirs :
+  hdp::directory_recursive_create_ignore_failure { $dirs :
     owner => $hdp-hadoop::params::hdfs_user,
     mode => '0750',
     service_state => $service_state,

+ 36 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp

@@ -0,0 +1,36 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::hdfs::decommission(
+) inherits hdp-hadoop::params
+{
+  if hdp_is_empty($configuration[hdfs-site]['dfs.hosts.exclude']) {
+    hdp_fail("There is no path to exclude file in configuration!")
+  }
+
+  hdp-hadoop::hdfs::generate_exclude_file{'exclude_file':}
+
+  hdp::exec{"hadoop dfsadmin -refreshNodes":
+      command => "hadoop dfsadmin -refreshNodes",
+      user => $hdp::params::hdfs_user,
+      require => Hdp-Hadoop::Hdfs::Generate_Exclude_File['exclude_file']
+    }
+  
+}

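The class reproduces the manual HDFS decommission procedure: write the hosts to retire into the file named by dfs.hosts.exclude, then have the NameNode re-read it. Done by hand it would look roughly like this (file path and hostname illustrative, user per $hdfs_user):

  echo 'dn3.example.com' >> /etc/hadoop/conf/dfs.exclude
  su - hdfs -c 'hadoop dfsadmin -refreshNodes'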
+ 42 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp

@@ -0,0 +1,42 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+define hdp-hadoop::hdfs::generate_exclude_file()
+{
+  $exlude_file_path = $configuration['hdfs-site']['dfs.hosts.exclude']
+  ## Generate the exclude file if $configuration['hdfs-exclude-file']['datanodes']
+  ## has a value, or if the 'datanodes' key is present but its value is empty
+  if (hdp_is_empty($configuration) == false and
+    hdp_is_empty($configuration['hdfs-exclude-file']) == false) and
+    (hdp_is_empty($configuration['hdfs-exclude-file']['datanodes']) == false)
+    or has_key($configuration['hdfs-exclude-file'], 'datanodes') {
+    ## Create the file listing the excluded hosts
+    $exlude_hosts_list = hdp_array_from_comma_list($configuration['hdfs-exclude-file']['datanodes'])
+    file { $exlude_file_path :
+      ensure => file,
+      content => template('hdp-hadoop/exclude_hosts_list.erb')
+    }
+  }
+}
+
+
+
+

+ 14 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp

@@ -96,6 +96,20 @@ debug('##Configs generation for hdp-hadoop')
       configuration => $configuration['hdfs-site']
     }
   }
+
+  if has_key($configuration, 'hdfs-exclude-file') {
+    hdp-hadoop::hdfs::generate_exclude_file{'exclude_file':}
+  }
+
+  hdp::package {'ambari-log4j':
+    package_type  => 'ambari-log4j'
+  }
+
+  file { '/usr/lib/hadoop/lib/hadoop-tools.jar':
+    ensure => 'link',
+    target => '/usr/lib/hadoop/hadoop-tools.jar',
+    mode => '755',
+  }
 }
 
 class hdp-hadoop(

+ 5 - 5
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp

@@ -161,24 +161,24 @@ define hdp-hadoop::namenode::create_app_directories($service_state)
 
 
     if ($hdp::params::oozie_server != "") {
       $oozie_user = $hdp::params::oozie_user
-      hdp-hadoop::hdfs::directory{ '/user/oozie':
+      hdp-hadoop::hdfs::directory{ "/user/${oozie_user}":
         service_state => $service_state,
         owner => $oozie_user,
-        mode  => '770',
+        mode  => '775',
         recursive_chmod => true
       }
     }
 
-    if ($hdp::params::templeton_server_host != "") {
+    if ($hdp::params::webhcat_server_host != "") {
       $templeton_user = $hdp::params::templeton_user
-      hdp-hadoop::hdfs::directory{ '/user/templeton':
+      hdp-hadoop::hdfs::directory{ '/user/hcat':
         service_state => $service_state,
         owner => $templeton_user,
         mode  => '755',
         recursive_chmod => true
       }
 
-      hdp-hadoop::hdfs::directory{ '/apps/templeton':
+      hdp-hadoop::hdfs::directory{ '/apps/webhcat':
         service_state => $service_state,
         owner => $templeton_user,
         mode  => '755',

+ 8 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp

@@ -169,4 +169,12 @@ class hdp-hadoop::params(
   $security_enabled = $hdp::params::security_enabled
 
   $task_bin_exe = hdp_default("hadoop/health_check/task_bin_exe")
+
+  $rca_enabled = hdp_default("rca_enabled", false)
+  if ($rca_enabled == true or $rca_enabled == "true") {
+    $rca_prefix = ""
+  } else {
+    $rca_prefix = "###"
+  }
+  $ambari_db_server_host = hdp_default("ambari_db_server_host", "localhost")
 }

+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp

@@ -84,7 +84,7 @@ define hdp-hadoop::tasktracker::create_local_dirs($service_state)
 {
   if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] != true) {
     $dirs = hdp_array_from_comma_list($name)
-    hdp::directory_recursive_create { $dirs :
+    hdp::directory_recursive_create_ignore_failure { $dirs :
       owner => $hdp-hadoop::params::mapred_user,
       mode => '0755',
       service_state => $service_state,

+ 3 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb

@@ -0,0 +1,3 @@
+<% exlude_hosts_list.each do |val| -%>
+<%= val%>
+<% end -%>

+ 16 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb

@@ -169,3 +169,19 @@ log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
 log4j.appender.JSA.DatePattern=.yyyy-MM-dd
 log4j.appender.JSA.DatePattern=.yyyy-MM-dd
 log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
 log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
 log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
 log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+
+
+<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.database=jdbc:postgresql://<%=scope.function_hdp_host("ambari_db_server_host")%>:5432/ambarirca
+<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.driver=org.postgresql.Driver
+<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.user=mapred
+<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.password=mapred
+<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.logger=DEBUG,JHA
+
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.database=${ambari.jobhistory.database}
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.driver=${ambari.jobhistory.driver}
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.user=${ambari.jobhistory.user}
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.password=${ambari.jobhistory.password}
+
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true

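Because every appended line is prefixed with rca_prefix, the whole block renders commented out (### ...) when rca_enabled is false, and active when it is true, e.g. with the localhost default from params.pp:

  ambari.jobhistory.database=jdbc:postgresql://localhost:5432/ambarirca
  log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender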
+ 23 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2.sql

@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
+DESCRIBE hiveserver2smoke20408;

+ 31 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2Smoke.sh

@@ -0,0 +1,31 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e '!run $2' 2>&1| awk '{print}'|grep Error`
+
+if [ "x$smokeout" == "x" ]; then
+  echo "Smoke test of hiveserver2 passed"
+  exit 0
+else
+  echo "Smoke test of hiveserver2 wasnt passed"
+  echo $smokeout
+  exit 1
+fi

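service_check.pp stages this script and the SQL file under /tmp and passes in the JDBC URL from params.pp, so a standalone run is simply:

  sh /tmp/hiveserver2Smoke.sh 'jdbc:hive2://localhost:10000' /tmp/hiveserver2.sql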
+ 22 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/files/startHiveserver2.sh

@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+/usr/lib/hive/bin/hiveserver2 > $1 2> $2 &
+echo $!|cat>$3

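The three positional arguments are the stdout log, the stderr log, and the pid file, matching the $cmd assembled in service.pp; e.g. with the default directories from params.pp:

  sh /tmp/startHiveserver2.sh /var/log/hive/hive-server2.out /var/log/hive/hive-server2.log /var/run/hive/hive-server.pid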
+ 22 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/files/startMetastore.sh

@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+hive --service metastore > $1 2> $2 &
+echo $!|cat>$3

+ 39 - 17
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/hive/service_check.pp

@@ -18,15 +18,39 @@
 # under the License.
 #
 #
-class hdp-hive::hive::service_check() 
+class hdp-hive::hive::service_check() inherits hdp-hive::params
 {
-  $unique = hdp_unique_id_and_date()
   $smoke_test_user = $hdp::params::smokeuser
-  $output_file = "/apps/hive/warehouse/hivesmoke${unique}"
+  $smoke_test_sql = "/tmp/$smoke_test_sql_file"
+  $smoke_test_path = "/tmp/$smoke_test_script"
+
+
+  $smoke_cmd = "env JAVA_HOME=$hdp::params::java64_home $smoke_test_path $hive_url $smoke_test_sql"
+
+
+  file { $smoke_test_path:
+    ensure => present,
+    source => "puppet:///modules/hdp-hive/$smoke_test_script",
+    mode => '0755',
+  }
 
 
-  $test_cmd = "fs -test -e ${output_file}" 
-  
-  anchor { 'hdp-hive::hive::service_check::begin':}
+  file { $smoke_test_sql:
+    ensure => present,
+    source => "puppet:///modules/hdp-hive/$smoke_test_sql_file"
+  }
+
+  exec { $smoke_test_path:
+    command   => $smoke_cmd,
+    tries     => 3,
+    try_sleep => 5,
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    logoutput => "true",
+    user => $smoke_test_user
+  }
+
+  $unique = hdp_unique_id_and_date()
+  $output_file = "/apps/hive/warehouse/hivesmoke${unique}"
+  $test_cmd = "fs -test -e ${output_file}"
 
 
   file { '/tmp/hiveSmoke.sh':
   file { '/tmp/hiveSmoke.sh':
     ensure => present,
@@ -35,21 +59,19 @@ class hdp-hive::hive::service_check()
   }
 
   exec { '/tmp/hiveSmoke.sh':
-    command   => "su - ${smoke_test_user} -c 'sh /tmp/hiveSmoke.sh hivesmoke${unique}'",
-    tries     => 3,
+    command => "su - ${smoke_test_user} -c 'env JAVA_HOME=$hdp::params::java64_home sh /tmp/hiveSmoke.sh hivesmoke${unique}'",
+    tries => 3,
     try_sleep => 5,
-    require   => File['/tmp/hiveSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    notify    => Hdp-hadoop::Exec-hadoop['hive::service_check::test'],
+    path => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    notify => Hdp-hadoop::Exec-hadoop['hive::service_check::test'],
     logoutput => "true"
   }
 
   hdp-hadoop::exec-hadoop { 'hive::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Exec['/tmp/hiveSmoke.sh'],
-    before      => Anchor['hdp-hive::hive::service_check::end'] 
+    command => $test_cmd,
+    refreshonly => true
   }
-  
-  anchor{ 'hdp-hive::hive::service_check::end':}
+
+  File[$smoke_test_path] -> File[$smoke_test_sql] -> Exec[$smoke_test_path] -> File['/tmp/hiveSmoke.sh'] -> Exec['/tmp/hiveSmoke.sh'] -> Hdp-Hadoop::Exec-Hadoop['hive::service_check::test']
+
 }

+ 61 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/metastore.pp

@@ -0,0 +1,61 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hive::metastore(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits  hdp-hive::params
+{ 
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+
+    $hdp::params::service_exists['hdp-hive::server'] = true
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'hive_server_service_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/hive.service.keytab",
+        keytabfile => 'hive.service.keytab',
+        owner => $hdp-hive::params::hive_user
+      }
+    }
+
+    #installs package, creates user, sets configuration
+    class{ 'hdp-hive' : 
+      service_state => $service_state,
+      server        => true
+    } 
+  
+    Hdp-Hive::Configfile<||>{hive_server_host => $hdp::params::host_address}
+
+    class { 'hdp-hive::service' :
+      ensure => $service_state,
+      service_type => "metastore"
+    }
+  
+    #top level does not need anchors
+    Class['hdp-hive'] -> Class['hdp-hive::service']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}

+ 2 - 2
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/mysql-connector.pp

@@ -27,7 +27,7 @@ class hdp-hive::mysql-connector()
   
   
   anchor { 'hdp-hive::mysql-connector::begin':}
 
-   hdp::package { 'mysql-connector' :
+   hdp::package { 'mysql-connector-java' :
      require   => Anchor['hdp-hive::mysql-connector::begin']
    }
 
@@ -36,7 +36,7 @@ class hdp-hive::mysql-connector()
        unless  => "test -f ${target}",
        creates => $target,
        path    => ["/bin","/usr/bin/"],
-       require => Hdp::Package['mysql-connector'],
+       require => Hdp::Package['mysql-connector-java'],
        notify  =>  Anchor['hdp-hive::mysql-connector::end'],
    }
 

+ 15 - 1
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/params.pp

@@ -31,6 +31,8 @@ class hdp-hive::params() inherits hdp::params
   ### common
   $hive_metastore_port = hdp_default("hive_metastore_port",9083)
   $hive_lib = hdp_default("hive_lib","/usr/lib/hive/lib/") #TODO: should I remove and just use hive_dbroot
+  $hive_var_lib = hdp_default("hive_var_lib","/var/lib/hive")  
+  $hive_url = 'jdbc:hive2://localhost:10000'
 
 
   ### hive-env
   $hive_conf_dir = $hdp::params::hive_conf_dir
@@ -40,6 +42,8 @@ class hdp-hive::params() inherits hdp::params
   $hive_log_dir = hdp_default("hadoop/hive-env/hive_log_dir","/var/log/hive")
 
   $hive_pid_dir = hdp_default("hadoop/hive-env/hive_pid_dir","/var/run/hive")
+  $hive_pid = hdp_default("hadoop/hive-env/hive_pid","hive-server.pid")
+
   
   
   ### hive-site
   $hive_database_name = hdp_default("hadoop/hive-site/hive_database_name","hive")
@@ -58,5 +62,15 @@ class hdp-hive::params() inherits hdp::params
   ###mysql connector
   $download_url = $hdp::params::gpl_artifacts_download_url
   $mysql_connector_url = "${download_url}/mysql-connector-java-5.1.18.zip"
-  $hive_aux_jars_path =  hdp_default("hive_aux_jars_path","/usr/lib/hcatalog/share/hcatalog/hcatalog-0.4.0.14.jar")
+  $hive_aux_jars_path =  hdp_default("hive_aux_jars_path","/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar")
+
+  ##smoke test
+  $smoke_test_sql_file = 'hiveserver2.sql'
+  $smoke_test_script = 'hiveserver2Smoke.sh'
+
+  ##Starting hiveserver2
+  $start_hiveserver2_script = 'startHiveserver2.sh'
+
+  ##Starting metastore
+  $start_metastore_script = 'startMetastore.sh'
 }

+ 2 - 1
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/server.pp

@@ -49,7 +49,8 @@ class hdp-hive::server(
     Hdp-Hive::Configfile<||>{hive_server_host => $hdp::params::host_address}
 
     class { 'hdp-hive::service' :
-      ensure => $service_state
+      ensure => $service_state,
+      service_type => "hiveserver2"
     }
 
     #top level does not need anchors

+ 47 - 11
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/service.pp

@@ -19,7 +19,8 @@
 #
 #
 class hdp-hive::service(
-  $ensure
+  $ensure,
+  $service_type
 )
 {
   include $hdp-hive::params
@@ -27,45 +28,80 @@ class hdp-hive::service(
   $user = $hdp-hive::params::hive_user
   $hadoop_home = $hdp::params::hadoop_home
   $hive_log_dir = $hdp-hive::params::hive_log_dir
-  $cmd = "env HADOOP_HOME=${hadoop_home} nohup hive --service metastore > ${hive_log_dir}/hive.out 2> ${hive_log_dir}/hive.log &"
-  $pid_file = "${hdp-hive::params::hive_pid_dir}/hive.pid" 
+
+  $start_hiveserver2_path = "/tmp/$start_hiveserver2_script"
+  $start_metastore_path = "/tmp/$start_metastore_script"
+
+  if ($service_type == 'metastore') {
+
+    $pid_file = "${hdp-hive::params::hive_pid_dir}/hive.pid" 
+    $cmd = "env HADOOP_HOME=${hadoop_home} JAVA_HOME=$hdp::params::java64_home $start_metastore_path ${hive_log_dir}/hive.out ${hive_log_dir}/hive.log $pid_file"
+    
+  } elsif ($service_type == 'hiveserver2') {
+    $pid_file = "$hive_pid_dir/$hive_pid" 
+    $cmd = "env JAVA_HOME=$hdp::params::java64_home $start_hiveserver2_path ${hive_log_dir}/hive-server2.out  ${hive_log_dir}/hive-server2.log $pid_file"
+  } else {
+    hdp_fail("TODO not implemented yet: service_type = ${service_type}")
+  }
+
+
+  $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
 
 
   if ($ensure == 'running') {
     $daemon_cmd = "su - ${user} -c  '${cmd} '"
-    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
   } elsif ($ensure == 'stopped') {
   } elsif ($ensure == 'stopped') {
-    #TODO: this needs to be fixed
-    $daemon_cmd = "ps aux | awk '{print \$1,\$2}' | grep \"^${user} \" | awk '{print \$2}' | xargs kill >/dev/null 2>&1"
-    $no_op_test = "ps aux | awk '{print \$1,\$2}' | grep \"^${user} \""
+    $daemon_cmd = "kill `cat $pid_file` >/dev/null 2>&1"
   } else {
     $daemon_cmd = undef
   }
 
 
-  hdp-hive::service::directory { $hdp-hive::params::hive_pid_dir : }
-  hdp-hive::service::directory { $hdp-hive::params::hive_log_dir : }
+  hdp-hive::service::directory { $hive_pid_dir : }
+  hdp-hive::service::directory { $hive_log_dir : }
+  hdp-hive::service::directory { $hive_var_lib : }
+
+  file { $start_hiveserver2_path:
+    ensure => present,
+    source => "puppet:///modules/hdp-hive/$start_hiveserver2_script",
+    mode => '0755',
+  }
+
+  file { $start_metastore_path:
+    ensure => present,
+    source => "puppet:///modules/hdp-hive/$start_metastore_script",
+    mode => '0755',
+  }
 
 
   anchor{'hdp-hive::service::begin':} -> Hdp-hive::Service::Directory<||> -> anchor{'hdp-hive::service::end':}
   anchor{'hdp-hive::service::begin':} -> Hdp-hive::Service::Directory<||> -> anchor{'hdp-hive::service::end':}
   
   
   if ($daemon_cmd != undef) {
   if ($daemon_cmd != undef) {
     if ($ensure == 'running') {
     if ($ensure == 'running') {
+
+      $pid_file_state = 'present'
       hdp::exec { $daemon_cmd:
       hdp::exec { $daemon_cmd:
         command => $daemon_cmd,
         command => $daemon_cmd,
         unless  => $no_op_test
         unless  => $no_op_test
       }
       }
     } elsif ($ensure == 'stopped') {
     } elsif ($ensure == 'stopped') {
+      $pid_file_state = 'absent'
       hdp::exec { $daemon_cmd:
       hdp::exec { $daemon_cmd:
         command => $daemon_cmd,
         command => $daemon_cmd,
         onlyif  => $no_op_test
         onlyif  => $no_op_test
       }
       }
     }
     }
-    Hdp-hive::Service::Directory<||> -> Hdp::Exec[$daemon_cmd] -> Anchor['hdp-hive::service::end']
+
+
+  file { $pid_file:
+    ensure => $pid_file_state
+  }
+
+    Hdp-hive::Service::Directory<||> File[ $start_metastore_path]-> File[ $start_hiveserver2_path]-> Hdp::Exec[$daemon_cmd] -> File[$pid_file] -> Anchor['hdp-hive::service::end']
   }
   }
 }
 }
 
 
 define hdp-hive::service::directory()
 define hdp-hive::service::directory()
 {
 {
   hdp::directory_recursive_create { $name: 
   hdp::directory_recursive_create { $name: 
-    owner => $hdp-hive::params::hive_user,
+    owner => $hive_user,
     mode => '0755',
     mode => '0755',
     service_state => $ensure,
     service_state => $ensure,
     force => true
     force => true

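The start/stop handling above relies on one pid probe, $no_op_test: start is skipped (unless) while a live process owns the pid file, and stop fires (onlyif) only when one does, which makes both operations idempotent. A self-contained sketch of the same guard, with illustrative paths standing in for the module's parameters:

  $pid_file   = '/var/run/hive/hive.pid'
  $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"

  # start is a no-op when the pid file already points at a live process
  exec { 'start-hive-metastore':
    command => '/tmp/startMetastore.sh /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid',
    path    => ['/bin', '/usr/bin'],
    unless  => $no_op_test,
  }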
+ 48 - 6
ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/init.pp

@@ -23,18 +23,60 @@ class hdp-monitor-webserver(
   $opts = {}
 ) inherits hdp::params 
 {
-  #TODO: does not install apache package
+
+  if hdp_is_empty($hdp::params::services_names[httpd]) {
+    hdp_fail("There is no service name for service httpd")
+  }
+  else {
+    $service_name_by_os = $hdp::params::services_names[httpd]
+  }
+
+  if hdp_is_empty($service_name_by_os[$hdp::params::hdp_os_type]) {
+    if hdp_is_empty($service_name_by_os['ALL']) {
+      hdp_fail("There is no service name for service httpd")
+    }
+    else {
+      $service_name = $service_name_by_os['ALL']
+    }
+  }
+  else {
+    $service_name = $service_name_by_os[$hdp::params::hdp_os_type]
+  }
+
   if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
+  } elsif ($service_state in ['running','stopped','installed_and_configured', 'restart']) {
+
     if ($service_state == 'running') {
       #TODO: refine by using notify/subscribe
       hdp::exec { 'monitor webserver start':
-        command => '/etc/init.d/httpd start',
-        unless => '/etc/init.d/httpd status'
+        command => "/etc/init.d/$service_name start",
+        unless => "/etc/init.d/$service_name status",
+        require => Hdp::Package['httpd']
       } 
+
+      hdp::package { 'httpd' :
+        size   => 64
+      }
     } elsif ($service_state == 'stopped') {
-      package { 'httpd':
-        ensure => 'stopped'
+      # stopping should never fail, even if the process is already down
+      hdp::exec { 'monitor webserver stop':
+        command => "/etc/init.d/$service_name stop"
+      }
+    } elsif ($service_state == 'restart') {
+      hdp::exec { 'monitor webserver restart':
+        command => "/etc/init.d/$service_name restart",
+        require => Hdp::Package['httpd']
+      }
+      hdp::package { 'httpd' :
+        size   => 64
+      }
+    } elsif ($service_state == 'installed_and_configured') {
+      hdp::package { 'httpd' :
+        size   => 64
       }
     }
   } else {

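The service-name lookup introduced here (exact OS key first, then an 'ALL' fallback) assumes $hdp::params::services_names is a two-level hash, roughly of this shape (the entries below are illustrative, not the actual mapping in hdp::params):

  $services_names = {
    httpd => {
      suse => 'apache2',   # hypothetical per-OS init script name
      ALL  => 'httpd'      # fallback for every other platform
    }
  }

so $service_name_by_os[$hdp::params::hdp_os_type] yields the per-OS name and $service_name_by_os['ALL'] covers the rest.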
+ 3 - 3
ambari-agent/src/main/puppet/modules/hdp-mysql/files/startMysql.sh

@@ -25,6 +25,6 @@ mysqldbpasswd=$2
 mysqldbhost=$3
 
 echo "Adding user $mysqldbuser@$mysqldbhost"
-echo "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';" > mysql -u root
-echo "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';" > mysql -u root
-echo "flush privileges;" > mysql -u root
+echo "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';" | mysql -u root
+echo "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';" | mysql -u root
+echo "flush privileges;" | mysql -u root

+ 26 - 7
ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/server.pp

@@ -34,22 +34,41 @@ class hdp-mysql::server(
     anchor { 'hdp-mysql::server::begin':}
 
     hdp::package { 'mysql' :
-      size   => 32,
+      size   => 64,
       require   => Anchor['hdp-mysql::server::begin']
     }
 
-    hdp::exec { 'mysqld start':
-        command => '/etc/init.d/mysqld start',
-        unless  => '/etc/init.d/mysqld status',
-        require => Hdp::Package['mysql'],
-        notify  => File['/tmp/startMysql.sh']
+    if hdp_is_empty($hdp::params::services_names[mysql]) {
+      hdp_fail("There is no service name for service mysql")
+    }
+    else {
+      $service_name_by_os = $hdp::params::services_names[mysql]
+    }
+
+    if hdp_is_empty($service_name_by_os[$hdp::params::hdp_os_type]) {
+      if hdp_is_empty($service_name_by_os['ALL']) {
+        hdp_fail("There is no service name for service mysql")
+      }
+      else {
+        $service_name = $service_name_by_os['ALL']
+      }
+    }
+    else {
+      $service_name = $service_name_by_os[$hdp::params::hdp_os_type]
+    }
+
+    service { $service_name:
+      ensure => running,
+      require => Hdp::Package['mysql'],
+      notify  => File['/tmp/startMysql.sh']
     }
 
     file { '/tmp/startMysql.sh':
       ensure => present,
       source => "puppet:///modules/hdp-mysql/startMysql.sh",
       mode => '0755',
-      require => Hdp::Exec['mysqld start'],
+      require => Service[$service_name],
       notify => Exec['/tmp/startMysql.sh']
     }
 

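The rewrite above leans on Puppet's refresh chain: the service brings MySQL up, then the file resource lays down the setup script and notifies its exec. A minimal sketch of that pattern; the refreshonly guard and the script arguments are assumptions here, since Exec['/tmp/startMysql.sh'] itself is declared outside this hunk:

  service { 'mysqld':
    ensure => running,
  }

  file { '/tmp/startMysql.sh':
    ensure  => present,
    mode    => '0755',
    content => "#!/bin/sh\necho setup steps here\n",  # stands in for the module's source file
    require => Service['mysqld'],
    notify  => Exec['/tmp/startMysql.sh'],
  }

  exec { '/tmp/startMysql.sh':
    command     => '/tmp/startMysql.sh dbuser dbpass localhost',  # illustrative arguments
    path        => ['/bin', '/usr/bin'],
    refreshonly => true,  # run only when notified by the file resource
  }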
+ 0 - 59
ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_puppet_agent_status.php

@@ -1,59 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- /* This plugin check if puppet agent is alive */
-
-  $options = getopt ("h:");
-  if (!array_key_exists('h', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-
-  /* Give puppet kick --ping to check if agent is working */
-  $out_arr = array();
-  $cmd = "puppet kick -f --host $host --ping 2>/dev/null";
-  exec ($cmd, $out_arr, $err);
-  if ($err == 0 && check_error($out_arr, "status is success", 0) == 0) {
-    // success
-    echo "OK: Puppet agent is active on [$host]" . "\n";
-    exit(0);
-  } else {
-    // Fail
-    echo "WARN: Puppet agent is down on [$host]" . "\n";
-    exit(1);
-  }
-
-  /* check error function */
-  function check_error ($output, $pattern, $ret) {
-    $ret1=($ret+1)%2;
-    for ($i=0; $i<count($output); $i++) {
-      if (preg_match ("/$pattern/", $output[$i])) {
-        return $ret;
-      }
-    }
-    return $ret1;
-  }
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host>\n";
-  }
-?>

+ 7 - 2
ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/params.pp

@@ -27,8 +27,11 @@ class hdp-nagios::params() inherits hdp::params
   $conf_dir = hdp_default("nagios_conf_dir","/etc/nagios")
 
   $plugins_dir = "/usr/lib64/nagios/plugins"
+  $nagios_pid_dir = "/var/run/nagios"
 
   $nagios_obj_dir = hdp_default("nagios_obj_dir","/etc/nagios/objects")
+  $nagios_var_dir = hdp_default("nagios_var_dir","/var/nagios")
+  $nagios_rw_dir = hdp_default("nagios_rw_dir","/var/nagios/rw")
   $nagios_host_cfg = hdp_default("nagios_host_cfg","${nagios_obj_dir}/hadoop-hosts.cfg")
   $nagios_hostgroup_cfg = hdp_default("nagios_hostgroup_cfg","${nagios_obj_dir}/hadoop-hostgroups.cfg")
   $nagios_servicegroup_cfg = hdp_default("nagios_servicegroup_cfg","${nagios_obj_dir}/hadoop-servicegroups.cfg")
@@ -39,8 +42,10 @@ class hdp-nagios::params() inherits hdp::params
   $nagios_web_password = hdp_default("nagios_web_password","admin")
   
   $dfs_data_dir = $hdp::params::dfs_data_dir
+
+  $check_result_path = hdp_default("nagios_check_result_path","/var/nagios/spool/checkresults")
    
-  $nagios_contact = hdp_default("nagios/nagios-contacts/nagios_contact","monitor\@monitor.com")
+  $nagios_contact = hdp_default("nagios/nagios-contacts/nagios_contact","monitor\\@monitor.com")
 
   $hostgroup_defs = {
     namenode => {host_member_info => 'namenode_host'},
@@ -54,6 +59,6 @@ class hdp-nagios::params() inherits hdp::params
     hiveserver => {host_member_info => 'hive_server_host'},
     region-servers => {host_member_info => 'hbase_rs_hosts'},
     oozie-server => {host_member_info => 'oozie_server'},
-    templeton-server => {host_member_info => 'templeton_server_host'}
+    webhcat-server => {host_member_info => 'webhcat_server_host'}
   }
 }

+ 107 - 5
ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp

@@ -22,10 +22,34 @@ class hdp-nagios::server(
   $service_state = $hdp::params::cluster_service_state
 ) inherits hdp-nagios::params
 {
-
+  $nagios_var_dir = $hdp-nagios::params::nagios_var_dir
+  $nagios_rw_dir = $hdp-nagios::params::nagios_rw_dir
   $nagios_config_dir = $hdp-nagios::params::conf_dir
   $plugins_dir = $hdp-nagios::params::plugins_dir
   $nagios_obj_dir = $hdp-nagios::params::nagios_obj_dir
+  $check_result_path = $hdp-nagios::params::check_result_path
+
+  if hdp_is_empty($hdp::params::pathes[nagios_p1_pl]) {
+    hdp_fail("There is no path to p1.pl file for nagios")
+  }
+  else {
+    $nagios_p1_pl_by_os = $hdp::params::pathes[nagios_p1_pl]
+  }
+
+  if hdp_is_empty($nagios_p1_pl_by_os[$hdp::params::hdp_os_type]) {
+    if hdp_is_empty($nagios_p1_pl_by_os['ALL']) {
+      hdp_fail("There is no path to p1.pl file for nagios")
+    }
+    else {
+      $nagios_p1_pl = $nagios_p1_pl_by_os['ALL']
+    }
+  }
+  else {
+    $nagios_p1_pl = $nagios_p1_pl_by_os[$hdp::params::hdp_os_type]
+  }
+
 
   if ($service_state == 'no_op') {
   } elsif ($service_state in ['uninstalled']) {
       service_state => $service_state,
       service_state => $service_state,
       force => true
     }
+
+    hdp::directory_recursive_create { $nagios_pid_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp::directory { $nagios_var_dir:
+      service_state => $service_state,
+      force => true
+    }
 
-     Class['hdp-nagios::server::packages'] -> Exec['rm -f /var/nagios/rw/nagios.cmd'] -> Hdp::Directory[$nagios_config_dir] -> Hdp::Directory[$plugins_dir] -> Hdp::Directory[$nagios_obj_dir]
+     Class['hdp-nagios::server::packages'] -> Exec['rm -f /var/nagios/rw/nagios.cmd'] -> Hdp::Directory[$nagios_config_dir] -> Hdp::Directory[$plugins_dir] -> Hdp::Directory[$nagios_obj_dir] -> Hdp::Directory_recursive_create[$nagios_pid_dir] -> Hdp::Directory[$nagios_var_dir]
 
   } elsif ($service_state in ['running','stopped','installed_and_configured']) {
     class { 'hdp-nagios::server::packages' : }
@@ -77,6 +114,45 @@ class hdp-nagios::server(
       service_state => $service_state,
       force => true
     }
+
+    hdp::directory_recursive_create { $nagios_pid_dir:
+      service_state => $service_state,
+      owner => $nagios_user,
+      group => $nagios_group,
+      ensure => "directory",
+      mode => '0755',
+      force => true
+    }
+
+    hdp::directory_recursive_create { $nagios_var_dir:
+      service_state => $service_state,
+      force => true,
+      owner => $hdp-nagios::params::nagios_user,
+      group => $hdp-nagios::params::nagios_group
+    }
+
+    hdp::directory_recursive_create { $check_result_path:
+      service_state => $service_state,
+      force => true,
+      owner => $hdp-nagios::params::nagios_user,
+      group => $hdp-nagios::params::nagios_group
+    }
+
+    hdp::directory_recursive_create { $nagios_rw_dir:
+      service_state => $service_state,
+      force => true,
+      owner => $hdp-nagios::params::nagios_user,
+      group => $hdp-nagios::params::nagios_group
+    }
+
+    if ($service_state == 'installed_and_configured') {
+      $webserver_state = 'restart'
+    } else {
+      $webserver_state = $service_state
+    }
+
+    class { 'hdp-monitor-webserver': service_state => $webserver_state}
 
     class { 'hdp-nagios::server::config': 
@@ -86,9 +162,13 @@ class hdp-nagios::server(
     class { 'hdp-nagios::server::web_permisssions': }
 
     class { 'hdp-nagios::server::services': ensure => $service_state}
+
+    Class['hdp-nagios::server::packages'] -> Hdp::Directory[$nagios_config_dir] -> Hdp::Directory[$plugins_dir] -> Hdp::Directory_recursive_create[$nagios_pid_dir] ->
+    Hdp::Directory[$nagios_obj_dir] -> Hdp::Directory_Recursive_Create[$nagios_var_dir] ->
+    Hdp::Directory_Recursive_Create[$check_result_path] -> Hdp::Directory_Recursive_Create[$nagios_rw_dir] ->
+    Class['hdp-nagios::server::config'] -> Class['hdp-nagios::server::web_permisssions'] -> Class['hdp-nagios::server::services'] -> Class['hdp-monitor-webserver']
 
-    Class['hdp-nagios::server::packages'] -> Hdp::Directory[$nagios_config_dir] -> Hdp::Directory[$plugins_dir] -> Hdp::Directory[$nagios_obj_dir] -> Class['hdp-nagios::server::config'] -> 
-    Class['hdp-nagios::server::web_permisssions'] -> Class['hdp-nagios::server::services']
   } else {
     hdp_fail("TODO not implemented yet: service_state = ${service_state}")
   }
@@ -97,7 +177,29 @@ class hdp-nagios::server(
 class hdp-nagios::server::web_permisssions()
 {
   $web_login = $hdp-nagios::params::nagios_web_login
-  $cmd = "htpasswd -c -b  /etc/nagios/htpasswd.users ${web_login} ${hdp-nagios::params::nagios_web_password}"
+
+  if hdp_is_empty($hdp::params::cmds[htpasswd]) {
+    hdp_fail("There is no htpasswd command mapping")
+  }
+  else {
+    $htpasswd_cmd_by_os = $hdp::params::cmds[htpasswd]
+  }
+
+  if hdp_is_empty($htpasswd_cmd_by_os[$hdp::params::hdp_os_type]) {
+    if hdp_is_empty($htpasswd_cmd_by_os['ALL']) {
+      hdp_fail("There is no htpasswd command mapping")
+    }
+    else {
+      $htpasswd_cmd = $htpasswd_cmd_by_os['ALL']
+    }
+  }
+  else {
+    $htpasswd_cmd = $htpasswd_cmd_by_os[$hdp::params::hdp_os_type]
+  }
+
+  $cmd = "$htpasswd_cmd -c -b  /etc/nagios/htpasswd.users ${web_login} ${hdp-nagios::params::nagios_web_password}"
   $test = "grep ${web_user} /etc/nagios/htpasswd.users"
   hdp::exec { $cmd :
     command => $cmd,

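The long -> chains added in this file are ordinary Puppet resource ordering: each reference is applied before the one to its right, so packages land before directories, directories before config, and config before services. In miniature:

  package { 'nagios':
    ensure => present,
  }

  file { '/etc/nagios/nagios.cfg':
    ensure => file,
  }

  service { 'nagios':
    ensure => running,
  }

  Package['nagios'] -> File['/etc/nagios/nagios.cfg'] -> Service['nagios']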
+ 0 - 1
ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp

@@ -39,7 +39,6 @@ class hdp-nagios::server::config()
   hdp-nagios::server::check { 'check_rpcq_latency.php': }
   hdp-nagios::server::check { 'check_webui.sh': }
   hdp-nagios::server::check { 'check_name_dir_status.php': }
-  hdp-nagios::server::check { 'check_puppet_agent_status.php': }
   hdp-nagios::server::check { 'check_oozie_status.sh': }
   hdp-nagios::server::check { 'check_templeton_status.sh': }
   hdp-nagios::server::check { 'check_hive_metastore_status.sh': }

+ 3 - 10
ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/packages.pp

@@ -28,23 +28,16 @@ class hdp-nagios::server::packages(
       ensure => 'uninstalled'
     }
   } elsif ($service_state in ['running','stopped','installed_and_configured']) {
-    case $hdp::params::hdp_os_type {
-      centos6, rhel6: {
-        hdp-nagios::server::package { ['nagios-server','nagios-fping','nagios-plugins','nagios-addons']:
-          ensure => 'present'
-        }
-      }
-      default: {
         hdp-nagios::server::package { ['nagios-server','nagios-fping','nagios-plugins','nagios-addons','nagios-php-pecl-json']:
           ensure => 'present'
-        }
-      }
     }
   } 
+  Hdp-nagios::Server::Package['nagios-server'] -> Hdp::Package['nagios-plugins'] #other order produces package conflict
   Hdp-nagios::Server::Package['nagios-plugins'] -> Hdp::Package['nagios-addons'] #other order produces package conflict
 
   anchor{'hdp-nagios::server::packages::begin':} -> Hdp-nagios::Server::Package<||> -> anchor{'hdp-nagios::server::packages::end':}
-  Anchor['hdp-nagios::server::packages::begin'] -> Hdp::Package['nagios-addons'] -> Anchor['hdp-nagios::server::packages::end']
+  Anchor['hdp-nagios::server::packages::begin'] -> Hdp::Package['nagios-server'] ->
+      Hdp::Package['nagios-addons'] -> Anchor['hdp-nagios::server::packages::end']
   Hdp-nagios::Server::Package['nagios-fping'] -> Hdp-nagios::Server::Package['nagios-plugins']
 }
 
 

+ 0 - 5
ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb

@@ -67,11 +67,6 @@ define command{
         command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$
        }
 
-define command{
-        command_name    check_puppet_agent_status
-        command_line    php $USER1$/check_puppet_agent_status.php -h $HOSTADDRESS$
-       }
-
 define command{
         command_name    check_oozie_status
         command_line    $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$

+ 0 - 4
ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-servicegroups.cfg.erb

@@ -34,7 +34,3 @@ define servicegroup {
   servicegroup_name  ZOOKEEPER
   alias  ZOOKEEPER Checks
 }
-define servicegroup {
-  servicegroup_name  PUPPET
-  alias  Puppet Agent Checks
-}

+ 7 - 18
ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb

@@ -30,17 +30,6 @@ define service {
         notification_interval           0     # Send the notification once
 }
 
-define service {        
-        hostgroup_name          all-servers
-        use                     hadoop-service
-        service_description     PUPPET::Puppet agent down
-        servicegroups           PUPPET
-        check_command           check_tcp!8139!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
 define service {        
         hostgroup_name          nagios-server        
         use                     hadoop-service
@@ -433,10 +422,10 @@ define service {
         use                     hadoop-service
         service_description     HIVE-METASTORE::HIVE-METASTORE status check
         servicegroups           HIVE-METASTORE
-        <%if scope.function_hdp_template_var("security_enabled")-%>
-        check_command           check_hive_metastore_status!9083!<%=scope.function_hdp_template_var("java32_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>
+        <%if scope.function_hdp_template_var("security_enabled") == "true"-%>
+        check_command           check_hive_metastore_status!9083!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>
         <%else-%>
-        check_command           check_hive_metastore_status!9083!<%=scope.function_hdp_template_var("java32_home")%>!false
+        check_command           check_hive_metastore_status!9083!<%=scope.function_hdp_template_var("java64_home")%>!false
         <%end-%>
         normal_check_interval   0.5
         retry_check_interval    0.5
@@ -450,10 +439,10 @@ define service {
         use                     hadoop-service
         service_description     OOZIE::Oozie status check
         servicegroups           OOZIE
-        <%if scope.function_hdp_template_var("security_enabled")-%>
-        check_command           check_oozie_status!11000!<%=scope.function_hdp_template_var("java32_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>
+        <%if scope.function_hdp_template_var("security_enabled") == "true" -%>
+        check_command           check_oozie_status!11000!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>
         <%else-%>
-        check_command           check_oozie_status!11000!<%=scope.function_hdp_template_var("java32_home")%>!false
+        check_command           check_oozie_status!11000!<%=scope.function_hdp_template_var("java64_home")%>!false
         <%end-%>
         normal_check_interval   1
         retry_check_interval    1
@@ -467,7 +456,7 @@ define service {
         use                     hadoop-service
         service_description     TEMPLETON::Templeton status check
         servicegroups           TEMPLETON
-        <%if scope.function_hdp_template_var("security_enabled")-%>
+        <%if scope.function_hdp_template_var("security_enabled") == "true"-%>
         check_command           check_templeton_status!50111!v1!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>
         <%else-%>
         check_command           check_templeton_status!50111!v1!false

+ 2 - 2
ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.cfg.erb

@@ -215,7 +215,7 @@ external_command_buffer_slots=4096
 # This is the lockfile that Nagios will use to store its PID number
 # in when it is running in daemon mode.
 
-lock_file=/var/nagios/nagios.pid
+lock_file=/var/run/nagios/nagios.pid
 
 
@@ -1141,7 +1141,7 @@ date_format=us
 # embedded Perl interpreter) is located.  If you didn't compile
 # Nagios with embedded Perl support, this option has no effect.
 
-p1_file=/usr/bin/p1.pl
+p1_file = <%=nagios_p1_pl %>
 
 
+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke.sh

@@ -66,7 +66,7 @@ export OOZIE_EXIT_CODE=0
 export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/mapred-site.xml mapred.job.tracker`
 export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.default.name`
 export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url`
-export OOZIE_EXAMPLES_DIR=/usr/share/doc/oozie-*/
+export OOZIE_EXAMPLES_DIR=`rpm -ql oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
 cd $OOZIE_EXAMPLES_DIR
 
 tar -zxf oozie-examples.tar.gz

+ 1 - 15
ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp

@@ -20,26 +20,12 @@
 #
 class hdp-oozie::download-ext-zip()
 {
-  include hdp-oozie::params
-
-  $zip_name = $hdp-oozie::params::ext_zip_name
-  $target = "${hdp::params::artifact_dir}/${zip_name}"
-
   anchor { 'hdp-oozie::download-ext-zip::begin':}
 
    hdp::package { 'extjs' :
      require   => Anchor['hdp-oozie::download-ext-zip::begin']
    }
 
-   hdp::exec { 'mkdir -p ${artifact_dir} ;  cp /tmp/HDP-oozie/${zip_name} ${target}':
-       command => "mkdir -p ${artifact_dir} ;  cp /tmp/HDP-oozie/${zip_name} ${target}",
-       unless  => "test -f ${target}",
-       creates => $target,
-       path    => ["/bin","/usr/bin/"],
-       require => Hdp::Package['extjs'],
-       notify  =>  Anchor['hdp-oozie::download-ext-zip::end'],
-   }
-
    anchor { 'hdp-oozie::download-ext-zip::end':}
 
-}
+}

+ 9 - 6
ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp

@@ -32,13 +32,19 @@ class hdp-oozie::service(
   $cmd = "env HADOOP_HOME=${hadoop_home} /usr/sbin/oozie_server.sh"
   $pid_file = "${hdp-oozie::params::oozie_pid_dir}/oozie.pid" 
   $jar_location = $hdp::params::hadoop_jar_location
-  $ext_js_path = "/tmp/HDP-artifacts/${hdp-oozie::params::ext_zip_name}"
+  $ext_js_path = "/usr/share/HDP-oozie/ext.zip"
+  
+  if ($lzo_enabled) {
+    $lzo_jar_suffix = "-jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar"
+  } else {
+    $lzo_jar_suffix = ""
+  }
 
   if ($ensure == 'running') {
-    $daemon_cmd = "su - ${user} -c  'cd ${oozie_tmp} ; /usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 $jar_location -extjs $ext_js_path; /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run Validate DB Connection; /usr/lib/oozie/bin/oozie-start.sh'"
+    $daemon_cmd = "/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p ${oozie_tmp} && chown ${user}:hadoop ${oozie_tmp} && cd ${oozie_tmp}' && su - ${user} -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 $jar_location -extjs $ext_js_path $lzo_jar_suffix && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/${user}/share && /usr/lib/oozie/bin/oozie-start.sh' "
     $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
   } elsif ($ensure == 'stopped') {
-    $daemon_cmd  = "su - ${user} -c  'cd ${oozie_tmp} ; /usr/lib/oozie/bin/oozie-stop.sh'"
+    $daemon_cmd  = "su - ${user} -c  'cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-stop.sh'"
     $no_op_test = undef
   } else {
     $daemon_cmd = undef
@@ -49,9 +55,6 @@ class hdp-oozie::service(
   hdp-oozie::service::directory { $hdp-oozie::params::oozie_tmp_dir : }
   hdp-oozie::service::directory { $hdp-oozie::params::oozie_data_dir : }
   hdp-oozie::service::directory { $hdp-oozie::params::oozie_lib_dir : }
-  if ($ensure == 'running') {
-    hdp-oozie::service::createsymlinks { '/usr/lib/oozie/oozie-server/lib/mapred-site.xml' : }
-  }
 
   anchor{'hdp-oozie::service::begin':} -> Hdp-oozie::Service::Directory<||> -> anchor{'hdp-oozie::service::end':}
   

+ 1 - 0
ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-env.sh.erb

@@ -61,3 +61,4 @@ export OOZIE_DATA=<%=scope.function_hdp_template_var("oozie_data_dir")%>/
 # The base URL for callback URLs to Oozie
 #
 # export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64

+ 2 - 1
ambari-agent/src/main/puppet/modules/hdp-repos/templates/repo.erb

@@ -1,5 +1,5 @@
 #
-#
+# 
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -24,3 +24,4 @@ name=<%=repo_name %>
 <%if scope.function_hdp_is_empty(base_url)%>mirrorlist=<%=mirror_list %><% else %>baseurl=<%=base_url %><% end %>
 path=/
 enabled=1
+gpgcheck=0

+ 4 - 7
ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/mysql-connector.pp

@@ -28,17 +28,14 @@ class hdp-sqoop::mysql-connector()
 
   anchor { 'hdp-sqoop::mysql-connector::begin':}
 
-   hdp::exec { 'yum install -y mysql-connector-java':
-       command => "yum install -y mysql-connector-java",
-       unless  => "rpm -qa | grep mysql-connector-java",
-       path    => ["/bin","/usr/bin/"],
-       require   => Anchor['hdp-sqoop::mysql-connector::begin']
-   }
+  hdp::package { 'mysql-connector-java' :
+    require   => Anchor['hdp-sqoop::mysql-connector::begin']
+  }
 
    file { "${sqoop_lib}/mysql-connector-java.jar" :
        ensure => link,
        target => "/usr/share/java/mysql-connector-java.jar",
-       require => Hdp::Exec['yum install -y mysql-connector-java'],
+       require => Hdp::Package['mysql-connector-java'],
        notify  =>  Anchor['hdp-sqoop::mysql-connector::end'],
    }
 

+ 3 - 0
ambari-agent/src/main/puppet/modules/hdp-sqoop/templates/sqoop-env.sh.erb

@@ -28,5 +28,8 @@ export HBASE_HOME=${HBASE_HOME:-<%=scope.function_hdp_template_var("hbase_home")
 #Set the path to where bin/hive is available
 export HIVE_HOME=${HIVE_HOME:-<%=scope.function_hdp_template_var("hive_home")%>}
 
+# add libthrift in hive to sqoop class path first so hive imports work
+export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"
+
 #Set the path for where zookeper config dir is
 export ZOOCFGDIR=${ZOOCFGDIR:-<%=scope.function_hdp_template_var("zoo_conf_dir")%>}

+ 9 - 9
ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp

@@ -28,18 +28,18 @@ class hdp-templeton::download-hive-tar()
  
   anchor { 'hdp-templeton::download-hive-tar::begin':}         
 
-   hdp::package { 'templeton-tar-hive' :
+   hdp::package { 'webhcat-tar-hive' :
      require   => Anchor['hdp-templeton::download-hive-tar::begin']                                                              
    }
   
-   hdp::exec { 'hive mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}':
-       command => "mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}",
-       unless  => "test -f ${target}",
-       creates => $target,
-       path    => ["/bin","/usr/bin/"],
-       require => Hdp::Package['templeton-tar-hive'],
-       notify  =>  Anchor['hdp-templeton::download-hive-tar::end'],
-   }
+#   hdp::exec { 'hive mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}':
+#       command => "mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}",
+#       unless  => "test -f ${target}",
+#       creates => $target,
+#       path    => ["/bin","/usr/bin/"],
+#       require => Hdp::Package['webhcat-tar-hive'],
+#       notify  =>  Anchor['hdp-templeton::download-hive-tar::end'],
+#   }
 
    anchor { 'hdp-templeton::download-hive-tar::end':}       
 

+ 9 - 9
ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp

@@ -28,18 +28,18 @@ class hdp-templeton::download-pig-tar()
 
   anchor { 'hdp-templeton::download-pig-tar::begin':}
 
-   hdp::package { 'templeton-tar-pig' :
+   hdp::package { 'webhcat-tar-pig' :
      require   => Anchor['hdp-templeton::download-pig-tar::begin']
    }
 
-   hdp::exec { 'pig ; mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}':
-       command => "mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}",
-       unless  => "test -f ${target}",
-       creates => $target,
-       path    => ["/bin","/usr/bin/"],
-       require => Hdp::Package['templeton-tar-pig'],
-       notify  =>  Anchor['hdp-templeton::download-pig-tar::end'],
-   }
+#   hdp::exec { 'pig ; mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}':
+#       command => "mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}",
+#       unless  => "test -f ${target}",
+#       creates => $target,
+#       path    => ["/bin","/usr/bin/"],
+#       require => Hdp::Package['webhcat-tar-pig'],
+#       notify  =>  Anchor['hdp-templeton::download-pig-tar::end'],
+#   }
 
    anchor { 'hdp-templeton::download-pig-tar::end':}
 

+ 10 - 10
ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp

@@ -25,12 +25,12 @@ class hdp-templeton(
 {
 # Configs generation  
 
-  if has_key($configuration, 'templeton-site') {
-    configgenerator::configfile{'templeton-site': 
+  if has_key($configuration, 'webhcat-site') {
+    configgenerator::configfile{'webhcat-site': 
       modulespath => $hdp-templeton::params::conf_dir,
-      filename => 'templeton-site.xml',
+      filename => 'webhcat-site.xml',
       module => 'hdp-templeton',
-      configuration => $configuration['templeton-site']
+      configuration => $configuration['webhcat-site']
     }
   }
 
@@ -46,7 +46,7 @@ class hdp-templeton(
   $templeton_config_dir = $hdp-templeton::params::conf_dir
 
   if ($service_state == 'uninstalled') {
-      hdp::package { 'templeton' :
+      hdp::package { 'webhcat' :
       size => $size,
       ensure => 'uninstalled'
     }
@@ -55,10 +55,10 @@ class hdp-templeton(
         force => true
       }
 
-     anchor { 'hdp-templeton::begin': } -> Hdp::Package['templeton'] -> Hdp::Directory[$templeton_config_dir] ->  anchor { 'hdp-templeton::end': }
+     anchor { 'hdp-templeton::begin': } -> Hdp::Package['webhcat'] -> Hdp::Directory[$templeton_config_dir] ->  anchor { 'hdp-templeton::end': }
 
   } else {
-    hdp::package { 'templeton' :
+    hdp::package { 'webhcat' :
       size => $size
     }
     class { hdp-templeton::download-hive-tar: }
@@ -71,12 +71,12 @@ class hdp-templeton(
       force => true
     }
 
-    hdp-templeton::configfile { ['templeton-env.sh']: }
+    hdp-templeton::configfile { ['webhcat-env.sh']: }
 
-    anchor { 'hdp-templeton::begin': } -> Hdp::Package['templeton'] -> Hdp::User[$templeton_user] -> Hdp::Directory[$templeton_config_dir] -> Hdp-templeton::Configfile<||> ->  anchor { 'hdp-templeton::end': }
+    anchor { 'hdp-templeton::begin': } -> Hdp::Package['webhcat'] -> Hdp::User[$templeton_user] -> Hdp::Directory[$templeton_config_dir] -> Hdp-templeton::Configfile<||> ->  anchor { 'hdp-templeton::end': }
 
      if ($server == true ) { 
-      Hdp::Package['templeton'] -> Hdp::User[$templeton_user] ->   Class['hdp-templeton::download-hive-tar'] -> Class['hdp-templeton::download-pig-tar'] -> Anchor['hdp-templeton::end']
+      Hdp::Package['webhcat'] -> Hdp::User[$templeton_user] ->   Class['hdp-templeton::download-hive-tar'] -> Class['hdp-templeton::download-pig-tar'] -> Anchor['hdp-templeton::end']
      }
   }
 }

+ 8 - 8
ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/params.pp

@@ -27,21 +27,21 @@ class hdp-templeton::params() inherits hdp::params
 
   $dest_pig_tar_name = hdp_default("dest_pig_tar_name","pig.tar.gz")
   $dest_hive_tar_name = hdp_default("dest_hive_tar_name","hive.tar.gz")
-  $src_pig_tar_name = hdp_default("src_pig_tar_name","pig-0.9.2.14.tar.gz")
-  $src_hive_tar_name = hdp_default("src_hive_tar_name","hive-0.9.0.14.tar.gz")
+  $src_pig_tar_name = hdp_default("src_pig_tar_name","pig.tar.gz")
+  $src_hive_tar_name = hdp_default("src_hive_tar_name","hive.tar.gz")
 
   ### templeton-env
-  $conf_dir = hdp_default("hadoop/templeton-env/conf_dir","/etc/templeton")
+  $conf_dir = hdp_default("hadoop/templeton-env/conf_dir","/etc/hcatalog/conf")
 
   ### templeton-env
-  $templeton_log_dir = hdp_default("hadoop/templeton-env/templeton_log_dir","/var/log/templeton")
+  $templeton_log_dir = hdp_default("hadoop/templeton-env/templeton_log_dir","/var/log/webhcat")
 
-  $templeton_pid_dir = hdp_default("hadoop/templeton-env/templeton_pid_dir","/var/run/templeton")
+  $templeton_pid_dir = hdp_default("hadoop/templeton-env/templeton_pid_dir","/var/run/webhcat")
 
-  $templeton_jar_name= hdp_default("hadoop/templeton-env/templeton_jar_name","templeton-0.1.4.14.jar")
+#  $templeton_jar_name= hdp_default("hadoop/templeton-env/templeton_jar_name","templeton-0.1.4.14.jar")
  
-  $hadoop_prefix = hdp_default("hadoop/templeton-env/hadoop_prefix","/usr")
-  $hive_prefix = hdp_default("hadoop/templeton-env/hive_prefix","/usr")
+#  $hadoop_prefix = hdp_default("hadoop/templeton-env/hadoop_prefix","/usr")
+#  $hive_prefix = hdp_default("hadoop/templeton-env/hive_prefix","/usr")
   
   ### templeton-site
   $hadoop_conf_dir = hdp_default("hadoop/templeton-site/hadoop_conf_dir")

+ 17 - 17
ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp

@@ -76,32 +76,32 @@ class hdp-templeton::server(
 class hdp-templeton::copy-hdfs-directories($service_state)
 {
  $templeton_user = $hdp-templeton::params::templeton_user
- $pig_src_tar = "$hdp::params::artifact_dir/pig.tar.gz"
+# $pig_src_tar = "$hdp::params::artifact_dir/pig.tar.gz"
 
-  hdp-hadoop::hdfs::copyfromlocal { '/usr/share/templeton/templeton*jar':
-    service_state => $service_state,
-    owner => $hdp-templeton::params::templeton_user,
-    mode  => '755',
-    dest_dir => '/apps/templeton/ugi.jar'
-  }
-  hdp-hadoop::hdfs::copyfromlocal { '/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar':
-   service_state => $service_state,
-   owner => $hdp-templeton::params::templeton_user,
-   mode  => '755',
-   dest_dir => '/apps/templeton/hadoop-streaming.jar'
- }
+#  hdp-hadoop::hdfs::copyfromlocal { '/usr/share/templeton/templeton*jar':
+#    service_state => $service_state,
+#    owner => $hdp-templeton::params::templeton_user,
+#    mode  => '755',
+#    dest_dir => '/apps/templeton/ugi.jar'
+#  }
+#  hdp-hadoop::hdfs::copyfromlocal { '/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar':
+#   service_state => $service_state,
+#   owner => $hdp-templeton::params::templeton_user,
+#   mode  => '755',
+#   dest_dir => '/apps/templeton/hadoop-streaming.jar'
+# }
   #TODO: Use ${hdp::params::artifact_dir}/${hdp-templeton::params::pig_tar_name} instead
-  hdp-hadoop::hdfs::copyfromlocal { '/tmp/HDP-artifacts/pig.tar.gz' :
+  hdp-hadoop::hdfs::copyfromlocal { '/usr/share/HDP-webhcat/pig.tar.gz' :
     service_state => $service_state,
     owner => $hdp-templeton::params::templeton_user,
     mode  => '755',
-    dest_dir => '/apps/templeton/pig.tar.gz'
+    dest_dir => '/apps/webhcat/pig.tar.gz'
   }
   #TODO: Use ${hdp::params::artifact_dir}/${hdp-templeton::params::hive_tar_name} instead
-  hdp-hadoop::hdfs::copyfromlocal { '/tmp/HDP-artifacts/hive.tar.gz' :
+  hdp-hadoop::hdfs::copyfromlocal { '/usr/share/HDP-webhcat/hive.tar.gz' :
     service_state => $service_state,
     owner => $hdp-templeton::params::templeton_user,
     mode  => '755',
-    dest_dir => '/apps/templeton/hive.tar.gz'
+    dest_dir => '/apps/webhcat/hive.tar.gz'
   }
 }

+ 2 - 2
ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/service.pp

@@ -27,8 +27,8 @@ class hdp-templeton::service(
   
   $user = "$hdp-templeton::params::templeton_user"
   $hadoop_home = $hdp-templeton::params::hadoop_prefix
-  $cmd = "env HADOOP_HOME=${hadoop_home} /usr/sbin/templeton_server.sh"
-  $pid_file = "${hdp-templeton::params::templeton_pid_dir}/templeton.pid" 
+  $cmd = "env HADOOP_HOME=${hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh"
+  $pid_file = "${hdp-templeton::params::templeton_pid_dir}/webhcat.pid" 
 
   if ($ensure == 'running') {
     $daemon_cmd = "su - ${user} -c  '${cmd} start'"

+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/templeton/service_check.pp

@@ -30,7 +30,7 @@ class hdp-templeton::templeton::service_check()
   }
   $smoke_user_keytab = "${hdp-templeton::params::keytab_path}/${smoke_test_user}.headless.keytab"
 
-  $templeton_host = $hdp::params::templeton_server_host
+  $templeton_host = $hdp::params::webhcat_server_host
 
   $smoke_shell_files = ['templetonSmoke.sh']
 

+ 9 - 6
ambari-agent/src/main/puppet/modules/hdp-templeton/templates/templeton-env.sh.erb → ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb

@@ -21,18 +21,21 @@
 #
 
 # The file containing the running pid
-PID_FILE=<%=scope.function_hdp_template_var("templeton_pid_dir")%>/templeton.pid
+PID_FILE=<%=scope.function_hdp_template_var("templeton_pid_dir")%>/webhcat.pid
 
 TEMPLETON_LOG_DIR=<%=scope.function_hdp_template_var("templeton_log_dir")%>/
 
 # The console error log
-ERROR_LOG=<%=scope.function_hdp_template_var("templeton_log_dir")%>/templeton-console-error.log
+ERROR_LOG=<%=scope.function_hdp_template_var("templeton_log_dir")%>/webhcat-console-error.log
 
 # The console log
-CONSOLE_LOG=<%=scope.function_hdp_template_var("templeton_log_dir")%>/templeton-console.log
+CONSOLE_LOG=<%=scope.function_hdp_template_var("templeton_log_dir")%>/webhcat-console.log
 
-TEMPLETON_JAR=<%=scope.function_hdp_template_var("templeton_jar_name")%>
+#TEMPLETON_JAR=<%=scope.function_hdp_template_var("templeton_jar_name")%>
 
-HADOOP_PREFIX=<%=scope.function_hdp_template_var("hadoop_prefix")%>/
+#HADOOP_PREFIX=<%=scope.function_hdp_template_var("hadoop_prefix")%>/
 
-HCAT_PREFIX=<%=scope.function_hdp_template_var("hive_prefix")%>/
+#HCAT_PREFIX=<%=scope.function_hdp_template_var("hive_prefix")%>/
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=/usr/lib/hadoop

+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/service.pp

@@ -27,7 +27,7 @@ class hdp-zookeeper::service(
   $user = $hdp-zookeeper::params::zk_user
   $conf_dir = $hdp-zookeeper::params::conf_dir
   $zk_bin = $hdp::params::zk_bin
-  $cmd = "/bin/env ZOOCFGDIR=${conf_dir} ZOOCFG=zoo.cfg ${zk_bin}/zkServer.sh"
+  $cmd = "env ZOOCFGDIR=${conf_dir} ZOOCFG=zoo.cfg ${zk_bin}/zkServer.sh"
 
   $pid_file = $hdp-zookeeper::params::zk_pid_file  
 

+ 2 - 2
ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp

@@ -36,7 +36,7 @@ define hdp::configfile(
   $hcat_server_host = $hdp::params::hcat_server_host,
   $hive_server_host = $hdp::params::hive_server_host,
   $oozie_server = $hdp::params::oozie_server,
-  $templeton_server_host = $hdp::params::templeton_server_host,
+  $webhcat_server_host = $hdp::params::webhcat_server_host,
   $hcat_mysql_host = $hdp::params::hcat_mysql_host,
   $nagios_server_host = $hdp::params::nagios_server_host,
   $ganglia_server_host = $hdp::params::ganglia_server_host,
@@ -52,7 +52,7 @@ define hdp::configfile(
   $public_dashboard_host = $hdp::params::public_dashboard_host,
   $public_hive_server_host = $hdp::params::public_hive_server_host,
   $public_oozie_server = $hdp::params::public_oozie_server,
-  $public_templeton_server_host = $hdp::params::public_templeton_server_host
+  $public_webhcat_server_host = $hdp::params::public_webhcat_server_host
 ) 
 {
 

+ 31 - 11
ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp

@@ -23,6 +23,7 @@ class hdp(
   $pre_installed_pkgs = undef
 )
 {
+
   import 'params.pp'
   include hdp::params
 
@@ -70,21 +71,20 @@ class hdp(
     ensure => stopped,
   }
 
-  case $hdp::params::hdp_os_type {
-    centos6, rhel6: {
-      hdp::package{ 'glibc-rhel6':
-        ensure       => 'present',
-        size         => $size,
-        java_needed  => false,
-        lzo_needed   => false
-      }
-    }
+
+  hdp::package{ 'glibc':
+    ensure       => 'present',
+    size         => $size,
+    java_needed  => false,
+    lzo_needed   => false
   }
 
 }
 
 class hdp::pre_install_pkgs
 {
+
   if ($service_state == 'installed_and_configured') {
     hdp::exec{ 'yum install $pre_installed_pkgs':
        command => "yum install -y $pre_installed_pkgs"
@@ -135,8 +135,9 @@ class hdp::set_selinux()
  $cmd = "/bin/echo 0 > /selinux/enforce"
 hdp::exec{ $cmd:
    command => $cmd,
-    unless => "head -n 1 /selinux/enforce | grep ^0$"
-  }
+    unless => "head -n 1 /selinux/enforce | grep ^0$",
+    onlyif => "test -f /selinux/enforce"
+  }
 }
 
 define hdp::user(
@@ -206,6 +207,7 @@ define hdp::directory_recursive_create(
   $service_state = 'running'
   )
 {
+
   hdp::exec {"mkdir -p ${name}" :
     command => "mkdir -p ${name}",
     creates => $name
@@ -222,6 +224,22 @@ define hdp::directory_recursive_create(
   Hdp::Exec["mkdir -p ${name}"] -> Hdp::Directory[$name]
 }
 
+define hdp::directory_recursive_create_ignore_failure(
+  $owner = $hdp::params::hadoop_user,
+  $group = $hdp::params::hadoop_user_group,
+  $mode = undef,
+  $context_tag = undef,
+  $ensure = directory,
+  $force = undef,
+  $service_state = 'running'
+  )
+{
+  hdp::exec {"mkdir -p ${name} ; exit 0" :
+    command => "mkdir -p ${name} ; chown ${owner}:${group} ${name}; chmod ${mode} ${name} ; exit 0",
+    creates => $name
+  }
+}
+
 ### helper to do exec
 define hdp::exec(
   $command,
@@ -240,6 +258,8 @@ define hdp::exec(
 )
 {
      
+
+
   if (($initial_wait != undef) and ($initial_wait != "undef")) {
     #passing in creates and unless so dont have to wait if condition has been acheived already
     hdp::wait { "service ${name}" : 

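The new hdp::directory_recursive_create_ignore_failure define deliberately appends "; exit 0", so a failed mkdir/chown/chmod cannot abort the whole catalog run. It is used like the strict variant; the path and ownership below are illustrative:

  hdp::directory_recursive_create_ignore_failure { '/var/nagios/spool/checkresults':
    owner => 'nagios',
    group => 'nagios',
    mode  => '0755',
  }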
+ 1 - 2
ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp

@@ -23,7 +23,7 @@ define hdp::lzo::package()
   $size = $name
 
   case $hdp::params::hdp_os_type {
-    centos6, rhel6: {
+    centos6, redhat6: {
       $pkg_type = 'lzo-rhel6'
     }
     default: {
@@ -41,4 +41,3 @@ define hdp::lzo::package()
   $anchor_end = "hdp::lzo::package::${size}::end"
   anchor{$anchor_beg:} ->  Hdp::Package["lzo ${size}"] -> anchor{$anchor_end:}
 }
-

+ 48 - 29
ambari-agent/src/main/puppet/modules/hdp/manifests/package.pp

@@ -23,33 +23,25 @@ define hdp::package(
   $package_type = undef,
   $size = 64,
   $java_needed = true,
-  $lzo_needed = false,
-  $provider = yum
+  $lzo_needed = false
   )
 {
-
+
   $pt = $package_type ? {
     undef  => $name,
     default  => $package_type
   }
 
-  case $provider {
-    'yum': {
-      hdp::package::yum { $name:
-        ensure       => $ensure,
-        package_type => $pt,
-        size         => $size,
-        java_needed  => $java_needed,
-        lzo_needed   => $lzo_needed
-      }
-    }
-    default: {
-      hdp_fail("No support for provider ${provider}")
-    }
+  hdp::package::process_pkg { $name:
+    ensure       => $ensure,
+    package_type => $pt,
+    size         => $size,
+    java_needed  => $java_needed,
+    lzo_needed   => $lzo_needed
   }
 }
 
-define hdp::package::yum(
+define hdp::package::process_pkg(
   $ensure = present,
   $package_type,
   $size,
@@ -58,17 +50,43 @@ define hdp::package::yum(
   )
 {
 
+
+  debug("##Processing package:  $ensure,$package_type,$size,$java_needed,$lzo_needed")
+
   include hdp::params
-
-  $package_type_info = $hdp::params::package_names[$package_type]
-  if hdp_is_empty($package_type_info) {
-    hdp_fail("Cannot find info about package type ${package_type}")
+
+  if hdp_is_empty($hdp::params::alt_package_names[$package_type]) {
+    hdp_fail("No packages for $package_type")
   }
-  $package_name = $package_type_info[$size]
-  if hdp_is_empty($package_name) {
-    hdp_fail("Cannot find package ${package_type} of size ${size}")
+
+  if hdp_is_empty($hdp::params::alt_package_names[$package_type][$size]) {
+
+    if hdp_is_empty($hdp::params::alt_package_names[$package_type][ALL]) {
+      hdp_fail("No packages for $package_type")
+    }
+    else {
+      $packages_list_by_size = $hdp::params::alt_package_names[$package_type][ALL]
+    }
   }
-
+  else {
+    $packages_list_by_size = $hdp::params::alt_package_names[$package_type][$size]
+
+  }
+  if hdp_is_empty($packages_list_by_size[$hdp::params::hdp_os_type]) {
+
+    if hdp_is_empty($packages_list_by_size[ALL]) {
+      hdp_fail("No packages for $package_type")
+    }
+    else {
+      $packages_list = $packages_list_by_size[ALL]
+    }
+  }
+  else {
+    $packages_list = $packages_list_by_size[$hdp::params::hdp_os_type]
+  }
+
+  debug("##Packages list: $packages_list")
+
   if (($java_needed == true) and ($ensure == 'present')){
     hdp::java::package{ $name:
       size                 => $size
@@ -85,10 +103,11 @@ define hdp::package::yum(
     $ensure_actual = $ensure
   }
   $tag = regsubst($name,' ','-',G)
-  package{ $package_name:
-    ensure   => $ensure_actual,
-    provider => yum,
-    tag      => $tag
+  if $packages_list != $hdp::params::NOTHING {
+    package{ $packages_list:
+      ensure   => $ensure_actual,
+      tag      => $tag
+    }
   }
   anchor{ "hdp::package::${name}::begin": } -> Package<|tag == $tag|> -> anchor{ "hdp::package::${name}::end": }
 
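The lookup in hdp::package::process_pkg resolves a package type to concrete package names in two steps: first by the requested size (falling back to the ALL bucket), then by the detected OS type (again falling back to ALL), failing fast when neither level has an entry. A minimal Python sketch of the same resolution; resolve_packages and the sample table are illustrative, not part of the agent:

# Hypothetical sketch of the two-level lookup done by hdp::package::process_pkg;
# the table shape mirrors $alt_package_names, NOTHING mirrors $hdp::params::NOTHING.
NOTHING = 'NOTHING'

def resolve_packages(table, package_type, size, os_type):
    by_type = table.get(package_type)
    if not by_type:
        raise LookupError("No packages for %s" % package_type)
    by_size = by_type.get(size) or by_type.get('ALL')      # size first, then ALL
    if not by_size:
        raise LookupError("No packages for %s" % package_type)
    packages = by_size.get(os_type) or by_size.get('ALL')  # OS first, then ALL
    if not packages:
        raise LookupError("No packages for %s" % package_type)
    return packages

table = {'snmp': {64: {'suse': ['net-snmp'],
                       'ALL': ['net-snmp', 'net-snmp-utils']}}}
print(resolve_packages(table, 'snmp', 64, 'suse'))     # ['net-snmp']
print(resolve_packages(table, 'snmp', 64, 'centos5'))  # falls back to the ALL bucket

Entries that resolve to NOTHING (see nagios-php-pecl-json in the params table below) deliberately install no package on that platform.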
+ 192 - 16
ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp

@@ -21,6 +21,9 @@
 class hdp::params()
 {
 
+  ##Constants##
+  $NOTHING='NOTHING'
+
   ##### global state defaults ####
   $cluster_service_state = hdp_default("cluster_service_state","running")
   $cluster_client_state = hdp_default("cluster_client_state","installed_and_configured")
@@ -44,7 +47,7 @@ class hdp::params()
 
   $hive_server_host = hdp_default("hive_server_host", "")
   $oozie_server =  hdp_default("oozie_server", "")
-  $templeton_server_host = hdp_default("templeton_server_host", "")
+  $webhcat_server_host = hdp_default("webhcat_server_host", "")
   $gateway_host = hdp_default("gateway_host")
 
   $nagios_server_host = hdp_default("nagios_server_host")
@@ -54,6 +57,8 @@ class hdp::params()
 
   $hdp_os = $::operatingsystem
   $hdp_os_version = $::operatingsystemrelease
+
+
   case $::operatingsystem {
     centos: {
       case $::operatingsystemrelease {
@@ -63,15 +68,19 @@ class hdp::params()
     }
     redhat: {
       case $::operatingsystemrelease {
-        /^5\..+$/: { $hdp_os_type = "rhel5" }
-        /^6\..+$/: { $hdp_os_type = "rhel6" }
+        /^5\..+$/: { $hdp_os_type = "redhat5" }
+        /^6\..+$/: { $hdp_os_type = "redhat6" }
       }
     }
     suse: {
       $hdp_os_type = "suse"
     }
+    SLES: {
+      $hdp_os_type = "suse"
+    }
+
     default: {
-      hdp_fail("No support for os  ${hdp_os} ${hdp_os_version}")
+      hdp_fail("No support for os $::operatingsystem  ${hdp_os} ${hdp_os_version}")
     }
   }
 
@@ -86,7 +95,7 @@ class hdp::params()
     $public_dashboard_host = hdp_host_attribute($hostAttributes,"publicfqdn",$dashboard_host)
     $public_hive_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$hive_server_host)
     $public_oozie_server = hdp_host_attribute($hostAttributes,"publicfqdn",$oozie_server)
-    $public_templeton_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$templeton_server_host)
+    $public_webhcat_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$webhcat_server_host)
   } else {
     $public_namenode_host = hdp_default("namenode_host")
     $public_snamenode_host = hdp_default("snamenode_host")
@@ -98,7 +107,7 @@ class hdp::params()
     $public_dashboard_host = hdp_default("dashboard_host")
     $public_hive_server_host = hdp_default("hive_server_host")
     $public_oozie_server = hdp_default("oozie_server")
-    $public_templeton_server_host = hdp_default("templeton_server_host")
+    $public_webhcat_server_host = hdp_default("webhcat_server_host")
   }
 
   ############ Hdfs directories
@@ -117,7 +126,7 @@ class hdp::params()
   $hcat_user = hdp_default("hcat_user","hcat")
 
   $oozie_user = hdp_default("oozie_user","oozie")
-  $templeton_user = hdp_default("templeton_user","templeton")
+  $templeton_user = hdp_default("templeton_user","hcat")
 
   $gmetad_user = hdp_default("gmetad_user","nobody")
   $gmond_user = hdp_default("gmond_user","nobody")
@@ -194,7 +203,7 @@ class hdp::params()
   $gpl_artifacts_download_url = hdp_default("gpl_artifacts_download_url","")
 
   ### related to package resources
-
+  #TODO: delete variable $package_names
   $package_names = {
    # hadoop => {
    #   32 => 'hadoop.i386',
@@ -247,7 +256,7 @@ class hdp::params()
       64 => 'php-pecl-json.x86_64'
     },
     snmp => {
-      64 => ['net-snmp','net-snmp-utils'],
+      64 => ['net-snmp'],
     },
     dashboard => {
       64 => 'hdp_mon_dashboard'
@@ -255,15 +264,15 @@ class hdp::params()
     # sqoop => {
     #   32 => 'sqoop-1.4.1-1.noarch'
     #},
-    templeton => {
-       32 => 'templeton',
-       64 => 'templeton'
+    webhcat => {
+       32 => 'hcatalog',
+       64 => 'hcatalog'
     },
     oozie-client => {
-      64 => 'oozie-client.noarch'
+      64 => 'oozie-client'
    },
     oozie-server => {
-      64 => 'oozie.noarch'
+      64 => 'oozie'
     },
     lzo-rhel5 => {
       32 => ['lzo','lzo.i386','lzo-devel','lzo-devel.i386'],
@@ -292,7 +301,13 @@ class hdp::params()
     },
     templeton-tar-pig => {
       64 => ['templeton-tar-pig-0.0.1.14-1']
-    }
+    },
+    rrdtool-python => {
+      64 => ['python-rrdtool.x86_64']
+    },
+    ambari-log4j => {
+      64 => ['ambari-log4j']
+    }
   }
   $packages = 'bigtop' 
   if ($packages == 'hdp') {
@@ -355,13 +370,174 @@ class hdp::params()
     $hcat_mysql_host = hdp_default("hive_mysql_host")
 
 
+
+    $pathes = {
+      nagios_p1_pl => {
+        'ALL' => '/usr/bin/p1.pl',
+        suse => '/usr/lib/nagios/p1.pl'
+      }
+    }
+
+    $services_names = {
+      mysql => {
+        'ALL' => 'mysqld',
+        suse => 'mysql'},
+      httpd => {
+        'ALL' => 'httpd',
+        suse => 'apache2'}
+    }
+
+    $cmds = {
+      htpasswd => {
+        'ALL' => 'htpasswd',
+        suse => 'htpasswd2'}
+    }
+
+    $alt_package_names = {
+      snmp => {
+        64 => {suse => ['net-snmp'],
+               'ALL' => ['net-snmp', 'net-snmp-utils']}
+      },
+
+      oozie-server => {
+        64 => {'ALL' => 'oozie.noarch'}
+      },
+
+      snappy => {
+        64 => {'ALL' => ['snappy','snappy-devel']}
+      },
+
+      hadoop => {
+        32 => {'ALL' => ['hadoop','hadoop-libhdfs.i386','hadoop-native.i386','hadoop-pipes.i386','hadoop-sbin.i386','hadoop-lzo', 'hadoop-lzo-native.i386']},
+        64 => {'ALL' => ['hadoop','hadoop-libhdfs','hadoop-native','hadoop-pipes','hadoop-sbin','hadoop-lzo', 'hadoop-lzo-native']}
+      },
+
+      glibc => {
+        'ALL' => {'ALL' => ['glibc','glibc.i686'],
+                  suse => ['glibc']},
+      },
+
+      zookeeper => {
+        64 => {'ALL' => 'zookeeper'},
+      },
+      hbase => {
+        64 => {'ALL' => 'hbase'},
+      },
+
+      pig => {
+        'ALL' => {'ALL' => ['pig.noarch']}
+      },
+
+      sqoop => {
+        'ALL' => {'ALL' => ['sqoop']}
+      },
+
+      mysql-connector-java => {
+        'ALL' => {'ALL' => ['mysql-connector-java']}
+      },
+      oozie-client => {
+        '64' => {'ALL' => ['oozie-client.noarch']}
+      },
+      extjs => {
+        64 => {'ALL' => ['extjs-2.2-1']}
+      },
+      hive => {
+        64 => {'ALL' => ['hive']}
+      },
+      hcat => {
+        'ALL' => {'ALL' => ['hcatalog']}
+      },
+
+      mysql => {
+        64 => {'ALL' => ['mysql','mysql-server'],
+               suse => ['mysql-client','mysql']}
+      },
+      webhcat => {
+        'ALL' => {'ALL' => 'hcatalog'}
+      },
+      webhcat-tar-hive => {
+        64 => {'ALL' => 'webhcat-tar-hive'}
+      },
+      webhcat-tar-pig => {
+        64 => {'ALL' => 'webhcat-tar-pig'}
+      },
+      dashboard => {
+        64 => {'ALL' => 'hdp_mon_dashboard'}
+      },
+
+      nagios-server => {
+        64 => {'ALL' => 'nagios-3.2.3'}
+      },
+
+      nagios-fping => {
+        64 => {'ALL' => 'fping'}
+      },
+
+      nagios-plugins => {
+        64 => {'ALL' => 'nagios-plugins-1.4.9'}
+      },
+
+      nagios-addons => {
+        64 => {'ALL' => 'hdp_mon_nagios_addons'}
+      },
+      nagios-php-pecl-json => {
+        64 => {'ALL' => $NOTHING,
+               suse => 'php5-json',
+               centos6 => $NOTHING,
+               rhel6 => $NOTHING}
+      },
+
+      ganglia-server => {
+        64 => {'ALL' => 'ganglia-gmetad-3.2.0'}
+      },
+
+      ganglia-gweb => {
+        64 => {'ALL' => 'gweb'}
+      },
+
+      ganglia-hdp-gweb-addons => {
+        64 => {'ALL' => 'hdp_mon_ganglia_addons'}
+      },
+
+      ganglia-monitor => {
+        64 => {'ALL' => 'ganglia-gmond-3.2.0'}
+      },
+
+      rrdtool-python => {
+        64 => {'ALL' => 'python-rrdtool.x86_64'}
+      },
+      ambari-log4j => {
+        64 => {'ALL' => 'ambari-log4j'}
+      },
+      httpd => {
+        64 => {'ALL' => 'httpd',
+               suse => ['apache2', 'apache2-mod_php5']}
+      }
+    }
+
   $repos_paths = 
   {
     centos6 => '/etc/yum.repos.d',
-    suse => '/etc/zypp/repos.d'
+    centos5 => '/etc/yum.repos.d',
+    suse => '/etc/zypp/repos.d',
+    redhat6 => '/etc/yum.repos.d',
+    redhat5 => '/etc/yum.repos.d'
   }
 
+  $rrd_py_path = '/var/www/cgi-bin'
+
   }
+
 
 ###### snmp
 
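The case statement above normalizes facter's $::operatingsystem/$::operatingsystemrelease pair into the single $hdp_os_type key that the package and repo tables are indexed by. A rough Python equivalent, assuming centos releases follow the same 5.x/6.x pattern as redhat (the centos branch body is elided in the hunk above); hdp_os_type() here is illustrative:

# Rough sketch of the $hdp_os_type normalization in hdp::params.
import re

def hdp_os_type(operatingsystem, release):
    os_name = operatingsystem.lower()
    if os_name in ('suse', 'sles'):
        return 'suse'
    match = re.match(r'^([56])\..+$', release)
    if os_name in ('centos', 'redhat') and match:
        return os_name + match.group(1)   # e.g. centos6, redhat5
    raise ValueError("No support for os %s %s" % (operatingsystem, release))

print(hdp_os_type('redhat', '6.3'))  # redhat6
print(hdp_os_type('SLES', '11'))     # suse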
+ 102 - 23
ambari-agent/src/main/python/ambari_agent/ActionQueue.py

@@ -24,9 +24,9 @@ import logging.handlers
 import Queue
 import threading
 import AmbariConfig
+from LiveStatus import LiveStatus
 from shell import shellRunner
 from FileUtil import writeFile, createStructure, deleteStructure, getFilePath, appendToFile
-from shell import shellRunner
 import json
 import pprint
 import os
@@ -34,6 +34,8 @@ import time
 import subprocess
 import copy
 import puppetExecutor
+import tempfile
+from Grep import Grep
 
 logger = logging.getLogger()
 installScriptHash = -1
@@ -41,10 +43,14 @@ installScriptHash = -1
 class ActionQueue(threading.Thread):
   """ Action Queue for the agent. We pick one command at a time from the queue
   and execute that """
-  global commandQueue, resultQueue
+  global commandQueue, resultQueue #, STATUS_COMMAND, EXECUTION_COMMAND
   commandQueue = Queue.Queue()
   resultQueue = Queue.Queue()
-
+
+  STATUS_COMMAND='STATUS_COMMAND'
+  EXECUTION_COMMAND='EXECUTION_COMMAND'
+  IDLE_SLEEP_TIME = 5
+
   def __init__(self, config):
     super(ActionQueue, self).__init__()
     #threading.Thread.__init__(self)
@@ -56,7 +62,10 @@ class ActionQueue(threading.Thread):
     self.executor = puppetExecutor.puppetExecutor(config.get('puppet', 'puppetmodules'),
                                    config.get('puppet', 'puppet_home'),
                                    config.get('puppet', 'facter_home'),
-                                   config.get('agent', 'prefix'))
+                                   config.get('agent', 'prefix'), config)
+    self.tmpdir = config.get('agent', 'prefix')
+    self.commandInProgress = None
+
   def stop(self):
     self._stop.set()
 
@@ -68,37 +77,92 @@ class ActionQueue(threading.Thread):
     return self.sh
 
   def put(self, command):
-    logger.info("The command from the server is \n" + pprint.pformat(command))
+    logger.info("The " + command['commandType'] + " from the server is \n" + pprint.pformat(command))
     commandQueue.put(command)
     pass
 
+  def getCommandQueue(self):
+    """ For Testing purpose only."""
+    return commandQueue
+
   def run(self):
     result = []
     while not self.stopped():
       while not commandQueue.empty():
         command = commandQueue.get()
-        try:
-          #pass a copy of action since we don't want anything to change in the 
-          #action dict 
-          commandCopy = copy.copy(command)
-          result = self.executeCommand(commandCopy)
-          
-        except Exception, err:
-          traceback.print_exc()  
-          logger.warn(err)
+        logger.info("Took an element of Queue: " + pprint.pformat(command))
+        if command['commandType'] == self.EXECUTION_COMMAND:
+          try:
+            #pass a copy of action since we don't want anything to change in the
+            #action dict
+            result = self.executeCommand(command)
+
+          except Exception, err:
+            traceback.print_exc()
+            logger.warn(err)
+            pass
+
+          for entry in result:
+            resultQueue.put((ActionQueue.EXECUTION_COMMAND, entry))
           pass
-        
-        for entry in result:
-          resultQueue.put(entry)
-        pass
+        elif command['commandType'] == self.STATUS_COMMAND:
+          cluster = command['clusterName']
+          service = command['serviceName']
+          component = command['componentName']
+          try:
+            livestatus = LiveStatus(cluster, service, component)
+            result = livestatus.build()
+            logger.info("Got live status for component " + component + " of service " + str(service) +\
+                        " of cluster " + str(cluster) + "\n" + pprint.pformat(result))
+            if result is not None:
+              resultQueue.put((ActionQueue.STATUS_COMMAND, result))
+          except Exception, err:
+            traceback.print_exc()
+            logger.warn(err)
+            pass
+        else:
+          logger.warn("Unrecognized command " + pprint.pformat(result))
       if not self.stopped():
-        time.sleep(5)
+        time.sleep(self.IDLE_SLEEP_TIME)
 
   # Store action result to agent response queue
   def result(self):
-    result = []
+    resultReports = []
+    resultComponentStatus = []
     while not resultQueue.empty():
-      result.append(resultQueue.get())
+      res = resultQueue.get()
+      if res[0] == ActionQueue.EXECUTION_COMMAND:
+        resultReports.append(res[1])
+      elif res[0] == ActionQueue.STATUS_COMMAND:
+        resultComponentStatus.append(res[1])
+
+    # Building report for command in progress
+    if self.commandInProgress is not None:
+      try:
+        tmpout= open(self.commandInProgress['tmpout'], 'r').read()
+        tmperr= open(self.commandInProgress['tmperr'], 'r').read()
+      except Exception, err:
+        logger.warn(err)
+        tmpout='...'
+        tmperr='...'
+      grep = Grep()
+      output = grep.tail(tmpout, puppetExecutor.puppetExecutor.OUTPUT_LAST_LINES)
+      inprogress = {
+        'role' : self.commandInProgress['role'],
+        'actionId' : self.commandInProgress['actionId'],
+        'taskId' : self.commandInProgress['taskId'],
+        'stdout' : grep.filterMarkup(output),
+        'clusterName' : self.commandInProgress['clusterName'],
+        'stderr' : tmperr,
+        'exitCode' : 777,
+        'serviceName' : self.commandInProgress['serviceName'],
+        'status' : 'IN_PROGRESS'
+      }
+      resultReports.append(inprogress)
+    result={
+      'reports' : resultReports,
+      'componentStatus' : resultComponentStatus
+    }
     return result
 
   def registerCommand(self, command):
@@ -118,9 +182,24 @@ class ActionQueue(threading.Thread):
     serviceName = command['serviceName']
     configurations = command['configurations']
     result = []
-    commandresult = self.executor.runCommand(command)
+
+    taskId = command['taskId']
+    # Preparing 'IN_PROGRESS' report
+    self.commandInProgress = {
+      'role' : command['role'],
+      'actionId' : commandId,
+      'taskId' : taskId,
+      'clusterName' : clusterName,
+      'serviceName' : serviceName,
+      'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
+      'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt'
+    }
+    # running command
+    commandresult = self.executor.runCommand(command, self.commandInProgress['tmpout'], self.commandInProgress['tmperr'])
+    # dumping results
+    self.commandInProgress = None
     status = "COMPLETED"
-    if (commandresult['exitcode'] != 0):
+    if commandresult['exitcode'] != 0:
       status = "FAILED"
 
     # assume some puppet plugin to run these commands
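With two command types in play, the single result queue now carries (commandType, payload) tuples so that result() can separate puppet execution reports from live-status responses when assembling the heartbeat body. A toy illustration of that split; the payload dicts are placeholders, not the agent's real report shapes:

# Toy illustration of the tagged result queue used by ActionQueue above.
import Queue  # named 'queue' on Python 3

resultQueue = Queue.Queue()
resultQueue.put(('EXECUTION_COMMAND', {'taskId': 1, 'status': 'COMPLETED'}))
resultQueue.put(('STATUS_COMMAND', {'componentName': 'DATANODE', 'status': 'STARTED'}))

reports, componentStatus = [], []
while not resultQueue.empty():
    kind, payload = resultQueue.get()
    if kind == 'EXECUTION_COMMAND':
        reports.append(payload)
    elif kind == 'STATUS_COMMAND':
        componentStatus.append(payload)

print({'reports': reports, 'componentStatus': componentStatus})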
+ 93 - 32
ambari-agent/src/main/python/ambari_agent/Controller.py

@@ -32,6 +32,7 @@ import httplib
 import ssl
 import AmbariConfig
 import pprint
+import ProcessHelper
 from Heartbeat import Heartbeat
 from Register import Register
 from ActionQueue import ActionQueue
@@ -39,13 +40,13 @@ from optparse import OptionParser
 from wsgiref.simple_server import ServerHandler
 import security
 from NetUtil import NetUtil
+from random import randrange, randint
 
 logger = logging.getLogger()
 
-
 class Controller(threading.Thread):
 
-  def __init__(self, config):
+  def __init__(self, config, range=120):
     threading.Thread.__init__(self)
     logger.debug('Initializing Controller RPC thread.')
     self.lock = threading.Lock()
@@ -54,10 +55,14 @@ class Controller(threading.Thread):
     self.config = config
     self.hostname = socket.gethostname()
     server_secured_url = 'https://' + config.get('server', 'hostname') + ':' + config.get('server', 'secured_url_port')
-    self.registerUrl = server_secured_url + '/agent/register/' + self.hostname
-    self.heartbeatUrl = server_secured_url + '/agent/heartbeat/' + self.hostname
+    self.registerUrl = server_secured_url + '/agent/v1/register/' + self.hostname
+    self.heartbeatUrl = server_secured_url + '/agent/v1/heartbeat/' + self.hostname
     self.netutil = NetUtil()
-     
+    self.responseId = -1
+    self.repeatRegistration = False
+    self.cachedconnect = None
+    self.range = range
+
   def start(self):
     self.actionQueue = ActionQueue(self.config)
     self.actionQueue.start()
@@ -75,21 +80,26 @@ class Controller(threading.Thread):
     registered=False
     id = -1
     ret = {}
-    while registered == False:
+
+    while not registered:
       try:
         data = json.dumps(self.register.build(id))
         logger.info("Registering with the server " + pprint.pformat(data))
-        req = urllib2.Request(self.registerUrl, data, {'Content-Type': 
-                                                      'application/json'})
-        stream = security.secured_url_open(req)
-        response = stream.read()
-        stream.close()
+        response = self.sendRequest(self.registerUrl, data)
         ret = json.loads(response)
+
         logger.info("Registered with the server with " + pprint.pformat(ret))
+        print("Registered with the server")
+        self.responseId= int(ret['responseId'])
         registered = True
+        if 'statusCommands' in ret.keys():
+          logger.info("Got status commands on registration " + pprint.pformat(ret['statusCommands']) )
+          self.addToQueue(ret['statusCommands'])
+          pass
         pass
       except Exception, err:
-        delay = self.netutil.CONNECT_SERVER_RETRY_INTERVAL_SEC
+        # try a reconnect only after a certain amount of random time
+        delay = randint(0, self.range)
        logger.info("Unable to connect to: " + self.registerUrl, exc_info = True)
         """ Sleeping for {0} seconds and then retrying again """.format(delay)
         time.sleep(delay)
@@ -100,15 +110,15 @@ class Controller(threading.Thread):
 
   def addToQueue(self, commands):
     """Add to the queue for running the commands """
-    """ Put the required actions into the Queue """ 
+    """ Put the required actions into the Queue """
     """ Verify if the action is to reboot or not """
     if not commands:
-      logger.info("No commands from the server.")
+      logger.info("No commands from the server : " + pprint.pformat(commands))
     else:
       """Only add to the queue if not empty list """
       for command in commands:
         logger.info("Adding command to the action queue: \n" +
-                     pprint.pformat(command)) 
+                     pprint.pformat(command))
         self.actionQueue.put(command)
         pass
       pass
@@ -123,43 +133,73 @@ class Controller(threading.Thread):
     self.DEBUG_HEARTBEAT_RETRIES = 0
     self.DEBUG_SUCCESSFULL_HEARTBEATS = 0
     retry = False
+    certVerifFailed = False
+
     #TODO make sure the response id is monotonically increasing
     id = 0
-    while True:
+    while not self.DEBUG_STOP_HEARTBITTING:
       try:
-        if self.DEBUG_STOP_HEARTBITTING:
-          return
-
         if not retry:
-          data = json.dumps(self.heartbeat.build(id))
+          data = json.dumps(self.heartbeat.build(self.responseId))
           pass
         else:
           self.DEBUG_HEARTBEAT_RETRIES += 1
-
-        req = urllib2.Request(self.heartbeatUrl, data, {'Content-Type':
-                                                        'application/json'})
-        f = security.secured_url_open(req)
-        response = f.read()
-        f.close()
+        response = self.sendRequest(self.heartbeatUrl, data)
         response = json.loads(response)
-        id=int(response['responseId'])
+
+        logger.info('Got server response: ' + pprint.pformat(response))
 
+        serverId=int(response['responseId'])
+
+        if 'registrationCommand' in response.keys():
+          # check if the registration command is None. If none skip
+          if response['registrationCommand'] is not None:
+            logger.info("RegistrationCommand received - repeat agent registration")
+            self.repeatRegistration = True
+            return
+
+        if serverId!=self.responseId+1:
+          logger.error("Error in responseId sequence - restarting")
+          self.restartAgent()
+        else:
+          self.responseId=serverId
+
         if 'executionCommands' in response.keys():
           self.addToQueue(response['executionCommands'])
           pass
+        if 'statusCommands' in response.keys():
+          self.addToQueue(response['statusCommands'])
+          pass
+        if "true" == response['restartAgent']:
+          logger.error("Got restartAgent command")
+          self.restartAgent()
         else:
          logger.info("No commands sent from the Server.")
           pass
+
+        if retry:
+          print("Reconnected to the server")
+          logger.info("Reconnected to the server")
         retry=False
+        certVerifFailed = False
         self.DEBUG_SUCCESSFULL_HEARTBEATS += 1
         self.DEBUG_HEARTBEAT_RETRIES = 0
       except Exception, err:
-        retry=True
+        #randomize the heartbeat
+        delay = randint(0, self.range)
+        time.sleep(delay)
         if "code" in err:
           logger.error(err.code)
         else:
-          logger.error("Unable to connect to: "+ 
-                       self.heartbeatUrl,exc_info=True)
+          logger.error("Unable to connect to: " + self.heartbeatUrl + " due to " + str(err))
+          logger.debug("Details: " + str(err), exc_info=True)
+          if not retry:
+            print("Connection to the server was lost. Reconnecting...")
+          if 'certificate verify failed' in str(err) and not certVerifFailed:
+            print("Server certificate verify failed. Did you regenerate server certificate?")
+            certVerifFailed = True
+        self.cachedconnect = None # Previous connection is broken now
+        retry=True
       if self.actionQueue.isIdle():
         time.sleep(self.netutil.HEARTBEAT_IDDLE_INTERVAL_SEC)
       else:
@@ -169,13 +209,34 @@ class Controller(threading.Thread):
   def run(self):
     opener = urllib2.build_opener()
     urllib2.install_opener(opener)
-    
+
+    while True:
+      self.repeatRegistration = False
+      self.registerAndHeartbeat()
+      if not self.repeatRegistration:
+        break
+
+    pass
+
+  def registerAndHeartbeat(self):
     registerResponse = self.registerWithServer()
     message = registerResponse['response']
     logger.info("Response from server = " + message)
+    time.sleep(self.netutil.HEARTBEAT_IDDLE_INTERVAL_SEC)
     self.heartbeatWithServer()
+
+  def restartAgent(self):
+    #stopping for now, restart will be added later
+    ProcessHelper.stopAgent()
     pass
-    
+
+  def sendRequest(self, url, data):
+    if self.cachedconnect is None: # Lazy initialization
+      self.cachedconnect = security.CachedHTTPSConnection(self.config)
+    req = urllib2.Request(url, data, {'Content-Type': 'application/json'})
+    response = self.cachedconnect.request(req)
+    return response
+
 def main(argv=None):
   # Allow Ctrl-C
   signal.signal(signal.SIGINT, signal.SIG_DFL)
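Two behaviours in the reworked heartbeat loop are easy to miss: the agent treats any server responseId other than previous+1 as a broken session and restarts itself, and connection failures now sleep for a random 0..range seconds so that a restarted server is not hit by every agent at once. A condensed sketch; send_heartbeat() and restart_agent() are hypothetical stand-ins for the surrounding methods:

# Condensed sketch of the responseId sequencing and randomized retry above.
import random
import time

def heartbeat_loop(send_heartbeat, restart_agent, retry_range=120):
    response_id = -1
    while True:
        try:
            response = send_heartbeat(response_id)
            server_id = int(response['responseId'])
            if server_id != response_id + 1:
                # out-of-sequence id: server and agent disagree, so start over
                restart_agent()
            response_id = server_id
        except Exception:
            # randomized backoff before the next attempt
            time.sleep(random.randint(0, retry_range))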
+ 22 - 8
ambari-agent/src/main/python/ambari_agent/Grep.py

@@ -1,3 +1,5 @@
+import re
+
 class Grep:
 
   def __init__(self):
@@ -12,21 +14,22 @@ class Grep:
     """
     stripped_string = string.strip()
     lines = stripped_string.splitlines(True)
-    last_occurence = None
+    first_occurence = None
     for index in range(len(lines)):
       line = lines[index]
       if phrase.lower() in line.lower():
-        last_occurence = index
-    if last_occurence is None:
+        first_occurence = index
+        break
+    if first_occurence is None:
       return None
     bound_a = before
-    if last_occurence < before:
-      bound_a = last_occurence
+    if first_occurence < before:
+      bound_a = first_occurence
     result = None
-    if (len(lines) - last_occurence) < after:
-      result = lines[last_occurence - bound_a :]
+    if (len(lines) - first_occurence) < after:
+      result = lines[first_occurence - bound_a :]
     else:
-      result = lines[last_occurence - bound_a : last_occurence + after + 1]
+      result = lines[first_occurence - bound_a : first_occurence + after + 1]
     return "".join(result).strip()
 
 
@@ -42,3 +45,14 @@ class Grep:
       length = len(lines)
       tailed = lines[length - n:]
       return "".join(tailed)
+
+  def filterMarkup(self, string):
+    """
+    Filters given string from puppet colour markup done using escape codes like \x1b[0;36m
+    """
+    if string is None:
+      result = None
+    else:
+      regexp = "\x1b" + r"\[[\d;]{1,4}m"
+      result = re.sub(regexp, '', string)
+    return result
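filterMarkup exists because puppet colours its output with ANSI escape sequences (ESC[...m); stripping them keeps the stdout shipped in IN_PROGRESS reports plain text. The regexp in action:

# Demonstration of the escape-code regexp used by Grep.filterMarkup.
import re

regexp = "\x1b" + r"\[[\d;]{1,4}m"
coloured = "\x1b[0;36mnotice: Finished catalog run\x1b[0m"
print(re.sub(regexp, '', coloured))  # notice: Finished catalog run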
+ 26 - 6
ambari-agent/src/main/python/ambari_agent/Hardware.py

@@ -44,20 +44,21 @@ class Hardware:
     platforms. Note that this parser ignores any filesystems with spaces 
     and any mounts with spaces. """
     mounts = []
-    df = subprocess.Popen(["df", "-kP"], stdout=subprocess.PIPE)
+    df = subprocess.Popen(["df", "-kPT"], stdout=subprocess.PIPE)
     dfdata = df.communicate()[0]
     lines = dfdata.splitlines()
     for l in lines:
       split = l.split()
       """ this ignores any spaces in the filesystemname and mounts """
-      if (len(split)) == 6:
-        device, size, used, available, percent, mountpoint = split
+      if (len(split)) == 7:
+        device, type, size, used, available, percent, mountpoint = split
         mountinfo = { 
                      'size' : size,
                      'used' : used,
                      'available' : available,
                      'percent' : percent,
                      'mountpoint' : mountpoint,
+                     'type': type,
                      'device' : device }
 
         mounts.append(mountinfo)
@@ -66,13 +67,29 @@ class Hardware:
     return mounts
 
   def facterBin(self, facterHome):
-    return facterHome + "/bin/facter"
+    facterBin = facterHome + "/bin/facter"
+    if (os.path.exists(facterBin)):
+      return facterBin
+    else:
+      return "facter"
     pass
 
   def facterLib(self, facterHome):
     return facterHome + "/lib/"
     pass
 
+  def configureEnviron(self, environ):
+    if not AmbariConfig.config.has_option("puppet", "ruby_home"):
+      return environ
+    ruby_home = AmbariConfig.config.get("puppet", "ruby_home")
+    if os.path.exists(ruby_home):
+      """Only update ruby home if the config is configured"""
+      path = os.environ["PATH"]
+      if not ruby_home in path:
+        environ["PATH"] = ruby_home + os.path.sep + "bin"  + ":"+environ["PATH"] 
+      environ["MY_RUBY_HOME"] = ruby_home
+    return environ
+
   def parseFacterOutput(self, facterOutput):
     retDict = {}
     allLines = facterOutput.splitlines()
@@ -117,9 +134,12 @@ class Hardware:
         rubyLib = ""
         if os.environ.has_key("RUBYLIB"):
           rubyLib = os.environ["RUBYLIB"]
-          logger.info("Ruby Lib env from Env " + rubyLib)
-        rubyLib = rubyLib + ":" + self.facterLib(facterHome)
+          logger.info("RUBYLIB from Env " + rubyLib)
+        if not (self.facterLib(facterHome) in rubyLib):
+          rubyLib = rubyLib + ":" + self.facterLib(facterHome)
+        
         facterEnv["RUBYLIB"] = rubyLib
+        facterEnv = self.configureEnviron(facterEnv)
         logger.info("Setting RUBYLIB as: " + rubyLib)
         facter = subprocess.Popen([self.facterBin(facterHome)],
                                   stdout=subprocess.PIPE,
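Switching from df -kP to df -kPT adds a filesystem-type column, so each data row now splits into seven fields instead of six. The parse, isolated as a standalone function for clarity (osdisks here is a sketch, not the agent's method):

# Standalone sketch of the seven-column `df -kPT` parse.
import subprocess

def osdisks():
    mounts = []
    df = subprocess.Popen(["df", "-kPT"], stdout=subprocess.PIPE)
    dfdata = df.communicate()[0]
    for line in dfdata.splitlines()[1:]:  # skip the header row
        split = line.split()
        if len(split) == 7:  # rows with embedded spaces are skipped, as above
            device, fstype, size, used, available, percent, mountpoint = split
            mounts.append({'device': device, 'type': fstype, 'size': size,
                           'used': used, 'available': available,
                           'percent': percent, 'mountpoint': mountpoint})
    return mounts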
+ 5 - 35
ambari-agent/src/main/python/ambari_agent/Heartbeat.py

@@ -23,7 +23,6 @@ import logging
 from Hardware import Hardware
 from ActionQueue import ActionQueue
 from ServerStatus import ServerStatus
-from StatusCheck import StatusCheck
 import AmbariConfig
 import socket
 import time
@@ -32,18 +31,6 @@ from pprint import pprint, pformat
 
 logger = logging.getLogger()
 
-COMPONENTS = [
-               {"serviceName" : "HDFS",
-                "componentName" : "DATANODE"},
-               {"serviceName" : "HDFS",
-                "componentName" : "NAMENODE"},
-               {"serviceName" : "HDFS",
-                "componentName" : "SECONDARYNAMENODE"}
-]
-
-LIVE_STATUS = "LIVE"
-DEAD_STATUS = "DEAD"
-
 firstContact = True
 class Heartbeat:
 
@@ -53,40 +40,23 @@ class Heartbeat:
 
   def build(self, id='-1'):
     global clusterId, clusterDefinitionRevision, firstContact
-    serverStatus = ServerStatus()
     timestamp = int(time.time()*1000)
     queueResult = self.actionQueue.result()
-    installedRoleStates = serverStatus.build()
-    pidLookupPath = AmbariConfig.config.get('services','pidLookupPath')
-    serviceToPidMapFile = AmbariConfig.config.get('services','serviceToPidMapFile')
-    statusCheck = StatusCheck(pidLookupPath, serviceToPidMapFile)
-    servicesStatusesDict = {}
-    componentStatus = []
-    for component in COMPONENTS:
-      serviceStatus = statusCheck.getStatus(component["componentName"])
-      if serviceStatus == None:
-        logger.warn("There is no service to pid mapping for " + component["componentName"])
-      status = LIVE_STATUS if serviceStatus else DEAD_STATUS 
-      componentStatus.append({"componentName" : component["componentName"],
-                                   "msg" : "",
-                                   "status" : status,
-                                   "serviceName" : component["serviceName"],
-                                   "clusterName" : ""})
-     
+
 
     nodeStatus = { "status" : "HEALTHY",
                    "cause" : "NONE"}
 
     heartbeat = { 'responseId'        : int(id),
                   'timestamp'         : timestamp,
-                  'hostname'          : socket.gethostname(),
-                  'componentStatus'   : componentStatus,
+                  'hostname'          : socket.getfqdn(),
                   'nodeStatus'        : nodeStatus
                 }
     if len(queueResult) != 0:
-      heartbeat['reports'] = queueResult
+      heartbeat['reports'] = queueResult['reports']
+      heartbeat['componentStatus'] = queueResult['componentStatus']
       pass
-    logger.info("Status for node heartbeat: " + pformat(nodeStatus))
+    logger.info("Heartbeat dump: " + pformat(heartbeat))
     return heartbeat
 
 def main(argv=None):
+ 134 - 0
ambari-agent/src/main/python/ambari_agent/LiveStatus.py

@@ -0,0 +1,134 @@
+#!/usr/bin/env python2.6
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import json
+import logging
+from StatusCheck import StatusCheck
+import AmbariConfig
+import socket
+import time
+import traceback
+from pprint import pprint, pformat
+
+logger = logging.getLogger()
+
+class LiveStatus:
+
+  SERVICES = [
+    "HDFS", "MAPREDUCE", "GANGLIA", "HBASE",
+    "NAGIOS", "ZOOKEEPER", "OOZIE", "HCATALOG",
+    "KERBEROS", "TEMPLETON", "HIVE"
+  ]
+
+  COMPONENTS = [
+      {"serviceName" : "HDFS",
+       "componentName" : "DATANODE"},
+      {"serviceName" : "HDFS",
+       "componentName" : "NAMENODE"},
+      {"serviceName" : "HDFS",
+       "componentName" : "SECONDARY_NAMENODE"},
+#      {"serviceName" : "HDFS",
+#       "componentName" : "HDFS_CLIENT"},
+      {"serviceName" : "MAPREDUCE",
+       "componentName" : "JOBTRACKER"},
+      {"serviceName" : "MAPREDUCE",
+       "componentName" : "TASKTRACKER"},
+#      {"serviceName" : "MAPREDUCE",
+#       "componentName" : "MAPREDUCE_CLIENT"},
+      {"serviceName" : "GANGLIA",             #!
+       "componentName" : "GANGLIA_SERVER"},
+      {"serviceName" : "GANGLIA",             #!
+       "componentName" : "GANGLIA_MONITOR"},
+      {"serviceName" : "HBASE",               #!
+       "componentName" : "HBASE_MASTER"},
+      {"serviceName" : "HBASE",              #!
+       "componentName" : "HBASE_REGIONSERVER"},
+#      {"serviceName" : "HBASE",
+#       "componentName" : "HBASE_CLIENT"},
+      {"serviceName" : "NAGIOS",             #!
+       "componentName" : "NAGIOS_SERVER"},
+      {"serviceName" : "ZOOKEEPER",
+       "componentName" : "ZOOKEEPER_SERVER"},
+#      {"serviceName" : "ZOOKEEPER",
+#       "componentName" : "ZOOKEEPER_CLIENT"},
+      {"serviceName" : "OOZIE",
+       "componentName" : "OOZIE_SERVER"},
+#      {"serviceName" : "OOZIE",
+#       "componentName" : "OOZIE_CLIENT"},
+      {"serviceName" : "HCATALOG",            #!
+       "componentName" : "HCATALOG_SERVER"},
+      {"serviceName" : "KERBEROS",
+       "componentName" : "KERBEROS_SERVER"}, #!
+#      {"serviceName" : "TEMPLETON",
+#       "componentName" : "TEMPLETON_SERVER"},
+#      {"serviceName" : "TEMPLETON",
+#       "componentName" : "TEMPLETON_CLIENT"},
+      {"serviceName" : "HIVE",               #!
+       "componentName" : "HIVE_SERVER"},
+      {"serviceName" : "HIVE",               #!
+       "componentName" : "HIVE_METASTORE"},
+      {"serviceName" : "HIVE",               #!
+       "componentName" : "MYSQL_SERVER"},
+  ]
+
+  LIVE_STATUS = "STARTED"
+  DEAD_STATUS = "INSTALLED"
+
+  def __init__(self, cluster, service, component):
+    self.cluster = cluster
+    self.service = service
+    self.component = component
+
+
+  def belongsToService(self, component):
+    #TODO: Should also check belonging of server to cluster
+    return component['serviceName'] == self.service
+
+  # Live status was stripped from heartbeat after revision e1718dd
+  def build(self):
+    global SERVICES, COMPONENTS, LIVE_STATUS, DEAD_STATUS
+    pidLookupPath = AmbariConfig.config.get('services','pidLookupPath')
+    serviceToPidMapFile = AmbariConfig.config.get('services','serviceToPidMapFile')
+    statusCheck = StatusCheck(pidLookupPath, serviceToPidMapFile)
+    livestatus = None
+    for component in self.COMPONENTS:
+      if component["serviceName"] == self.service and component["componentName"] == self.component:
+        serviceStatus = statusCheck.getStatus(component["componentName"])
+        if serviceStatus is None:
+          logger.warn("There is no service to pid mapping for " + component["componentName"])
+        status = self.LIVE_STATUS if serviceStatus else self.DEAD_STATUS
+        livestatus ={"componentName" : component["componentName"],
+                       "msg" : "",
+                       "status" : status,
+                       "clusterName" : self.cluster,
+                       "serviceName" : self.service
+        }
+        break
+    logger.info("The live status for component " + str(self.component) + " of service " + \
+                str(self.service) + " is " + str(livestatus))
+    return livestatus
+
+def main(argv=None):
+  for service in LiveStatus.SERVICES:
+    livestatus = LiveStatus('', service, '')
+    print json.dumps(livestatus.build())
+
+if __name__ == '__main__':
+  main()

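The component states LiveStatus reports are the server-side lifecycle names: a component whose pid check succeeds is STARTED, anything else is reported as INSTALLED. In miniature (is_process_running stands in for StatusCheck.getStatus):

# Miniature version of the STARTED/INSTALLED mapping in LiveStatus.build().
LIVE_STATUS = "STARTED"
DEAD_STATUS = "INSTALLED"

def component_status(cluster, service, component, is_process_running):
    running = is_process_running(component)
    return {'clusterName': cluster, 'serviceName': service,
            'componentName': component, 'msg': '',
            'status': LIVE_STATUS if running else DEAD_STATUS}

print(component_status('c1', 'HDFS', 'DATANODE', lambda name: True))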
+ 22 - 9
ambari-agent/src/main/python/ambari_agent/NetUtil.py

@@ -1,16 +1,22 @@
-from httplib import HTTP
+from httplib import HTTPS
 from urlparse import urlparse
 import time
+import logging
+import pprint
+import traceback
+import httplib
+
+logger = logging.getLogger()
 
 class NetUtil:
 
   CONNECT_SERVER_RETRY_INTERVAL_SEC = 10
-  HEARTBEAT_IDDLE_INTERVAL_SEC = 3
-  HEARTBEAT_NOT_IDDLE_INTERVAL_SEC = 1
+  HEARTBEAT_IDDLE_INTERVAL_SEC = 10
+  HEARTBEAT_NOT_IDDLE_INTERVAL_SEC = 5
 
   # Url within server to request during status check. This url
   # should return HTTP code 200
-  SERVER_STATUS_REQUEST = "{0}/api/check"
+  SERVER_STATUS_REQUEST = "{0}/cert/ca"
 
   # For testing purposes
   DEBUG_STOP_RETRIES_FLAG = False
@@ -19,14 +25,19 @@ class NetUtil:
     """Try to connect to a given url. Result is True if url returns HTTP code 200, in any other case
     (like unreachable server or wrong HTTP code) result will be False
     """
+    logger.info("DEBUG:: Connecting to the following url " + url);
     try:
-      p = urlparse(url)
-      h = HTTP(p[1])
-      h.putrequest('HEAD', p[2])
-      h.endheaders()
-      if h.getreply()[0] == 200: return True
+      parsedurl = urlparse(url)
+      ca_connection = httplib.HTTPSConnection(parsedurl[1])
+      ca_connection.request("GET", parsedurl[2])
+      response = ca_connection.getresponse()
+      status = response.status
+      logger.info("DEBUG: Calling url received " + str(status))
+
+      if status == 200: return True
       else: return False
     except Exception, e:
+      logger.info("Failed to connect to " + str(url) + " due to " + str(e))
      return False
 
   def try_to_connect(self, server_url, max_retries, logger = None):
@@ -35,6 +46,8 @@ class NetUtil:
     attempts will be repeated forever until server is not reachable
     Returns count of retries
     """
+    if logger is not None:
+      logger.info("DEBUG: Trying to connect to the server at " + server_url)
     retries = 0
     while (max_retries == -1 or retries < max_retries) and not self.DEBUG_STOP_RETRIES_FLAG:
       server_is_up = self.checkURL(self.SERVER_STATUS_REQUEST.format(server_url))
+ 51 - 0
ambari-agent/src/main/python/ambari_agent/ProcessHelper.py

@@ -0,0 +1,51 @@
+#!/usr/bin/env python2.6
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import logging
+import traceback
+from shell import getTempFiles
+
+logger = logging.getLogger()
+
+
+if 'AMBARI_PID_DIR' in os.environ:
+    pidfile = os.environ['AMBARI_PID_DIR'] + "/ambari-agent.pid"
+else:
+    pidfile = "/var/run/ambari-agent/ambari-agent.pid"
+
+
+def stopAgent():
+  try:
+    os.unlink(pidfile)
+  except Exception:
+    logger.warn("Unable to remove: "+pidfile)
+    traceback.print_exc()
+
+  tempFiles = getTempFiles()
+  for tempFile in tempFiles:
+    if os.path.exists(tempFile):
+      try:
+          os.unlink(tempFile)
+      except Exception:
+          traceback.print_exc()
+          logger.warn("Unable to remove: "+tempFile)
+  os._exit(0)
+  pass

+ 63 - 4
ambari-agent/src/main/python/ambari_agent/Register.py

@@ -18,12 +18,15 @@ See the License for the specific language governing permissions and
 limitations under the License.
 limitations under the License.
 '''
 '''
 
 
+import sys
 import json
 import json
 from Hardware import Hardware
 from Hardware import Hardware
 from ActionQueue import ActionQueue
 from ActionQueue import ActionQueue
 from ServerStatus import ServerStatus
 from ServerStatus import ServerStatus
 import socket
 import socket
 import time
 import time
+import urllib2
+import subprocess
 
 
 
 
 firstContact = True
 firstContact = True
@@ -33,19 +36,75 @@ class Register:
   def __init__(self):
   def __init__(self):
     self.hardware = Hardware()
     self.hardware = Hardware()
 
 
+  def pfqdn(self):
+    try:
+      handle = urllib2.urlopen('http://169.254.169.254/latest/meta-data/public-hostname', '', 3)
+      str = handle.read()
+      handle.close()
+      return str
+    except Exception, e:
+      return socket.getfqdn()
+
   def build(self, id='-1'):
   def build(self, id='-1'):
     global clusterId, clusterDefinitionRevision, firstContact
     global clusterId, clusterDefinitionRevision, firstContact
     timestamp = int(time.time()*1000)
     timestamp = int(time.time()*1000)
     register = { 'responseId'        : int(id),
     register = { 'responseId'        : int(id),
                   'timestamp'         : timestamp,
                   'timestamp'         : timestamp,
-                  'hostname'          : socket.gethostname(),
+                  'hostname'          : socket.getfqdn(),
+                  'publicHostname'    : self.pfqdn(),
                   'hardwareProfile'   : self.hardware.get(),
                   'hardwareProfile'   : self.hardware.get(),
                 }
                 }
     return register
     return register
+
+def doExec(vals, key, command, preLF=False):
+  template = "{0}: {1} {2}"
+  try:
+    osStat = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    out, err = osStat.communicate()
+    if 0 != osStat.returncode or 0 == len(out.strip()):
+      print template.format(key, "UNAVAILABLE", "")
+    else:
+      if (preLF):
+        print template.format(key, "ok,\n", out.strip())
+      else:
+        print template.format(key, "ok,", out.strip())
+  except:
+    print template.format(key, "UNAVAILABLE", "")
+  
+
+# Linux only
+def machineInfo():
+  vals = { }
+  doExec(vals, 'hostname', ["hostname", "-f"])
+  doExec(vals, 'ip', ["hostname", "-i"])
+  doExec(vals, 'cpu', ["sh", "-c", "cat /proc/cpuinfo | grep 'model name' | awk -F': ' '{ print $2; }'"])
+  doExec(vals, 'memory', ["sh", "-c", "cat /proc/meminfo | grep MemTotal | awk -F': ' '{ print $2/1024/1024 \" GB\"; }'"])
+  doExec(vals, 'disks', ["df", "-h"], True)
+  doExec(vals, 'os', ["sh", "-c", "cat /etc/issue.net | head -1"])
+  doExec(vals, 'iptables', ["iptables", "-vnL"], True)
+  doExec(vals, 'selinux', ["sh", "-c", "cat /etc/selinux/config | grep ^SELINUX"])
+
+  rpm_req = { }
+  for REQ in (["yum", "rpm", "openssl", "curl", "wget", "net-snmp", "net-snmp-utils", "ntpd"]):
+   doExec(rpm_req, REQ, ["rpm", "-qa", REQ])
+  vals["required_packages"] = rpm_req
+
+  rpm_opt = { }
+  for OPT in (["ruby", "puppet", "nagios", "ganglia", "passenger", "hadoop"]):
+   doExec(rpm_opt, OPT, ["rpm", "-qa", OPT])
+  vals["optional_packages"] = rpm_opt
+
+  doExec(vals, "yum_repos", ["sh", "-c", "yum -C repolist enabled | egrep \"(AMBARI|HDP)\""], True)
+  # for SUSE-based agents
+  doExec(vals, "zypper_repos", ["sh", "-c", "zypper repos | egrep \"(AMBARI|HDP)\""], True)
+  
   
 def main(argv=None):
-  register = Register()
-  print json.dumps(register.build())
+  if len(argv) == 1:
+    register = Register()
+    print json.dumps(register.build())
+  else:
+    machineInfo()
 
 if __name__ == '__main__':
-  main()
+  main(sys.argv)
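
For context, the new pfqdn() helper follows the standard EC2 pattern: the link-local metadata service at 169.254.169.254 only answers on EC2 instances, so a short-timeout request there either yields the public hostname or fails fast, in which case the local FQDN is used instead. A minimal standalone sketch of the same idea (Python 2, as in the agent; the 3-second timeout mirrors the call above):

    import socket
    import urllib2

    def public_fqdn():
        # Only EC2 instances can reach the link-local metadata service.
        url = 'http://169.254.169.254/latest/meta-data/public-hostname'
        try:
            handle = urllib2.urlopen(url, None, 3)
            try:
                return handle.read().strip()
            finally:
                handle.close()
        except Exception:
            # Not on EC2, or metadata unreachable: fall back to the local FQDN.
            return socket.getfqdn()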

+ 24 - 5
ambari-agent/src/main/python/ambari_agent/RepoInstaller.py

@@ -25,19 +25,22 @@ from shell import shellRunner
 from manifestGenerator import writeImports
 from pprint import pprint, pformat
 import ast
+import urlparse, urllib
+import re
 
 PUPPET_EXT=".pp"
 
 logger = logging.getLogger()
 
 class RepoInstaller:
-  def __init__(self, parsedJson, path, modulesdir, taskId):
+  def __init__(self, parsedJson, path, modulesdir, taskId, config):
     self.parsedJson = parsedJson
     self.path = path
     self.modulesdir = modulesdir
     self.taskId = taskId
     self.sh = shellRunner()
-
+    self.config = config
+    
   def prepareReposInfo(self):
     params = {}
     self.repoInfoList = []
@@ -45,23 +48,37 @@ class RepoInstaller:
       params = self.parsedJson['hostLevelParams']
     if params.has_key('repo_info'):
       self.repoInfoList = params['repo_info']
-    self.repoInfoList = ast.literal_eval(self.repoInfoList)
+    logger.info("Repo List Info " + pformat(self.repoInfoList))
+    if (isinstance(self.repoInfoList, basestring)):
+      if (self.repoInfoList is not None and (len(self.repoInfoList) > 0)):
+        self.repoInfoList = ast.literal_eval(self.repoInfoList)
+      else:
+        self.repoInfoList = []
 
   def generateFiles(self):
     repoPuppetFiles = []
     for repo in self.repoInfoList:
       repoFile = open(self.path + os.sep + repo['repoId'] + '-' + 
                       str(self.taskId) + PUPPET_EXT, 'w+')
-      writeImports(repoFile, self.modulesdir, inputFileName='imports.txt')
+      importsfile = "imports.txt"
+      if self.config.has_option('puppet','imports_file'):
+        importsfile = self.config.get('puppet', 'imports_file')
+      writeImports(repoFile, self.modulesdir, importsfile)
       
       baseUrl = ''
       mirrorList = ''
       
       if repo.has_key('baseUrl'):
         baseUrl = repo['baseUrl']
+        baseUrl = baseUrl.decode('unicode-escape').encode('utf-8')
+        # Hack to take care of $ signs in the repo url
+        baseUrl = baseUrl.replace('$', '\$')
 
       if repo.has_key('mirrorsList'):
         mirrorList = repo['mirrorsList']
+        mirrorList = mirrorList.decode('unicode-escape').encode('utf-8')
+        # Hack to take care of $ signs in the repo url
+        mirrorList = mirrorList.replace('$', '\$')
 
       repoFile.write('node /default/ {')
       repoFile.write('class{ "hdp-repos::process_repo" : ' + ' os_type => "' + repo['osType'] +
@@ -80,10 +97,12 @@ class RepoInstaller:
 
 def main():
   #Test code
+  logging.basicConfig(level=logging.DEBUG)    
+  #test code
   jsonFile = open('test.json', 'r')
   jsonStr = jsonFile.read() 
   parsedJson = json.loads(jsonStr)
-  repoInstaller = RepoInstaller(parsedJson, '/tmp', '/home/centos/ambari_ws/ambari-agent/src/main/puppet/modules')
+  repoInstaller = RepoInstaller(parsedJson, '/tmp', '/home/centos/ambari_ws/ambari-agent/src/main/puppet/modules',0)
   repoInstaller.installRepos()
   
 if __name__ == '__main__':
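
The unicode-escape and $-escaping steps above exist because the repo URLs are written verbatim into a generated Puppet manifest, where a bare $ inside a double-quoted string is treated as variable interpolation (common in mirror URLs such as .../$releasever/$basearch). A small sketch of what the transformation does, using a hypothetical mirror URL:

    def escape_repo_url(url):
        # Normalize any escaped unicode, then protect literal $ from Puppet.
        url = url.decode('unicode-escape').encode('utf-8')
        return url.replace('$', '\\$')

    # escape_repo_url('http://mirror.example.com/os/$basearch')
    # -> 'http://mirror.example.com/os/\$basearch' (literal backslash-dollar)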

+ 8 - 3
ambari-agent/src/main/python/ambari_agent/StatusCheck.py

@@ -60,7 +60,7 @@ class StatusCheck:
       raise ValueError("Path argument must be valid directory")
 
     if not os.path.exists(mappingFilePath):
-      raise IOError("File with services to pid mapping doesn't exists")
+      raise IOError("File with services to pid mapping doesn't exist")
     self.path = path
     self.mappingFilePath = mappingFilePath
     self.sh = shellRunner()
@@ -73,8 +73,13 @@ class StatusCheck:
 
   def getIsLive(self, pidPath):
     isLive = False
-    pidFile = open(pidPath, 'r')
-    pid = int(pidFile.readline())
+    pid = -1
+    try:
+      pidFile = open(pidPath, 'r')
+      pid = int(pidFile.readline())
+    except IOError, e:
+      logger.warn("Can not open file " + str(pidPath) + " due to " + str(e))
+      return isLive
     res = self.sh.run(['ps -p', str(pid), '-f'])
     lines = res['output'].strip().split(os.linesep)
     try:
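
The hardened getIsLive() above still shells out to ps -p to decide whether the pid is running. The same check can be done without a subprocess by sending signal 0, which tests for existence without affecting the process; a sketch under that assumption (not how StatusCheck itself does it):

    import os

    def pid_is_live(pid_path):
        # A missing or corrupt pidfile means the service is not live.
        try:
            pid = int(open(pid_path, 'r').readline())
        except (IOError, ValueError):
            return False
        try:
            os.kill(pid, 0)  # signal 0: existence/permission check only
            return True
        except OSError:
            return False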

+ 39 - 38
ambari-agent/src/main/python/ambari_agent/main.py

@@ -22,14 +22,15 @@ import logging
 import logging.handlers
 import code
 import signal
+from optparse import OptionParser
 import sys, traceback
 import os
 import time
 import ConfigParser
+import ProcessHelper
 from createDaemon import createDaemon
 from Controller import Controller
-from shell import getTempFiles
-from shell import killstaleprocesses 
+from shell import killstaleprocesses
 import AmbariConfig
 from security import CertificateManager
 from NetUtil import NetUtil
@@ -37,15 +38,10 @@ from NetUtil import NetUtil
 logger = logging.getLogger()
 agentPid = os.getpid()
 
-if 'AMBARI_PID_DIR' in os.environ:
-  pidfile = os.environ['AMBARI_PID_DIR'] + "/ambari-agent.pid"
-else:
-  pidfile = "/var/run/ambari/ambari-agent.pid"
-
 if 'AMBARI_LOG_DIR' in os.environ:
   logfile = os.environ['AMBARI_LOG_DIR'] + "/ambari-agent.log"
 else:
-  logfile = "/var/log/ambari/ambari-agent.log"
+  logfile = "/var/log/ambari-agent/ambari-agent.log"
 
 def signal_handler(signum, frame):
   #we want the handler to run only for the agent process and not
@@ -53,21 +49,7 @@ def signal_handler(signum, frame):
   if (os.getpid() != agentPid):
     os._exit(0)
   logger.info('signal received, exiting.')
-  try:
-    os.unlink(pidfile)
-  except Exception:
-    logger.warn("Unable to remove: "+pidfile)
-    traceback.print_exc()
-
-  tempFiles = getTempFiles()
-  for tempFile in tempFiles:
-    if os.path.exists(tempFile):
-      try:
-        os.unlink(tempFile)
-      except Exception:
-        traceback.print_exc()
-        logger.warn("Unable to remove: "+tempFile)
-  os._exit(0)
+  ProcessHelper.stopAgent()
 
 def debug(sig, frame):
     """Interrupt running process, and provide a python prompt for
@@ -85,6 +67,25 @@ def debug(sig, frame):
 
 def main():
   global config
+  parser = OptionParser()
+  parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="verbose log output", default=False)
+  (options, args) = parser.parse_args()
+
+  formatstr = "%(levelname)s %(asctime)s %(filename)s:%(lineno)d - %(message)s"
+  formatter = logging.Formatter(formatstr)
+  rotateLog = logging.handlers.RotatingFileHandler(logfile, "a", 10000000, 25)
+  rotateLog.setFormatter(formatter)
+  logger.addHandler(rotateLog)
+
+  if options.verbose:
+    logging.basicConfig(format=formatstr, level=logging.DEBUG, filename=logfile)
+    logger.setLevel(logging.DEBUG)
+  else:
+    logging.basicConfig(format=formatstr, level=logging.INFO, filename=logfile)
+    logger.setLevel(logging.INFO)
+
+  logger.debug("loglevel=logging.DEBUG")
+
   default_cfg = { 'agent' : { 'prefix' : '/home/ambari' } }
   config = ConfigParser.RawConfigParser(default_cfg)
   signal.signal(signal.SIGINT, signal_handler)
@@ -92,42 +93,41 @@ def main():
   signal.signal(signal.SIGUSR1, debug)
   if (len(sys.argv) >1) and sys.argv[1]=='stop':
     # stop existing Ambari agent
+    pid = -1
     try:
-      f = open(pidfile, 'r')
+      f = open(ProcessHelper.pidfile, 'r')
       pid = f.read()
       pid = int(pid)
       f.close()
       os.kill(pid, signal.SIGTERM)
       time.sleep(5)
-      if os.path.exists(pidfile):
+      if os.path.exists(ProcessHelper.pidfile):
         raise Exception("PID file still exists.")
       os._exit(0)
     except Exception, err:
-      os.kill(pid, signal.SIGKILL)
+      if pid == -1:
+        print ("Agent process is not running")
+      else:
+        os.kill(pid, signal.SIGKILL)
       os._exit(1)
 
   # Check if there is another instance running
-  if os.path.isfile(pidfile):
-    print("%s already exists, exiting" % pidfile)
+  if os.path.isfile(ProcessHelper.pidfile):
+    print("%s already exists, exiting" % ProcessHelper.pidfile)
     sys.exit(1)
   else:
     # Daemonize current instance of Ambari Agent
     #retCode = createDaemon()
     pid = str(os.getpid())
-    file(pidfile, 'w').write(pid)
-    
-  logger.setLevel(logging.INFO)
-  formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - %(message)s")
-  rotateLog = logging.handlers.RotatingFileHandler(logfile, "a", 10000000, 10)
-  rotateLog.setFormatter(formatter)
-  logger.addHandler(rotateLog)
+    file(ProcessHelper.pidfile, 'w').write(pid)
+
   credential = None
 
   # Check for ambari configuration file.
   try:
     config = AmbariConfig.config
-    if os.path.exists('/etc/ambari/ambari.ini'):
-      config.read('/etc/ambari/ambari.ini')
+    if os.path.exists('/etc/ambari-agent/ambari-agent.ini'):
+      config.read('/etc/ambari-agent/ambari-agent.ini')
       AmbariConfig.setConfig(config)
     else:
       raise Exception("No config found, use default")
@@ -137,7 +137,8 @@ def main():
   killstaleprocesses()
 
   server_url = 'https://' + config.get('server', 'hostname') + ':' + config.get('server', 'url_port')
-  logger.info('Connecting to Server at: ' + server_url)
+  print("Connecting to the server at " + server_url + "...")
+  logger.info('Connecting to the server at: ' + server_url)
 
   # Wait until server is reachable
   netutil = NetUtil()
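
Pulled out of the surrounding diff, the stop path now reads: load the pid from ProcessHelper.pidfile, send SIGTERM, give the daemon five seconds, and escalate to SIGKILL only if the pidfile is still present. A condensed sketch of that sequence (the pidfile path below is illustrative; the real one lives in ProcessHelper):

    import os, signal, time

    def stop_agent(pidfile='/var/run/ambari-agent/ambari-agent.pid'):
        try:
            pid = int(open(pidfile, 'r').read())
        except IOError:
            print("Agent process is not running")
            return 1
        os.kill(pid, signal.SIGTERM)   # polite shutdown first
        time.sleep(5)
        if os.path.exists(pidfile):    # the handler removes it on clean exit
            os.kill(pid, signal.SIGKILL)
        return 0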

+ 37 - 19
ambari-agent/src/main/python/ambari_agent/manifestGenerator.py

@@ -23,21 +23,24 @@ import os.path
 import logging
 from uuid import getnode as get_mac
 from shell import shellRunner
+from datetime import datetime
+from AmbariConfig import AmbariConfig
 
 
 logger = logging.getLogger()
 
-xml_configurations_keys= ["hdfs-site", "core-site", 
+non_global_configuration_types = ["hdfs-site", "core-site", 
                           "mapred-queue-acls",
                              "hadoop-policy", "mapred-site", 
                              "capacity-scheduler", "hbase-site",
                              "hbase-policy", "hive-site", "oozie-site", 
-                             "templeton-site"]
+                             "templeton-site", "hdfs-exclude-file"]
 
 #read static imports from file and write them to manifest
 def writeImports(outputFile, modulesdir, inputFileName='imports.txt'):
   inputFile = open(inputFileName, 'r')
   logger.info("Modules dir is " + modulesdir)
+  outputFile.write('#' + datetime.now().strftime('%d.%m.%Y %H:%M:%S') + os.linesep)
   for line in inputFile:
     modulename = line.rstrip()
     line = "import '" + modulesdir + os.sep + modulename + "'" + os.linesep
@@ -45,7 +48,7 @@ def writeImports(outputFile, modulesdir, inputFileName='imports.txt'):
     
   inputFile.close()
 
-def generateManifest(parsedJson, fileName, modulesdir):
+def generateManifest(parsedJson, fileName, modulesdir, ambariconfig):
   logger.info("JSON Received:")
   logger.info(json.dumps(parsedJson, sort_keys=True, indent=4))
 #reading json
@@ -62,7 +65,7 @@ def generateManifest(parsedJson, fileName, modulesdir):
   if 'configurations' in parsedJson:
     if parsedJson['configurations']:
       configurations = parsedJson['configurations']
-  xmlConfigurationsKeys = xml_configurations_keys
+  nonGlobalConfigurationsKeys = non_global_configuration_types
   #hostAttributes = parsedJson['hostAttributes']
   roleParams = {}
   if 'roleParams' in parsedJson:
@@ -73,9 +76,14 @@ def generateManifest(parsedJson, fileName, modulesdir):
             'roleParams' : roleParams}]
   #writing manifest
   manifest = open(fileName, 'w')
-
+  #Check for Ambari Config and make sure you pick the right imports file
+  importsfile = "imports.txt"
+  if ambariconfig.has_option('puppet', 'imports_file') :
+    importsfile = ambariconfig.get('puppet', 'imports_file')
+    
+  logger.info("Using imports file " + importsfile)   
   #writing imports from external static file
-  writeImports(outputFile=manifest, modulesdir=modulesdir)
+  writeImports(outputFile=manifest, modulesdir=modulesdir, inputFileName=importsfile)
   
   #writing nodes
   writeNodes(manifest, clusterHostInfo)
@@ -84,19 +92,19 @@ def generateManifest(parsedJson, fileName, modulesdir):
   writeParams(manifest, params, modulesdir)
   
   
-  xmlConfigurations = {}
+  nonGlobalConfigurations = {}
   flatConfigurations = {}
 
   if configurations: 
     for configKey in configurations.iterkeys():
-      if configKey in xmlConfigurationsKeys:
-        xmlConfigurations[configKey] = configurations[configKey]
+      if configKey in nonGlobalConfigurationsKeys:
+        nonGlobalConfigurations[configKey] = configurations[configKey]
       else:
         flatConfigurations[configKey] = configurations[configKey]
       
   #writing config maps
-  if (xmlConfigurations):
-    writeXmlConfigurations(manifest, xmlConfigurations)
+  if (nonGlobalConfigurations):
+    writeNonGlobalConfigurations(manifest, nonGlobalConfigurations)
   if (flatConfigurations):
     writeFlatConfigurations(manifest, flatConfigurations)
 
@@ -104,7 +112,7 @@ def generateManifest(parsedJson, fileName, modulesdir):
   #writeHostAttributes(manifest, hostAttributes)
 
   #writing task definitions 
-  writeTasks(manifest, roles)
+  writeTasks(manifest, roles, ambariconfig)
      
   manifest.close()
     
@@ -169,12 +177,15 @@ def writeHostAttributes(outputFile, hostAttributes):
 
 #write flat configurations
 def writeFlatConfigurations(outputFile, flatConfigs):
+  flatDict = {}
   for flatConfigName in flatConfigs.iterkeys():
     for flatConfig in flatConfigs[flatConfigName].iterkeys():
-      outputFile.write('$' + flatConfig + ' = "' + flatConfigs[flatConfigName][flatConfig] + '"' + os.linesep)
+      flatDict[flatConfig] = flatConfigs[flatConfigName][flatConfig]
+  for gconfigKey in flatDict.iterkeys():
+    outputFile.write('$' + gconfigKey + ' = "' + flatDict[gconfigKey] + '"' + os.linesep)
 
 #write xml configurations
-def writeXmlConfigurations(outputFile, xmlConfigs):
+def writeNonGlobalConfigurations(outputFile, xmlConfigs):
   outputFile.write('$configuration =  {\n')
 
   for configName in xmlConfigs.iterkeys():
@@ -184,7 +195,7 @@ def writeXmlConfigurations(outputFile, xmlConfigs):
     outputFile.write(configName + '=> {\n')
     coma = ''
     for configParam in config.iterkeys():
-      outputFile.write(coma + '"' + configParam + '" => "' + config[configParam] + '"')
+      outputFile.write(coma + '"' + configParam + '" => \'' + config[configParam] + '\'')
       coma = ',\n'
 
     outputFile.write('\n},\n')
@@ -192,13 +203,21 @@ def writeXmlConfigurations(outputFile, xmlConfigs):
   outputFile.write('\n}\n')
 
 #write node tasks
-def writeTasks(outputFile, roles):
+def writeTasks(outputFile, roles, ambariconfig):
   #reading dictionaries
-  rolesToClassFile = open('rolesToClass.dict', 'r')
+  rolestoclass = "rolesToClass.dict"
+  if ambariconfig.has_option('puppet','roles_to_class'):
+    rolestoclass = ambariconfig.get('puppet', 'roles_to_class')
+                              
+  rolesToClassFile = open(rolestoclass, 'r')
   rolesToClass = readDict(rolesToClassFile)
   rolesToClassFile.close()
 
-  serviceStatesFile =  open('serviceStates.dict', 'r')
+  servicestates = "serviceStates.dict"
+  if ambariconfig.has_option('puppet','service_states'):
+    servicestates = ambariconfig.get('puppet', 'service_states')
+                              
+  serviceStatesFile =  open(servicestates, 'r')
   serviceStates = readDict(serviceStatesFile)
   serviceStatesFile.close()
 
@@ -267,7 +286,6 @@ def main():
   parsedJson = json.loads(inputJsonStr)
   generateManifest(parsedJson, 'site.pp', modulesdir)
 
-  installRepos()
 if __name__ == '__main__':
   main()
 
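Each of the config lookups added above (imports_file, roles_to_class, service_states) follows the same fall-back pattern: use the value from the agent config when present, otherwise the file bundled with the agent. A minimal sketch of that pattern with ConfigParser (section and option names are those in the diff; the ini path is the one this commit introduces elsewhere):

    import ConfigParser

    def cfg_or_default(config, section, option, default):
        # Prefer an operator-supplied override, else the bundled default.
        if config.has_option(section, option):
            return config.get(section, option)
        return default

    config = ConfigParser.RawConfigParser()
    config.read('/etc/ambari-agent/ambari-agent.ini')
    importsfile = cfg_or_default(config, 'puppet', 'imports_file', 'imports.txt')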

+ 146 - 76
ambari-agent/src/main/python/ambari_agent/puppetExecutor.py

@@ -23,8 +23,10 @@ import logging
 import subprocess
 from manifestGenerator import generateManifest
 from RepoInstaller import RepoInstaller
-import pprint
+import pprint, threading
 from Grep import Grep
+from threading import Thread
+import traceback
 
 logger = logging.getLogger()
 
@@ -36,29 +38,58 @@ class puppetExecutor:
   # How many lines from command output send to server
   OUTPUT_LAST_LINES = 10
   # How many lines from command error output send to server (before Err phrase)
-  ERROR_LAST_LINES_BEFORE = 10
+  ERROR_LAST_LINES_BEFORE = 30
   # How many lines from command error output send to server (after Err phrase)
   ERROR_LAST_LINES_AFTER = 30
 
+  # How many seconds will pass before running puppet is terminated on timeout
+  PUPPET_TIMEOUT_SECONDS = 600
+
+  event = threading.Event()
+  last_puppet_has_been_killed = False
+
   NO_ERROR = "none"
 
-  def __init__(self, puppetModule, puppetInstall, facterInstall, tmpDir):
+  def __init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config):
     self.puppetModule = puppetModule
     self.puppetInstall = puppetInstall
     self.facterInstall = facterInstall
     self.tmpDir = tmpDir
+    self.reposInstalled = False
+    self.config = config
 
+  def configureEnviron(self, environ):
+    if not self.config.has_option("puppet", "ruby_home"):
+      return environ
+    ruby_home = self.config.get("puppet", "ruby_home")
+    if os.path.exists(ruby_home):
+      """Only update ruby home if the config is configured"""
+      path = os.environ["PATH"]
+      if not ruby_home in path:
+        environ["PATH"] = ruby_home + os.path.sep + "bin"  + ":"+environ["PATH"] 
+      environ["MY_RUBY_HOME"] = ruby_home
+    return environ
+    
   def getPuppetBinary(self):
-    return os.path.join(self.puppetInstall, "bin", "puppet") 
+    puppetbin = os.path.join(self.puppetInstall, "bin", "puppet") 
+    if (os.path.exists(puppetbin)):
+      return puppetbin
+    else:
+      logger.info("Using default puppet on the host : " + puppetbin 
+                  + " does not exist.")
+      return "puppet"
      
+  def deployRepos(self, command, tmpDir, modulesdir, taskId):
+    """ Hack to only create the repo files once """
+    result = []
+    if (not self.reposInstalled):
+      repoInstaller = RepoInstaller(command, tmpDir, modulesdir, taskId, self.config)
+      result = repoInstaller.installRepos()
+    return result
+  
   def puppetCommand(self, sitepp):
     modules = self.puppetModule
-    puppetcommand = [];
-    puppetcommand.append(self.getPuppetBinary())
-    puppetcommand.append("apply")
-    puppetcommand.append("--confdir=" + modules)
-    puppetcommand.append("--detailed-exitcodes")
-    puppetcommand.append(sitepp)
+    puppetcommand = [self.getPuppetBinary(), "apply", "--confdir=" + modules, "--detailed-exitcodes", sitepp]
     return puppetcommand
   
   def facterLib(self):
@@ -68,85 +99,124 @@ class puppetExecutor:
   def puppetLib(self):
     return self.puppetInstall + "/lib"
     pass
-      
-  def runCommand(self, command):
-    result = {}
-    taskId = 0;
+
+  def condenseOutput(self, stdout, stderr, retcode):
     grep = Grep()
+    if stderr == self.NO_ERROR:
+      result = grep.tail(stdout, self.OUTPUT_LAST_LINES)
+    else:
+      result = grep.grep(stdout, "fail", self.ERROR_LAST_LINES_BEFORE, self.ERROR_LAST_LINES_AFTER)
+      if result is None: # Second try
+       result = grep.grep(stdout, "err", self.ERROR_LAST_LINES_BEFORE, self.ERROR_LAST_LINES_AFTER)
+    filteredresult = grep.filterMarkup(result)
+    return filteredresult
+
+  def isSuccessfull(self, returncode):
+    return not self.last_puppet_has_been_killed and (returncode == 0 or returncode == 2)
+
+  def runCommand(self, command, tmpoutfile, tmperrfile):
+    result = {}
+    taskId = 0
     if command.has_key("taskId"):
     if command.has_key("taskId"):
       taskId = command['taskId']
       taskId = command['taskId']
       
       
     puppetEnv = os.environ
     puppetEnv = os.environ
     #Install repos
     #Install repos
     modulesdir = self.puppetModule + "/modules"
     modulesdir = self.puppetModule + "/modules"
-    repoInstaller = RepoInstaller(command, self.tmpDir, modulesdir, taskId)
-
-    puppetFiles = repoInstaller.installRepos()
+    puppetFiles = self.deployRepos(command, self.tmpDir, modulesdir, taskId)
     siteppFileName = os.path.join(self.tmpDir, "site-" + str(taskId) + ".pp") 
     puppetFiles.append(siteppFileName)
-    generateManifest(command, siteppFileName, modulesdir)
+    generateManifest(command, siteppFileName, modulesdir, self.config)
     #Run all puppet commands, from manifest generator and for repos installation
     #Appending outputs and errors, exitcode - maximal from all
     for puppetFile in puppetFiles:
-      puppetcommand = self.puppetCommand(puppetFile)
-      """ Run the command and make sure the output gets propagated"""
-      rubyLib = ""
-      if os.environ.has_key("RUBYLIB"):
-        rubyLib = os.environ["RUBYLIB"]
-        logger.info("Ruby Lib env from Env " + rubyLib)
-      rubyLib = rubyLib + ":" + self.facterLib() + ":" + self.puppetLib()
-      puppetEnv["RUBYLIB"] = rubyLib
-      logger.info("Setting RUBYLIB as: " + rubyLib)
-      logger.info("Running command " + pprint.pformat(puppetcommand))
-      puppet = subprocess.Popen(puppetcommand,
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE,
-                                    env=puppetEnv)
-      stderr_out = puppet.communicate()
-      error = "none"
-      returncode = 0
-      if (puppet.returncode != 0 and puppet.returncode != 2) :
-        returncode = puppet.returncode
-        error = stderr_out[1]
-        logging.error("Error running puppet: \n" + stderr_out[1])
-        pass
-		
-      if result.has_key("stderr"):
-        result["stderr"] = result["stderr"] + os.linesep + error
-      else:
-        result["stderr"] = error
-      puppetOutput = stderr_out[0]
-      logger.info("Output from puppet :\n" + puppetOutput)
-      if result.has_key("exitcode"):
-        result["exitcode"] = max(returncode, result["exitcode"])
-      else:
-        result["exitcode"] = returncode
-        
-
-      if result.has_key("stdout"):
-        result["stdout"] = result["stdout"] + os.linesep + puppetOutput
-      else:
-        result["stdout"] = puppetOutput
-
-    if error == self.NO_ERROR:
-      if result.has_key("stdout"):
-        result["stdout"] = result["stdout"] + os.linesep + \
-          str(grep.tail(puppetOutput, self.OUTPUT_LAST_LINES))
-      else:
-        result["stdout"] = grep.tail(puppetOutput, self.OUTPUT_LAST_LINES)
-    else:
-      if result.has_key("stdout"):
-        result["stdout"] = result["stdout"] + os.linesep + \
-        str(grep.grep(puppetOutput, "err", self.ERROR_LAST_LINES_BEFORE, 
-                      self.ERROR_LAST_LINES_AFTER))
-      else:
-        result["stdout"] = str(grep.grep(puppetOutput, "err", 
-                                         self.ERROR_LAST_LINES_BEFORE, 
-                                         self.ERROR_LAST_LINES_AFTER))
-	
+      self.runPuppetFile(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile)
+      # Check if one of the puppet command fails and error out
+      if not self.isSuccessfull(result["exitcode"]):
+        break
+
+    if self.isSuccessfull(result["exitcode"]):
+      # Check if all the repos were installed or not and reset the flag
+      self.reposInstalled = True
+      
     logger.info("ExitCode : "  + str(result["exitcode"]))
     logger.info("ExitCode : "  + str(result["exitcode"]))
     return result
     return result
- 
+
+  def runPuppetFile(self, puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
+    """ Run the command and make sure the output gets propagated"""
+    puppetcommand = self.puppetCommand(puppetFile)
+    rubyLib = ""
+    if os.environ.has_key("RUBYLIB"):
+      rubyLib = os.environ["RUBYLIB"]
+      logger.info("RUBYLIB from Env " + rubyLib)
+    if not (self.facterLib() in rubyLib):
+      rubyLib = rubyLib + ":" + self.facterLib()
+    if not (self.puppetLib() in rubyLib):
+      rubyLib = rubyLib + ":" + self.puppetLib()
+    tmpout =  open(tmpoutfile, 'w')
+    tmperr =  open(tmperrfile, 'w')
+    puppetEnv["RUBYLIB"] = rubyLib
+    puppetEnv = self.configureEnviron(puppetEnv)
+    logger.info("Setting RUBYLIB as: " + rubyLib)
+    logger.info("Running command " + pprint.pformat(puppetcommand))
+    puppet = self.lauch_puppet_subprocess(puppetcommand,tmpout, tmperr, puppetEnv)
+    logger.info("Launching watchdog thread")
+    self.event.clear()
+    self.last_puppet_has_been_killed = False
+    thread = Thread(target =  self.puppet_watchdog_func, args = (puppet, ))
+    thread.start()
+    # Waiting for process to finished or killed
+    puppet.communicate()
+    self.event.set()
+    thread.join()
+    # Building results
+    error = self.NO_ERROR
+    returncode = 0
+    if not self.isSuccessfull(puppet.returncode):
+      returncode = puppet.returncode
+      error = open(tmperrfile, 'r').read()
+      logging.error("Error running puppet: \n" + str(error))
+      pass
+    if self.last_puppet_has_been_killed:
+      error = str(error) + "\n Puppet has been killed due to timeout"
+      returncode = 999
+    if result.has_key("stderr"):
+      result["stderr"] = result["stderr"] + os.linesep + str(error)
+    else:
+      result["stderr"] = str(error)
+    puppetOutput = open(tmpoutfile, 'r').read()
+    logger.info("Output from puppet :\n" + puppetOutput)
+    logger.info("Puppet exit code is " + str(returncode))
+    if result.has_key("exitcode"):
+      result["exitcode"] = max(returncode, result["exitcode"])
+    else:
+      result["exitcode"] = returncode
+    condensed = self.condenseOutput(puppetOutput, error, returncode)
+    if result.has_key("stdout"):
+      result["stdout"] = result["stdout"] + os.linesep + str(condensed)
+    else:
+      result["stdout"] = str(condensed)
+    return result
+
+  def lauch_puppet_subprocess(self, puppetcommand, tmpout, tmperr, puppetEnv):
+    """
+    Creates subprocess with given parameters. This functionality was moved to separate method
+    to make possible unit testing
+    """
+    return subprocess.Popen(puppetcommand,
+      stdout=tmpout,
+      stderr=tmperr,
+      env=puppetEnv)
+
+  def puppet_watchdog_func(self, puppet):
+    self.event.wait(self.PUPPET_TIMEOUT_SECONDS)
+    if puppet.returncode is None:
+      logger.error("Task timed out and will be killed")
+      puppet.terminate()
+      self.last_puppet_has_been_killed = True
+    pass
+
+
 def main():
   logging.basicConfig(level=logging.DEBUG)    
   #test code
@@ -161,7 +231,7 @@ def main():
   jsonFile = open('test.json', 'r')
   jsonStr = jsonFile.read() 
   parsedJson = json.loads(jsonStr)
-  result = puppetInstance.runCommand(parsedJson)
+  result = puppetInstance.runCommand(parsedJson, '/tmp/out.txt', '/tmp/err.txt')
   logger.debug(result)
   
 if __name__ == '__main__':
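
The watchdog introduced above is the classic Event-based timeout: a helper thread waits on an Event for up to PUPPET_TIMEOUT_SECONDS; if the main thread's communicate() returns first, it sets the Event and the watchdog exits quietly, otherwise the wait expires and the still-running process is terminated. A self-contained sketch of the same shape (600 seconds matches the constant above):

    import subprocess, threading

    TIMEOUT_SECONDS = 600

    def run_with_watchdog(cmd):
        done = threading.Event()
        proc = subprocess.Popen(cmd)

        def watchdog():
            done.wait(TIMEOUT_SECONDS)      # returns early once done is set
            if proc.returncode is None:     # still running: timed out
                proc.terminate()

        t = threading.Thread(target=watchdog)
        t.start()
        proc.communicate()                  # block until exit or termination
        done.set()
        t.join()
        return proc.returncode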

+ 4 - 4
ambari-agent/src/main/python/ambari_agent/rolesToClass.dict

@@ -18,9 +18,9 @@ HIVE_CLIENT = hdp-hive::client
 HCATALOG_CLIENT = hdp-hcat
 HCATALOG_SERVER = hdp-hcat::server
 HIVE_SERVER = hdp-hive::server
-HIVE_MYSQL = hdp-mysql::server
-TEMPLETON_SERVER = hdp-templeton::server
-TEMPLETON_CLIENT = hdp-templeton::client
+HIVE_METASTORE = hdp-hive::metastore
+MYSQL_SERVER = hdp-mysql::server
+WEBHCAT_SERVER = hdp-templeton::server
 DASHBOARD = hdp-dashboard
 NAGIOS_SERVER = hdp-nagios::server
 GANGLIA_SERVER = hdp-ganglia::server
@@ -36,6 +36,6 @@ HCAT_SERVICE_CHECK = hdp-hcat::hcat::service_check
 OOZIE_SERVICE_CHECK = hdp-oozie::oozie::service_check
 PIG_SERVICE_CHECK = hdp-pig::pig::service_check
 SQOOP_SERVICE_CHECK = hdp-sqoop::sqoop::service_check
-TEMPLETON_SERVICE_CHECK = hdp-templeton::templeton::service_check
+WEBHCAT_SERVICE_CHECK = hdp-templeton::templeton::service_check
 DASHBOARD_SERVICE_CHECK = hdp-dashboard::dashboard::service_check
 DECOMMISSION_DATANODE = hdp-hadoop::hdfs::decommission

+ 53 - 15
ambari-agent/src/main/python/ambari_agent/security.py

@@ -11,6 +11,7 @@ from subprocess import Popen, PIPE
 import AmbariConfig
 import json
 import pprint
+import traceback
 logger = logging.getLogger()
 
 GEN_AGENT_KEY="openssl req -new -newkey rsa:1024 -nodes -keyout %(keysdir)s/%(hostname)s.key\
@@ -19,8 +20,18 @@ GEN_AGENT_KEY="openssl req -new -newkey rsa:1024 -nodes -keyout %(keysdir)s/%(ho
 
 
 class VerifiedHTTPSConnection(httplib.HTTPSConnection):
+  """ Connecting using ssl wrapped sockets """
+  def __init__(self, host, port=None, key_file=None, cert_file=None,
+                     strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
+    httplib.HTTPSConnection.__init__(self, host, port=port)
+    pass
+     
   def connect(self):
-    sock = socket.create_connection((self.host, self.port), self.timeout)
+    if self.sock:
+      self.sock.close()
+    logger.info("SSL Connect being called.. connecting to the server")
+    sock = socket.create_connection((self.host, self.port), 60)
+    sock.setsockopt( socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
     if self._tunnel_host:
       self.sock = sock
       self._tunnel()
@@ -36,26 +47,52 @@ class VerifiedHTTPSConnection(httplib.HTTPSConnection):
                                 certfile=agent_crt,
                                 cert_reqs=ssl.CERT_REQUIRED,
                                 ca_certs=server_crt)
-class VerifiedHTTPSHandler(urllib2.HTTPSHandler):
-  def __init__(self, connection_class = VerifiedHTTPSConnection):
-    self.specialized_conn_class = connection_class
-    urllib2.HTTPSHandler.__init__(self)
-  def https_open(self, req):
-    return self.do_open(self.specialized_conn_class, req)
 
-def secured_url_open(req):
-  logger.info("Secured url open")
-  https_handler = VerifiedHTTPSHandler()
-  url_opener = urllib2.build_opener(https_handler)
-  stream = url_opener.open(req)
-  return stream
 
+class CachedHTTPSConnection:
+  """ Caches a ssl socket and uses a single https connection to the server. """
+  
+  def __init__(self, config):
+    self.connected = False;
+    self.config = config
+    self.server = config.get('server', 'hostname')
+    self.port = config.get('server', 'secured_url_port')
+    self.connect()
+  
+  def connect(self):
+      if  not self.connected:
+        self.httpsconn = VerifiedHTTPSConnection(self.server, self.port)
+        self.httpsconn.connect()
+        self.connected = True
+      # possible exceptions are catched and processed in Controller
+
+  
+  def forceClear(self):
+    self.httpsconn = VerifiedHTTPSConnection(self.server, self.port)
+    self.connect()
+    
+  def request(self, req): 
+    self.connect()
+    try:
+      self.httpsconn.request(req.get_method(), req.get_full_url(), 
+                                  req.get_data(), req.headers)
+      response = self.httpsconn.getresponse()
+      readResponse = response.read()
+    except Exception as ex:
+      # This exception is catched later in Controller
+      logger.debug("Error in sending/receving data from the server " +
+                   traceback.format_exc())
+      self.connected = False
+      raise IOError("Error occured during connecting to the server: " + str(ex))
+    return readResponse
+  
 class CertificateManager():
   def __init__(self, config):
     self.config = config
     self.keysdir = self.config.get('security', 'keysdir')
     self.server_crt=self.config.get('security', 'server_crt')
-    self.server_url = 'https://' + self.config.get('server', 'hostname') + ':' + self.config.get('server', 'url_port')
+    self.server_url = 'https://' + self.config.get('server', 'hostname') + ':' \
+       + self.config.get('server', 'url_port')
     
   def getAgentKeyName(self):
     keysdir = self.config.get('security', 'keysdir')
@@ -100,6 +137,7 @@ class CertificateManager():
             
   def loadSrvrCrt(self):
     get_ca_url = self.server_url + '/cert/ca/'
+    logger.info("Downloading server cert from " + get_ca_url)
     stream = urllib2.urlopen(get_ca_url)
     response = stream.read()
     stream.close()
@@ -137,4 +175,4 @@ class CertificateManager():
     p.wait()
       
   def initSecurity(self):
-    self.checkCertExists()
+    self.checkCertExists()
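
Callers of the new CachedHTTPSConnection keep building urllib2.Request objects as before; only the transport changes, with one SSL handshake reused across requests and an IOError raised (and the cached socket invalidated) on failure. A hypothetical usage sketch — the URL and payload here are illustrative, not taken from the agent:

    import urllib2
    from security import CachedHTTPSConnection
    import AmbariConfig

    config = AmbariConfig.AmbariConfig().getConfig()
    conn = CachedHTTPSConnection(config)   # handshakes once, then reuses the socket
    req = urllib2.Request('https://ambari-server:8441/agent/register',
                          '{}', {'Content-Type': 'application/json'})
    body = conn.request(req)               # raises IOError on transport failure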

+ 14 - 4
ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict

@@ -1,9 +1,19 @@
 NAMENODE=hadoop-hdfs-namenode.pid
-SECONDARYNAMENODE=hadoop-hdfs-secondarynamenode.pid
+SECONDARY_NAMENODE=hadoop-hdfs-secondarynamenode.pid
 DATANODE=hadoop-hdfs-datanode.pid
 JOBTRACKER=hadoop-mapred-jobtracker.pid
 TASKTRACKER=hadoop-mapred-tasktracker.pid
 OOZIE_SERVER=oozie.pid
-ZOOKEEPER=zookeeper_server.pid
-TEMPLETON=templeton.pid
-NAGIOS=nagios.pid
+ZOOKEEPER_SERVER=zookeeper_server.pid
+TEMPLETON_SERVER=templeton.pid
+NAGIOS_SERVER=nagios.pid
+GANGLIA_SERVER=gmetad.pid
+GANGLIA_MONITOR=gmond.pid
+HBASE_MASTER=hbase-hbase-master.pid
+HBASE_REGIONSERVER=hbase-hbase-regionserver.pid
+NAGIOS_SERVER=nagios.pid
+HCATALOG_SERVER=hcat.pid
+KERBEROS_SERVER=kadmind.pid
+HIVE_SERVER=hive-server.pid
+HIVE_METASTORE=hive.pid
+MYSQL_SERVER=mysqld.pid

+ 28 - 26
ambari-agent/src/main/python/ambari_agent/site.pp

@@ -10,54 +10,55 @@ import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-age
 import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/*.pp'
 import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/*.pp'
 import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/*.pp'
+import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/*.pp'
 $NAMENODE= ['h2.hortonworks.com']
 $DATANODE= ['h1.hortonworks.com', 'h2.hortonworks.com']
-$hdfs_user="hdfs"
 $jdk_location="http://hdp1/downloads"
 $jdk_bins= {
 "32" => "jdk-6u31-linux-x64.bin",
 "64" => "jdk-6u31-linux-x64.bin"
 }
+$hdfs_user="hdfs"
 $java32_home="/usr/jdk64/jdk1.6.0_31"
 $java64_home="/usr/jdk64/jdk1.6.0_31"
 $configuration =  {
-capacity_scheduler=> {
+capacity-scheduler=> {
 "mapred.capacity-scheduler.queue.default.capacity" => "100",
 "mapred.capacity-scheduler.queue.default.capacity" => "100",
 "mapred.capacity-scheduler.queue.default.supports-priorit" => "false"
 "mapred.capacity-scheduler.queue.default.supports-priorit" => "false"
 },
 },
-hdfs_site=> {
-"dfs.block.size" => "256000000",
-"dfs.replication" => "1"
+oozie-site=> {
+"oozie.service.ActionService.executor.ext.classes" => "org.apache.oozie.action.hadoop.HiveActionExecutor, org.apache.oozie.action.hadoop.SqoopActionExecutor,org.apache.oozie.action.email.EmailActionExecutor,"
 },
-hbase_policy=> {
+mapred-site=> {
+"mapred.queue.names" => "hive,pig,default",
+"mapred.jobtracker.taskScheduler" => "org.apache.hadoop.mapred.CapacityTaskScheduler"
+},
+core-site=> {
+"fs.default.name" => "hrt8n36.cc1.ygridcore.net"
+},
+hbase-policy=> {
 "security.client.protocol.acl" => "*"
 "security.client.protocol.acl" => "*"
 },
 },
-hadoop_policy=> {
+hbase-site=> {
+"hbase.cluster.distributed" => "true"
+},
+hdfs-site=> {
+"dfs.block.size" => "256000000",
+"dfs.replication" => "1"
+},
+hadoop-policy=> {
 "security.client.datanode.protocol.acl" => "*",
 "security.client.datanode.protocol.acl" => "*",
 "security.client.protocol.acl" => "*"
 "security.client.protocol.acl" => "*"
 },
 },
-mapred_queue_acls=> {
+mapred-queue-acls=> {
 "mapred.queue.default.acl-submit-job" => "*",
 "mapred.queue.default.acl-submit-job" => "*",
 "mapred.queue.default.acl-administer-jobs" => "*"
 "mapred.queue.default.acl-administer-jobs" => "*"
 },
 },
-hbase_site=> {
-"hbase.cluster.distributed" => "true"
-},
-core_site=> {
-"fs.default.name" => "hrt8n36.cc1.ygridcore.net"
-},
-hive_site=> {
-"hive.exec.scratchdir" => "/tmp"
-},
-templeton_site=> {
+templeton-site=> {
 "templeton.override.enabled" => "true"
 "templeton.override.enabled" => "true"
 },
 },
-oozie_site=> {
-"oozie.service.ActionService.executor.ext.classes" => "org.apache.oozie.action.hadoop.HiveActionExecutor, org.apache.oozie.action.hadoop.SqoopActionExecutor,org.apache.oozie.action.email.EmailActionExecutor,"
-},
-mapred_site=> {
-"mapred.queue.names" => "hive,pig,default",
-"mapred.jobtracker.taskScheduler" => "org.apache.hadoop.mapred.CapacityTaskScheduler"
+hive-site=> {
+"hive.exec.scratchdir" => "/tmp"
 },
 
 }
@@ -66,6 +67,7 @@ $task_bin_exe = "ls"
 $hadoop_piddirprefix = "/tmp"
 $ganglia_server_host = "localhost"
 node /default/ {
- stage{1 :}
-class {'hdp-hadoop::namenode': stage => 1, service_state => installed_and_configured}
+ stage{1 :} -> stage{2 :}
+class {'hdp': stage => 1}
+class {'hdp-hadoop::namenode': stage => 2, service_state => installed_and_configured}
 }

+ 8 - 8
ambari-agent/src/main/python/ambari_agent/test.json

@@ -17,17 +17,17 @@
 "jdk_bins" :  { "32" : "jdk-6u31-linux-x64.bin", "64" : "jdk-6u31-linux-x64.bin" },
 "jdk_bins" :  { "32" : "jdk-6u31-linux-x64.bin", "64" : "jdk-6u31-linux-x64.bin" },
 "repo_info" :[
 "repo_info" :[
 {
 {
-  "base_url":"http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5",
-  "os_type":"centos5",
-  "repo_id":"HDP-1.1.1.16_TEST",
-  "repo_name":"HDP_TEST"
+  "baseUrl":"http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5",
+  "osType":"centos5",
+  "repoId":"HDP-1.1.1.16_TEST",
+  "repoName":"HDP_TEST"
 }
 ,
 {
-  "base_url":"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5",
-  "os_type":"centos5",
-  "repo_id":"HDP-UTILS-1.1.0.15_TEST",
-  "repo_name":"HDP-UTILS_TEST"
+  "baseUrl":"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5",
+  "osType":"centos5",
+  "repoId":"HDP-UTILS-1.1.0.15_TEST",
+  "repoName":"HDP-UTILS_TEST"
 }]
 
 },

+ 87 - 1
ambari-agent/src/test/python/TestActionQueue.py

@@ -22,11 +22,14 @@ from unittest import TestCase
 from ambari_agent.ActionQueue import ActionQueue
 from ambari_agent.AmbariConfig import AmbariConfig
 from ambari_agent.FileUtil import getFilePath
-import os, errno, time
+import os, errno, time, pprint, tempfile, threading
+
+event = threading.Event()
 
 class TestActionQueue(TestCase):
   def test_ActionQueueStartStop(self):
     actionQueue = ActionQueue(AmbariConfig().getConfig())
+    actionQueue.IDLE_SLEEP_TIME = 0.01
     actionQueue.start()
     actionQueue.stop()
     actionQueue.join()
@@ -36,3 +39,86 @@ class TestActionQueue(TestCase):
   def test_RetryAction(self):
     pass
 
+
+  def test_command_in_progress(self):
+    config = AmbariConfig().getConfig()
+    tmpfile = tempfile.gettempdir()
+    config.set('puppet', 'puppetmodules', tmpfile)
+    actionQueue = ActionQueue(config)
+    actionQueue.IDLE_SLEEP_TIME = 0.01
+    executor_started_event = threading.Event()
+    end_executor_event = threading.Event()
+    actionQueue.executor = FakeExecutor(executor_started_event, end_executor_event)
+    before_start_result = actionQueue.result()
+
+    command = {
+      'commandId': 17,
+      'role' : "role",
+      'taskId' : "taskId",
+      'clusterName' : "clusterName",
+      'serviceName' : "serviceName",
+      'status' : 'IN_PROGRESS',
+      'hostname' : "localhost.localdomain",
+      'hostLevelParams': "hostLevelParams",
+      'clusterHostInfo': "clusterHostInfo",
+      'roleCommand': "roleCommand",
+      'configurations': "configurations",
+      'commandType': "EXECUTION_COMMAND"
+    }
+    actionQueue.put(command)
+
+    actionQueue.start()
+    executor_started_event.wait()
+    #print ("ii: " + pprint.pformat(actionQueue.commandInProgress))
+    in_progress_result = actionQueue.result()
+    end_executor_event.set()
+    actionQueue.stop()
+    actionQueue.join()
+    after_start_result = actionQueue.result()
+
+    self.assertEquals(len(before_start_result['componentStatus']), 0)
+    self.assertEquals(len(before_start_result['reports']), 0)
+
+    self.assertEquals(len(in_progress_result['componentStatus']), 0)
+    self.assertEquals(len(in_progress_result['reports']), 1)
+    self.assertEquals(in_progress_result['reports'][0]['status'], "IN_PROGRESS")
+    self.assertEquals(in_progress_result['reports'][0]['stdout'], "Dummy output")
+    self.assertEquals(in_progress_result['reports'][0]['exitCode'], 777)
+    self.assertEquals(in_progress_result['reports'][0]['stderr'], 'Dummy err')
+
+    self.assertEquals(len(after_start_result['componentStatus']), 0)
+    self.assertEquals(len(after_start_result['reports']), 1)
+    self.assertEquals(after_start_result['reports'][0]['status'], "COMPLETED")
+    self.assertEquals(after_start_result['reports'][0]['stdout'], "returned stdout")
+    self.assertEquals(after_start_result['reports'][0]['exitCode'], 0)
+    self.assertEquals(after_start_result['reports'][0]['stderr'], 'returned stderr')
+
+    #print("tmpout: " + pprint.pformat(actionQueue.tmpdir))
+    #print("before: " + pprint.pformat(before_start_result))
+    #print("in_progress: " + pprint.pformat(in_progress_result))
+    #print("after: " + pprint.pformat(after_start_result))
+
+
+class FakeExecutor():
+
+  def __init__(self, executor_started_event, end_executor_event):
+    self.executor_started_event = executor_started_event
+    self.end_executor_event = end_executor_event
+    pass
+
+  def runCommand(self, command, tmpoutpath, tmperrpath):
+    tmpout= open(tmpoutpath, 'w')
+    tmpout.write("Dummy output")
+    tmpout.flush()
+
+    tmperr= open(tmperrpath, 'w')
+    tmperr.write("Dummy err")
+    tmperr.flush()
+
+    self.executor_started_event.set()
+    self.end_executor_event.wait()
+    return {
+      "exitcode": 0,
+      "stdout": "returned stdout",
+      "stderr": "returned stderr"
+    }
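
FakeExecutor's two Events implement a simple rendezvous: the queue thread signals when it is inside runCommand, the test inspects the IN_PROGRESS state, and then releases the executor so the command can complete. The handshake in isolation, assuming nothing beyond the standard library:

    import threading

    started = threading.Event()
    release = threading.Event()

    def worker():
        started.set()    # tell the test the command is now in progress
        release.wait()   # park until the test has finished its assertions

    t = threading.Thread(target=worker)
    t.start()
    started.wait()       # guaranteed: worker is mid-execution here
    # ... assert on the in-progress state ...
    release.set()
    t.join()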

+ 325 - 0
ambari-agent/src/test/python/TestController.py

@@ -0,0 +1,325 @@
+#!/usr/bin/env python2.6
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from unittest import TestCase
+from ambari_agent.Register import Register
+from ambari_agent.Controller import Controller
+from ambari_agent.Heartbeat import Heartbeat
+from ambari_agent.ActionQueue import ActionQueue
+from ambari_agent import AmbariConfig
+from ambari_agent.NetUtil import NetUtil
+import socket, ConfigParser, logging
+import os, pprint, json, sys
+from threading import Thread
+import time
+import Queue
+
+
+BAD_URL = 'http://localhost:54222/badurl/'
+
+logger = logging.getLogger()
+
+class TestController(TestCase):
+
+  def setUp(self):
+    logger.disabled = True
+    self.defaulttimeout = -1.0
+    if hasattr(socket, 'getdefaulttimeout'):
+      # get the default timeout on sockets
+      self.defaulttimeout = socket.getdefaulttimeout()
+
+
+  def tearDown(self):
+    if self.defaulttimeout is not None and self.defaulttimeout > 0 and hasattr(socket, 'setdefaulttimeout'):
+      # Set the default timeout on sockets
+      socket.setdefaulttimeout(self.defaulttimeout)
+    logger.disabled = False
+
+
+  def test_reregister_loop(self):
+    class ControllerMock(Controller):
+      def __init__(self, config, range=0):
+        self.repeatRegistration = False
+        self.range = range
+
+      callCounter = 0
+
+      def registerAndHeartbeat(self):
+        if self.callCounter < 3:
+          self.repeatRegistration = True;
+          self.callCounter += 1
+        else:
+          self.repeatRegistration = False;
+
+    config = ConfigParser.RawConfigParser()
+    mock = ControllerMock(config)
+    mock.run()
+    self.assertEquals(mock.callCounter, 3)
+    pass
+
+
+  def test_nonincremental_ids1(self):
+    '''
+      test to make sure nothing we act appropriately on getting non incremental reponse ids
+    '''
+    #timings adjustment
+    netutil = NetUtil()
+    netutil.HEARTBEAT_IDDLE_INTERVAL_SEC=0.05
+    netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC=0.05
+    #building fake responces
+    responces = Queue.Queue()
+    responce1 = {
+      'responseId':8,
+      'executionCommands':[],
+      'statusCommands':[],
+      'restartAgent':'False',
+      }
+    responce1 = json.dumps(responce1)
+
+    responce2 = {
+      'responseId':11,
+      'executionCommands':[],
+      'statusCommands':[],
+      'restartAgent':'False',
+      }
+    responce2 = json.dumps(responce2)
+    responces.put(responce1)
+    responces.put(responce2)
+    #building heartbeat object
+    testsPath = os.path.dirname(os.path.realpath(__file__))
+    dictPath = testsPath + os.sep + '..' + os.sep + '..' + os.sep + 'main' + os.sep + 'python' + os.sep + 'ambari_agent' + os.sep + 'servicesToPidNames.dict'
+    AmbariConfig.config.set('services','serviceToPidMapFile', dictPath)
+    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
+    heartbeat = Heartbeat(actionQueue)
+    # testing controller with our heartbeat
+    controller = self.ControllerMock_fake_restartAgent(AmbariConfig.config, responces)
+    controller.heartbeat = heartbeat
+    controller.actionQueue = actionQueue
+    controller.logger = logger
+    controller.netutil = netutil
+    controller.heartbeatWithServer()
+    restarts = controller.restartCount
+    self.assertEquals(restarts, 1, "Agent should restart on non incremental responce ids")
+    pass
+
+
+  def test_nonincremental_ids2(self):
+    '''
+      test to make sure nothing we act appropriately on getting incremental reponse ids
+    '''
+    #timings adjustment
+    netutil = NetUtil()
+    netutil.HEARTBEAT_IDDLE_INTERVAL_SEC=0.05
+    netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC=0.05
+    #building fake responces
+    responces = Queue.Queue()
+    responce1 = {
+      'responseId':8,
+      'executionCommands':[],
+      'statusCommands':[],
+      'restartAgent':'False',
+      }
+    responce1 = json.dumps(responce1)
+
+    responce2 = {
+      'responseId':9,
+      'executionCommands':[],
+      'statusCommands':[],
+      'restartAgent':'False',
+      }
+    responce2 = json.dumps(responce2)
+    responces.put(responce1)
+    responces.put(responce2)
+    #building heartbeat object
+    testsPath = os.path.dirname(os.path.realpath(__file__))
+    dictPath = testsPath + os.sep + '..' + os.sep + '..' + os.sep + 'main' + os.sep + 'python' + os.sep + 'ambari_agent' + os.sep + 'servicesToPidNames.dict'
+    AmbariConfig.config.set('services','serviceToPidMapFile', dictPath)
+    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
+    heartbeat = Heartbeat(actionQueue)
+    # testing controller with our heartbeat
+    controller = self.ControllerMock_fake_restartAgent(AmbariConfig.config, responces)
+    controller.heartbeat = heartbeat
+    controller.actionQueue = actionQueue
+    controller.logger = logger
+    controller.netutil = netutil
+    controller.heartbeatWithServer()
+    restarts = controller.restartCount
+    self.assertEquals(restarts, 0, "Agent should not restart on incremental response ids")
+
+
+  def test_reregister(self):
+    '''
+      Test that on receiving a re-register command we register with the server again
+    '''
+    #timings adjustment
+    netutil = NetUtil()
+    netutil.HEARTBEAT_IDDLE_INTERVAL_SEC=0.05
+    netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC=0.05
+    #building fake responses
+    responces = Queue.Queue()
+    responce1 = {
+      'responseId':8,
+      'executionCommands':[],
+      'statusCommands':[],
+      'restartAgent':'true',
+      }
+    responce1 = json.dumps(responce1)
+    responces.put(responce1)
+    #building heartbeat object
+    testsPath = os.path.dirname(os.path.realpath(__file__))
+    dictPath = testsPath + os.sep + '..' + os.sep + '..' + os.sep + 'main' + os.sep + 'python' + os.sep + 'ambari_agent' + os.sep + 'servicesToPidNames.dict'
+    AmbariConfig.config.set('services','serviceToPidMapFile', dictPath)
+    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
+    heartbeat = Heartbeat(actionQueue)
+    # testing controller with our heartbeat
+    controller = self.ControllerMock_fake_restartAgent(AmbariConfig.config, responces)
+    controller.heartbeat = heartbeat
+    controller.actionQueue = actionQueue
+    controller.logger = logger
+    controller.netutil = netutil
+    controller.heartbeatWithServer()
+    restarts = controller.restartCount
+    self.assertEquals(restarts, 1, "Agent should restart if we get a re-register command")
+
+
+  def test_heartbeat_retries(self):
+    netutil = NetUtil()
+    netutil.HEARTBEAT_IDDLE_INTERVAL_SEC=0.05
+    netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC=0.05
+    #building heartbeat object
+    testsPath = os.path.dirname(os.path.realpath(__file__))
+    dictPath = testsPath + os.sep + '..' + os.sep + '..' + os.sep + 'main' + os.sep + 'python' + os.sep + 'ambari_agent' + os.sep + 'servicesToPidNames.dict'
+    AmbariConfig.config.set('services','serviceToPidMapFile', dictPath)
+    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
+    heartbeat = Heartbeat(actionQueue)
+    # testing controller with our heartbeat and wrong url
+    controller = self.ControllerMock_failure_sendRequest(AmbariConfig.config)
+    controller.heartbeat = heartbeat
+    controller.actionQueue = actionQueue
+    controller.logger = logger
+    controller.netutil = netutil
+    thread = Thread(target =  controller.heartbeatWithServer)
+    thread.start()
+    time.sleep(0.5)
+
+    # The thread has to be stopped anyway, so snapshot the counters now and assert afterwards
+    threadWasAlive = thread.isAlive()
+    successfull_heartbits0 = controller.DEBUG_SUCCESSFULL_HEARTBEATS
+    heartbeat_retries0 = controller.DEBUG_HEARTBEAT_RETRIES
+    # Stopping thread
+    controller.DEBUG_STOP_HEARTBITTING = True
+    time.sleep(0.3)
+    # Checking results before thread stop
+    self.assertEquals(threadWasAlive, True, "Heartbeat should be alive now")
+    self.assertEquals(successfull_heartbits0, 0, "Heartbeat should not have any success")
+    self.assertEquals(heartbeat_retries0 > 1, True, "Heartbeat should retry connecting")
+    # Checking results after thread stop
+    self.assertEquals(thread.isAlive(), False, "Heartbeat should stop now")
+    self.assertEquals(controller.DEBUG_SUCCESSFULL_HEARTBEATS, 0, "Heartbeat should not have any success")
+
+
+  def test_status_command_on_registration(self):
+    '''
+    Test that a status command received with the registration response is evaluated and queued
+    '''
+    #timings adjustment
+    netutil = NetUtil()
+    netutil.HEARTBEAT_IDDLE_INTERVAL_SEC=0.05
+    netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC=0.05
+    #building fake registration response
+    responces = Queue.Queue()
+    responce1 = {
+      'response':'OK',
+      'responseId':8,
+      'statusCommands':[{
+        'clusterName' : "c1",
+        'commandType' : "STATUS_COMMAND",
+        'componentName' : "NAMENODE",
+        'serviceName' : "HDFS",
+        }],
+      }
+    responce1 = json.dumps(responce1)
+    responces.put(responce1)
+    #building heartbeat object
+    testsPath = os.path.dirname(os.path.realpath(__file__))
+    dictPath = testsPath + os.sep + '..' + os.sep + '..' + os.sep + 'main' + os.sep + 'python' + os.sep + 'ambari_agent' + os.sep + 'servicesToPidNames.dict'
+    AmbariConfig.config.set('services','serviceToPidMapFile', dictPath)
+    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
+    heartbeat = Heartbeat(actionQueue)
+    # testing controller with our heartbeat
+    controller = self.ControllerMock_fake_restartAgent(AmbariConfig.config, responces)
+    controller.heartbeat = heartbeat
+    controller.actionQueue = actionQueue
+    controller.logger = logger
+    controller.netutil = netutil
+    controller.registerWithServer()
+    # If the test does not hang, registration succeeded; now check the queue
+    queue = controller.actionQueue.getCommandQueue()
+    self.assertEquals(queue.qsize(), 1, "Status command should be queued once")
+    # Checking parsed status command
+    command = queue.get()
+    self.assertEquals(command['clusterName'], 'c1')
+    self.assertEquals(command['commandType'], 'STATUS_COMMAND')
+    self.assertEquals(command['componentName'], 'NAMENODE')
+    self.assertEquals(command['serviceName'], 'HDFS')
+
+
+  class ControllerMock_fake_restartAgent(Controller):
+    def __init__(self, config, responces, range=3):
+      self.repeatRegistration = False
+      self.responces = responces
+      self.heartbeatUrl = "fakeurl"
+      self.registerUrl = "fakeregisterurl"
+      self.responseId = 7
+      self.register = Register()
+      self.range = range
+      
+    def restartAgent(self):
+      self.restartCount += 1
+      pass
+
+    restartCount = 0
+
+    def sendRequest(self, url, data):
+      responce = self.responces.get(block=False)
+      if self.responces.empty():
+        self.DEBUG_STOP_HEARTBITTING = True # Because we have nothing to reply next time
+      return responce
+
+
+  class ControllerMock_failure_sendRequest(Controller):
+    def __init__(self, config, range=0):
+      self.repeatRegistration = False
+      self.heartbeatUrl = "fakeurl"
+      self.registerUrl = "fakeregisterurl"
+      self.responseId = 7
+      self.register = Register()
+      self.range = range
+
+    def restartAgent(self):
+      self.restartCount += 1
+      pass
+
+    restartCount = 0
+
+    def sendRequest(self, url, data):
+      raise Exception("Fake exception")

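The three ControllerMock classes in this file lean on one contract: Controller.run() keeps invoking registerAndHeartbeat() for as long as repeatRegistration stays set. A minimal sketch of that loop, assuming this simplified contract (the real Controller also wires up queues, URLs and retry timing):

    class LoopingController(object):
        # Re-invoke registerAndHeartbeat() until a heartbeat clears the flag.
        def __init__(self):
            self.repeatRegistration = False

        def run(self):
            self.registerAndHeartbeat()
            while self.repeatRegistration:
                self.registerAndHeartbeat()

With the mock in test_reregister_loop setting the flag three times, run() makes four calls in total and callCounter ends at 3, which is exactly what the test asserts.
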
+ 8 - 1
ambari-agent/src/test/python/TestGrep.py

@@ -21,7 +21,7 @@ limitations under the License.
 from unittest import TestCase
 from Grep import Grep
 import socket
-import os
+import os, sys
 import logging

 class TestGrep(TestCase):
@@ -95,6 +95,13 @@ debug: Processing report from ambari-dmi.cybervisiontech.com.ua with processor P
     desired = ''
     self.assertEquals(fragment, desired, 'Grep tail function contains bug in index arithmetics')

+  def test_filterMarkup(self):
+    string = """notice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Process_pkg[hadoop 64]/Package[hadoop-libhdfs]/ensure: created"""
+    desired="""notice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Process_pkg[hadoop 64]/Package[hadoop-libhdfs]/ensure: created"""
+    filtered = self.grep.filterMarkup(string)
+    #sys.stderr.write(filtered)
+    self.assertEquals(filtered, desired)
+
   def tearDown(self):
     pass


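test_filterMarkup checks that a line containing no terminal markup passes through Grep.filterMarkup unchanged. The implementation itself is not part of this diff; an equivalent ANSI-escape stripper could be as small as this sketch (hypothetical names):

    import re

    # Matches ANSI colour sequences such as '\x1b[0;36m' and the reset '\x1b[0m'.
    ANSI_MARKUP = re.compile(r'\x1b\[[0-9;]*m')

    def filter_markup(line):
        # Strip terminal colour codes, leaving the plain log text intact.
        return ANSI_MARKUP.sub('', line)

A plain notice line like the one above comes back verbatim, while the coloured puppet output in dummy_puppet_output_error3.txt would lose its ESC[0;36m-style prefixes.
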
+ 2 - 0
ambari-agent/src/test/python/TestHardware.py

@@ -32,6 +32,7 @@ class TestHardware(TestCase):
       self.assertTrue(dev_item['percent'] != None)
       self.assertTrue(dev_item['device'] != None)
       self.assertTrue(dev_item['mountpoint'] != None)
+      self.assertTrue(dev_item['type'] != None)
       self.assertTrue(dev_item['size'] > 0)

     for os_disk_item in osdisks:
@@ -40,6 +41,7 @@ class TestHardware(TestCase):
       self.assertTrue(os_disk_item['percent'] != None)
       self.assertTrue(os_disk_item['device'] != None)
       self.assertTrue(os_disk_item['mountpoint'] != None)
+      self.assertTrue(os_disk_item['type'] != None)
       self.assertTrue(os_disk_item['size'] > 0)

     self.assertTrue(len(result['mounts']) == len(osdisks))

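The assertions above imply that every entry in result['mounts'] carries a fixed set of keys, with 'type' being the field this change adds. An illustrative entry (values made up; the real ones come from the agent's disk probing):

    mount = {
      'device': '/dev/sda1',
      'mountpoint': '/',
      'type': 'ext4',
      'percent': '12%',
      'size': 41284928,
    }
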
+ 94 - 4
ambari-agent/src/test/python/TestHeartbeat.py

@@ -21,16 +21,106 @@ limitations under the License.
 from unittest import TestCase
 from ambari_agent.Heartbeat import Heartbeat
 from ambari_agent.ActionQueue import ActionQueue
+from ambari_agent.LiveStatus import LiveStatus
 from ambari_agent import AmbariConfig
 import socket
 import os
+import time

 class TestHeartbeat(TestCase):
-  def test_build(self):
+
+  def setUp(self):
     testsPath = os.path.dirname(os.path.realpath(__file__))
-    dictPath = testsPath + os.sep + '..' + os.sep + '..' + os.sep + 'main' + os.sep + 'python' + os.sep + 'ambari_agent' + os.sep + 'servicesToPidNames.dict'
-    AmbariConfig.config.set('services','serviceToPidMapFile', dictPath)
+    self.dictPath = testsPath + os.sep + '..' + os.sep + '..' + os.sep + 'main' + os.sep + 'python' + os.sep + 'ambari_agent' + os.sep + 'servicesToPidNames.dict'
+
+  def test_build(self):
+    AmbariConfig.config.set('services','serviceToPidMapFile', self.dictPath)
+    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
+    heartbeat = Heartbeat(actionQueue)
+    result = heartbeat.build(100)
+    print "Heartbeat: " + str(result)
+    self.assertEquals(result['hostname'] != '', True, "hostname should not be empty")
+    self.assertEquals(result['responseId'], 100)
+    self.assertEquals(result['componentStatus'] is not None, True, "Heartbeat should contain componentStatus")
+    self.assertEquals(result['reports'] is not None, True, "Heartbeat should contain reports")
+    self.assertEquals(result['timestamp'] >= 1353679373880L, True)
+    self.assertEquals(len(result['nodeStatus']), 2)
+    self.assertEquals(result['nodeStatus']['cause'], "NONE")
+    self.assertEquals(result['nodeStatus']['status'], "HEALTHY")
+    self.assertEquals(len(result), 6)
+    self.assertEquals(not heartbeat.reports, True, "Heartbeat should not contain task in progress")
+
+
+  def test_heartbeat_with_status(self):
+    AmbariConfig.config.set('services','serviceToPidMapFile', self.dictPath)
+    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
+    heartbeat = Heartbeat(actionQueue)
+    statusCommand = {
+      "serviceName" : 'HDFS',
+      "commandType" : "STATUS_COMMAND",
+      "clusterName" : "",
+      "componentName" : "DATANODE"
+    }
+    actionQueue.put(statusCommand)
+    actionQueue.start()
+    time.sleep(0.1)
+    actionQueue.stop()
+    actionQueue.join()
+    result = heartbeat.build(101)
+    self.assertEquals(len(result['componentStatus']) > 0, True, 'Heartbeat should contain status of HDFS components')
+
+  def test_heartbeat_with_status_multiple(self):
+    AmbariConfig.config.set('services','serviceToPidMapFile', self.dictPath)
+    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
+    actionQueue.IDLE_SLEEP_TIME = 0.01
+    heartbeat = Heartbeat(actionQueue)
+    actionQueue.start()
+    max_number_of_status_entries = 0
+    for i in range(1,5):
+      statusCommand = {
+        "serviceName" : 'HDFS',
+        "commandType" : "STATUS_COMMAND",
+        "clusterName" : "",
+        "componentName" : "DATANODE"
+      }
+      actionQueue.put(statusCommand)
+      time.sleep(0.1)
+      result = heartbeat.build(101)
+      number_of_status_entries = len(result['componentStatus'])
+#      print "Heartbeat with status: " + str(result) + " XXX " + str(number_of_status_entries)
+      if max_number_of_status_entries < number_of_status_entries:
+        max_number_of_status_entries = number_of_status_entries
+    actionQueue.stop()
+    actionQueue.join()
+
+    NUMBER_OF_COMPONENTS = 1
+    self.assertEquals(max_number_of_status_entries == NUMBER_OF_COMPONENTS, True)
+
+  def test_heartbeat_with_task_in_progress(self):
+    AmbariConfig.config.set('services','serviceToPidMapFile', self.dictPath)
     actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
+    actionQueue.commandInProgress= {
+      'role' : "role",
+      'actionId' : "actionId",
+      'taskId' : "taskId",
+      'stdout' : "stdout",
+      'clusterName' : "clusterName",
+      'stderr' : 'none',
+      'exitCode' : 777,
+      'serviceName' : "serviceName",
+      'status' : 'IN_PROGRESS'
+    }
     heartbeat = Heartbeat(actionQueue)
     result = heartbeat.build(100)
-  
+    #print "Heartbeat: " + str(result)
+    self.assertEquals(len(result['reports']), 1)
+    self.assertEquals(result['reports'][0]['role'], "role")
+    self.assertEquals(result['reports'][0]['actionId'], "actionId")
+    self.assertEquals(result['reports'][0]['taskId'], "taskId")
+    self.assertEquals(result['reports'][0]['stdout'], "...")
+    self.assertEquals(result['reports'][0]['clusterName'], "clusterName")
+    self.assertEquals(result['reports'][0]['stderr'], "...")
+    self.assertEquals(result['reports'][0]['exitCode'], 777)
+    self.assertEquals(result['reports'][0]['serviceName'], "serviceName")
+    self.assertEquals(result['reports'][0]['status'], "IN_PROGRESS")

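test_build pins the heartbeat wire format down to exactly six top-level fields, with nodeStatus carrying two. Assuming only what the assertions require, a well-formed heartbeat looks roughly like this (illustrative values):

    heartbeat = {
      'responseId': 100,
      'timestamp': 1353679373880,   # epoch milliseconds
      'hostname': 'agent.example.com',
      'nodeStatus': {'status': 'HEALTHY', 'cause': 'NONE'},
      'reports': [],                # stdout/stderr collapse to '...' while a task is IN_PROGRESS
      'componentStatus': [],        # filled from queued STATUS_COMMAND results
    }
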
+ 37 - 0
ambari-agent/src/test/python/TestLiveStatus.py

@@ -0,0 +1,37 @@
+#!/usr/bin/env python2.6
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from unittest import TestCase
+from ambari_agent.LiveStatus import LiveStatus
+from ambari_agent import AmbariConfig
+import socket
+import os
+
+class TestLiveStatus(TestCase):
+  def test_build(self):
+    testsPath = os.path.dirname(os.path.realpath(__file__))
+    dictPath = testsPath + os.sep + '..' + os.sep + '..' + os.sep + 'main' + os.sep + 'python' + os.sep + 'ambari_agent' + os.sep + 'servicesToPidNames.dict'
+    AmbariConfig.config.set('services','serviceToPidMapFile', dictPath)
+    for component in LiveStatus.COMPONENTS:
+      livestatus = LiveStatus('', component['serviceName'], component['componentName'])
+      result = livestatus.build()
+      print "LiveStatus of {0}: {1}".format(component['serviceName'], str(result))
+      self.assertEquals(len(result) > 0, True, 'Livestatus should not be empty')
+  

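The loop above relies only on LiveStatus.COMPONENTS being a list of service/component descriptors, e.g. (illustrative subset):

    COMPONENTS = [
      {'serviceName': 'HDFS', 'componentName': 'NAMENODE'},
      {'serviceName': 'HDFS', 'componentName': 'DATANODE'},
    ]

build() is then expected to return a non-empty status structure for every such entry.
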
+ 20 - 39
ambari-agent/src/test/python/TestConnectionRetries.py → ambari-agent/src/test/python/TestNetUtil.py

@@ -31,25 +31,38 @@ import socket
 import os
 import logging
 from ambari_agent.Controller import Controller
+import socket

 NON_EXISTING_DOMAIN = 'non-existing-domain43342432.com'
 BAD_URL = 'http://localhost:54222/badurl/'

-class TestConnectionRetries(TestCase):
+class TestNetUtil(TestCase):

   logger = logging.getLogger()

   def setUp(self):
+    self.logger.info("Starting TestNetUtil test")
     self.logger.disabled = True
+    self.defaulttimeout = -1.0
+    if hasattr(socket, 'getdefaulttimeout'):
+      # get the default timeout on sockets
+      self.defaulttimeout = socket.getdefaulttimeout()


   def test_url_checks(self):
     netutil = NetUtil()
-    self.assertEquals(netutil.checkURL('http://www.iana.org/domains/example/'), True, "Good url - HTTP code 200")
-    self.assertEquals(netutil.checkURL('https://www.iana.org/domains/example/'), True, "Good HTTPS url - HTTP code 200")
+    if hasattr(socket, 'setdefaulttimeout'):
+      # Set the default timeout on sockets
+      socket.setdefaulttimeout(1)
     self.assertEquals(netutil.checkURL('http://' + NON_EXISTING_DOMAIN), False, "Not existing domain")
     self.assertEquals(netutil.checkURL(BAD_URL), False, "Bad url")
     self.assertEquals(netutil.checkURL('http://192.168.253.177'), False, "Not reachable IP")
+    if hasattr(socket, 'setdefaulttimeout'):
+      # Set the default timeout on sockets
+      socket.setdefaulttimeout(10)
+    self.assertEquals(netutil.checkURL('http://www.iana.org/domains/example/'), True, "Good url - HTTP code 200")
+    self.assertEquals(netutil.checkURL('https://www.iana.org/domains/example/'), True, "Good HTTPS url - HTTP code 200")
+

   def test_registration_retries(self):
     netutil = NetUtil()
@@ -72,43 +85,11 @@ class TestConnectionRetries(TestCase):
     # Checking results after thread stop
     self.assertEquals(thread.isAlive(), False, "Thread should stop now")

-  def test_heartbeat_retries(self):
-    netutil = NetUtil()
-    netutil.HEARTBEAT_IDDLE_INTERVAL_SEC=0.05
-    netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC=0.05
-    #building heartbeat object
-    testsPath = os.path.dirname(os.path.realpath(__file__))
-    dictPath = testsPath + os.sep + '..' + os.sep + '..' + os.sep + 'main' + os.sep + 'python' + os.sep + 'ambari_agent' + os.sep + 'servicesToPidNames.dict'
-    AmbariConfig.config.set('services','serviceToPidMapFile', dictPath)
-    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
-    heartbeat = Heartbeat(actionQueue)
-    # testing controller with our heartbeat and wrong url
-    controller = Controller(AmbariConfig.config)
-    controller.heartbeat = heartbeat
-    controller.heartbeatUrl = BAD_URL
-    controller.actionQueue = actionQueue
-    controller.logger = self.logger
-    controller.netutil = netutil
-    thread = Thread(target =  controller.heartbeatWithServer)
-    thread.start()
-    time.sleep(1)
-
-    # I have to stop the thread anyway, so I'll check results later
-    threadWasAlive = thread.isAlive()
-    successfull_heartbits0 = controller.DEBUG_SUCCESSFULL_HEARTBEATS
-    heartbeat_retries0 = controller.DEBUG_HEARTBEAT_RETRIES
-    # Stopping thread
-    controller.DEBUG_STOP_HEARTBITTING = True
-    time.sleep(1)
-    # Checking results before thread stop
-    self.assertEquals(threadWasAlive, True, "Heartbeat should be alive now")
-    self.assertEquals(successfull_heartbits0, 0, "Heartbeat should not have any success")
-    self.assertEquals(heartbeat_retries0 > 1, True, "Heartbeat should retry connecting")
-    # Checking results after thread stop
-    self.assertEquals(thread.isAlive(), False, "Heartbeat should stop now")
-    self.assertEquals(controller.DEBUG_SUCCESSFULL_HEARTBEATS, 0, "Heartbeat should not have any success")
-
   def tearDown(self):
+    if self.defaulttimeout is not None and self.defaulttimeout > 0 and hasattr(socket, 'setdefaulttimeout'):
+      # Set the default timeout on sockets
+      socket.setdefaulttimeout(self.defaulttimeout)
     self.logger.disabled = False
+    self.logger.info("Finished TestNetUtil test")



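test_url_checks drops the global socket timeout to 1 second so lookups of unreachable hosts fail fast, raises it to 10 seconds for the live URLs, and setUp/tearDown restore whatever timeout was in force before the test ran. A reusable save/restore sketch of the same pattern (not part of NetUtil):

    import socket
    from contextlib import contextmanager

    @contextmanager
    def default_socket_timeout(seconds):
        # Temporarily set the process-wide socket timeout, then restore it.
        previous = socket.getdefaulttimeout()
        socket.setdefaulttimeout(seconds)
        try:
            yield
        finally:
            socket.setdefaulttimeout(previous)

Used as "with default_socket_timeout(1): netutil.checkURL(...)", it would replace the paired hasattr/setdefaulttimeout blocks above.
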
+ 141 - 5
ambari-agent/src/test/python/TestPuppetExecutor.py

@@ -20,18 +20,154 @@ limitations under the License.
 
 from unittest import TestCase
 from puppetExecutor import puppetExecutor
+from Grep import Grep
 from pprint import pformat
+import socket, threading, tempfile
+import os, time
 import sys
+from AmbariConfig import AmbariConfig
+from threading import Thread
+
+grep = Grep()

 class TestPuppetExecutor(TestCase):
+
+
   def test_build(self):
+    puppetexecutor = puppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
     command = puppetexecutor.puppetCommand("site.pp")
     command = puppetexecutor.puppetCommand("site.pp")
+    self.assertEquals("puppet", command[0], "puppet binary wrong")
     self.assertEquals("apply", command[1], "local apply called")
     self.assertEquals("apply", command[1], "local apply called")
     self.assertEquals("--confdir=/tmp", command[2],"conf dir tmp")
     self.assertEquals("--detailed-exitcodes", command[3], "make sure output \
     correct")
-    
-    
+
+  def test_condense_bad2(self):
+    puppetexecutor = puppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
+    puppetexecutor.ERROR_LAST_LINES_BEFORE = 2
+    puppetexecutor.ERROR_LAST_LINES_AFTER = 3
+    string_err = open('dummy_puppet_output_error2.txt', 'r').read().replace("\n", os.linesep)
+    result = puppetexecutor.condenseOutput(string_err, '', 1)
+    stripped_string = string_err.strip()
+    lines = stripped_string.splitlines(True)
+    d = lines[1:6]
+    result_check = True
+    for l in d:
+      result_check &= grep.filterMarkup(l) in result
+    self.assertEquals(result_check, True, "Failed to condence fail log")
+    self.assertEquals(len(result.splitlines(True)), 6, "Failed to condence fail log")
+
+  def test_condense_bad3(self):
+    puppetexecutor = puppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
+    string_err = open('dummy_puppet_output_error3.txt', 'r').read().replace("\n", os.linesep)
+    result = puppetexecutor.condenseOutput(string_err, '', 1)
+    stripped_string = string_err.strip()
+    lines = stripped_string.splitlines(True)
+    #sys.stderr.write(result)
+    d = lines[0:31]
+    result_check = True
+    for l in d:
+      result_check &= grep.filterMarkup(l) in result
+    self.assertEquals(result_check, True, "Failed to condense fail log")
+    self.assertEquals(len(result.splitlines(True)), 33, "Failed to condense fail log")
+
+  def test_condense_good(self):
+    puppetexecutor = puppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
+    puppetexecutor.OUTPUT_LAST_LINES = 2
+    string_good = open('dummy_puppet_output_good.txt', 'r').read().replace("\n", os.linesep)
+    result = puppetexecutor.condenseOutput(string_good, puppetExecutor.NO_ERROR, 0)
+    stripped_string = string_good.strip()
+    lines = stripped_string.splitlines(True)
+    result_check = lines[45].strip() in result and lines[46].strip() in result
+    self.assertEquals(result_check, True, "Failed to condense output log")
+    self.assertEquals(len(result.splitlines(True)), 2, "Failed to condense output log")
+
+  def test_watchdog_1(self):
+    """
+    Tests whether watchdog works
+    """
+    subproc_mock = self.Subprocess_mockup()
+    executor_mock = self.PuppetExecutor_mock("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
+      "/usr/",
+      "/root/workspace/puppet-install/facter-1.6.10/",
+      "/tmp", AmbariConfig().getConfig(), subproc_mock)
+    _, tmpoutfile = tempfile.mkstemp()
+    _, tmperrfile = tempfile.mkstemp()
+    result = {  }
+    puppetEnv = { "RUBYLIB" : ""}
+    executor_mock.PUPPET_TIMEOUT_SECONDS = 0.1
+    subproc_mock.returncode = None
+    thread = Thread(target =  executor_mock.runPuppetFile, args = ("fake_puppetFile", result, puppetEnv, tmpoutfile, tmperrfile))
+    thread.start()
+    time.sleep(0.1)
+    subproc_mock.finished_event.wait()
+    self.assertEquals(subproc_mock.was_terminated, True, "Subprocess should be terminated due to timeout")
+
+
+  def test_watchdog_2(self):
+    """
+    Tries to catch false positive watchdog invocations
+    """
+    subproc_mock = self.Subprocess_mockup()
+    executor_mock = self.PuppetExecutor_mock("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
+    "/usr/",
+    "/root/workspace/puppet-install/facter-1.6.10/",
+    "/tmp", AmbariConfig().getConfig(), subproc_mock)
+    _, tmpoutfile = tempfile.mkstemp()
+    _, tmperrfile = tempfile.mkstemp()
+    result = {  }
+    puppetEnv = { "RUBYLIB" : ""}
+    executor_mock.PUPPET_TIMEOUT_SECONDS = 5
+    subproc_mock.returncode = 0
+    thread = Thread(target =  executor_mock.runPuppetFile, args = ("fake_puppetFile", result, puppetEnv, tmpoutfile, tmperrfile))
+    thread.start()
+    time.sleep(0.1)
+    subproc_mock.should_finish_event.set()
+    subproc_mock.finished_event.wait()
+    print(subproc_mock.was_terminated)
+    self.assertEquals(subproc_mock.was_terminated, False, "Subprocess should not be terminated before timeout")
+    self.assertEquals(subproc_mock.returncode, 0, "Subprocess should not be terminated before timeout")
+
+
+  class PuppetExecutor_mock(puppetExecutor):
+
+    def __init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config, subprocess_mockup):
+      self.subprocess_mockup = subprocess_mockup
+      puppetExecutor.__init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config)
+      pass
+
+    def lauch_puppet_subprocess(self, puppetcommand, tmpout, tmperr, puppetEnv):
+      self.subprocess_mockup.tmpout = tmpout
+      self.subprocess_mockup.tmperr = tmperr
+      return self.subprocess_mockup
+
+
+  class Subprocess_mockup():
+
+    returncode = 0
+
+    started_event = threading.Event()
+    should_finish_event = threading.Event()
+    finished_event = threading.Event()
+    was_terminated = False
+    tmpout = None
+    tmperr = None
+
+    def communicate(self):
+      self.started_event.set()
+      self.tmpout.write("Dummy output")
+      self.tmpout.flush()
+
+      self.tmperr.write("Dummy err")
+      self.tmperr.flush()
+      self.should_finish_event.wait()
+      self.finished_event.set()
+      pass
+
+    def terminate(self):
+      self.was_terminated = True
+      self.returncode = 17
+      self.should_finish_event.set()
+

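The two watchdog tests fix the executor's timeout behaviour: a puppet subprocess that outlives PUPPET_TIMEOUT_SECONDS must be terminated, and one that finishes in time must not be. A generic sketch of such a watchdog, assuming only a subprocess-like object with communicate() and terminate() (this is not puppetExecutor's actual implementation):

    import threading

    def run_with_watchdog(proc, timeout_seconds):
        # Drive proc.communicate() on a worker thread; kill it on timeout.
        worker = threading.Thread(target=proc.communicate)
        worker.start()
        worker.join(timeout_seconds)
        if worker.isAlive():
            # Past the deadline: terminate, then wait for the thread to drain.
            proc.terminate()
            worker.join()
        return proc.returncode

Subprocess_mockup above plugs into exactly this kind of loop: terminate() flips was_terminated and unblocks communicate() via should_finish_event.
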
+ 57 - 0
ambari-agent/src/test/python/TestPuppetExecutorManually.py

@@ -0,0 +1,57 @@
+#!/usr/bin/env python2.6
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from unittest import TestCase
+from puppetExecutor import puppetExecutor
+from pprint import pformat
+import socket
+import os
+import sys
+import logging
+from AmbariConfig import AmbariConfig
+import tempfile
+
+FILEPATH="runme.pp"
+logger = logging.getLogger()
+
+class TestPuppetExecutor(TestCase):
+
+  def test_run(self):
+    """
+    Used to run an arbitrary puppet manifest: the test looks for a manifest named 'runme.pp' and runs it.
+    It does not make any assertions
+    """
+    if not os.path.isfile(FILEPATH):
+      return
+
+    logger.info("***** RUNNING " + FILEPATH + " *****")
+    cwd = os.getcwd()
+    puppetexecutor = puppetExecutor(cwd, "/x", "/y", "/tmp", AmbariConfig().getConfig())
+    result = {}
+    puppetEnv = os.environ
+    _, tmpoutfile = tempfile.mkstemp()
+    _, tmperrfile = tempfile.mkstemp()
+    result = puppetexecutor.runPuppetFile(FILEPATH, result, puppetEnv, tmpoutfile, tmperrfile)
+    logger.info("*** Puppet output: " + str(result['stdout']))
+    logger.info("*** Puppet errors: " + str(result['stderr']))
+    logger.info("*** Puppet retcode: " + str(result['exitcode']))
+    logger.info("****** DONE *****")
+
+

+ 37 - 0
ambari-agent/src/test/python/TestRegistration.py

@@ -0,0 +1,37 @@
+#!/usr/bin/env python2.6
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from unittest import TestCase
+from ambari_agent.Register import Register
+import socket
+import os, pprint, json
+
+class TestRegistration(TestCase):
+
+  def test_registration_build(self):
+    register = Register()
+    data = register.build(1)
+    #print ("Register: " + pprint.pformat(data))
+    self.assertEquals(len(data['hardwareProfile']) > 0, True, "hardwareProfile should contain content")
+    self.assertEquals(data['hostname'] != "", True, "hostname should not be empty")
+    self.assertEquals(data['publicHostname'] != "", True, "publicHostname should not be empty")
+    self.assertEquals(data['responseId'], 1)
+    self.assertEquals(data['timestamp'] > 1353678475465L, True, "timestamp should be a recent epoch value")
+    self.assertEquals(len(data), 5)

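Mirroring the heartbeat test, test_registration_build fixes the registration payload at exactly five fields. Assuming only what the assertions require, an illustrative payload:

    registration = {
      'responseId': 1,
      'timestamp': 1353678475466,               # epoch milliseconds
      'hostname': 'agent.example.com',
      'publicHostname': 'agent.example.com',
      'hardwareProfile': {'memorysize': 4096},  # non-empty facter-style dict
    }
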
+ 40 - 0
ambari-agent/src/test/python/dummy_puppet_output_error2.txt

@@ -0,0 +1,40 @@
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:57 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 7 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:57 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 7 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:57 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 7 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/returns: Bad connection to FS. command aborted. exception: Call to dev.hortonworks.com/10.0.2.15:8020 failed on connection exception: java.net.ConnectException: Connection refused
+err: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/returns: change from notrun to 0 failed: hadoop --config /etc/hadoop/conf fs -mkdir /mapred returned 255 instead of one of [0] at /var/lib/ambari-agent/puppet/modules/hdp/manifests/init.pp:267
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /mapred::end]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
+warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /mapred::end]: Skipping because of failed dependencies
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -chown mapred /mapred::begin]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
+warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -chown mapred /mapred::begin]: Skipping because of failed dependencies
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
+warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]: Skipping because of failed dependencies
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -chown mapred /mapred::end]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
+warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -chown mapred /mapred::end]: Skipping because of failed dependencies
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:50 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 0 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:51 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 1 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:52 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 2 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:53 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 3 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:54 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 4 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:55 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 5 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:56 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 6 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:57 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 7 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:58 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 8 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:59 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 9 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: Bad connection to FS. command aborted. exception: Call to dev.hortonworks.com/10.0.2.15:8020 failed on connection exception: java.net.ConnectException: Connection refused
+err: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: change from notrun to 0 failed: hadoop --config /etc/hadoop/conf fs -mkdir /tmp returned 255 instead of one of [0] at /var/lib/ambari-agent/puppet/modules/hdp/manifests/init.pp:267
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /tmp::end]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp] has failures: true
+warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /tmp::end]: Skipping because of failed dependencies
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system::begin]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
+warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system::begin]: Skipping because of failed dependencies
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
+warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]: Skipping because of failed dependencies
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system::end]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
+warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system::end]: Skipping because of failed dependencies
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:14 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 0 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:15 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 1 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:16 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 2 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:17 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 3 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:18 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 4 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:19 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 5 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
+notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:20 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 6 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)

+ 76 - 0
ambari-agent/src/test/python/dummy_puppet_output_error3.txt

@@ -0,0 +1,76 @@
+ESC[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-plugins]/Hdp::Package[nagios-plugins]/Hdp:
+:Package::Process_pkg[nagios-plugins]/Package[nagios-plugins-1.4.9]/ensure: createdESC[0mESC[1;35merr: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Pack
+age::Process_pkg[nagios-addons]/Package[hdp_mon_nagios_addons]/ensure: change from absent to present failed: Execution of '/usr/bin/yum -d 0 -e 0 -y install hdp_mon_nagios_addons' returned 1:
+Error Downloading Packages:
+  hdp_mon_nagios_addons-0.0.2.15-1.noarch: failure: noarch/hdp_mon/hdp_mon_nagios_addons-0.0.2.15-1.noarch.rpm from AMBARI.dev-1.x: [Errno 256] No more mirrors to try.
+ESC[0m
+ESC[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Package::Process_pkg[nagios-addons]/Anchor[hdp::package::nagios-addons::end]: Dependency Package[hdp_mon_nagios_addons] has failures:
+trueESC[0mESC[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::
+Package::Process_pkg[nagios-addons]/Anchor[hdp::package::nagios-addons::end]: Skipping because of failed dependenciesESC[0mESC[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Anchor[hdp-nagios::server::packages::end]: Dependency Package[hdp_mon
+\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::begin]: Skipping because of failed dependencies\u001B[0m
+\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
+\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Skipping because of failed dependencies\u001B[0m
+\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
+\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Skipping because of failed dependencies\u001B[0m
+\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
+\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Skipping because of failed dependencies\u001B[0m
+\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
+\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Skipping because of failed dependencies\u001B[0m
+\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
+\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Skipping because of failed dependencies\u001B[0m
+\u001B[0;36mnotice: Finished catalog run in 49.63 seconds\u001B[0m
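Every "Skipping because of failed dependencies" line above is a cascade from the single failed Package[hdp_mon_nagios_addons]; the run itself still finishes. When reading a log like this, it helps to reduce the noise to the root-cause resource. A minimal, hypothetical Python sketch of such a reducer (the summarize helper and its regexes are illustrative, not part of this change set; it assumes the raw log carries real ANSI escape bytes where the listing above shows the literal text "\u001B[0;33m"):

# Hypothetical log-reduction helper -- illustrative only, not part of
# this change set.
import re
import sys

ANSI = re.compile(r'\x1b\[[0-9;]*m')                      # ANSI color escapes
DEP = re.compile(r'Dependency (\S+) has failures: true')  # root-cause marker

def summarize(lines):
    """Map each root-cause resource to the number of resources it blocked."""
    roots = {}
    skipped = 0
    for line in lines:
        line = ANSI.sub('', line)  # strip color codes before matching
        match = DEP.search(line)
        if match:
            roots[match.group(1)] = roots.get(match.group(1), 0) + 1
        if 'Skipping because of failed dependencies' in line:
            skipped += 1
    return roots, skipped

if __name__ == '__main__':
    roots, skipped = summarize(sys.stdin)
    for resource, blocked in sorted(roots.items()):
        print('%s blocked %d dependent resources' % (resource, blocked))
    print('%d resources skipped in total' % skipped)

On the excerpt above this would report Package[hdp_mon_nagios_addons] as the only root cause.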

+ 79 - 0
ambari-agent/src/test/python/examples/debug_testcase_example.py

@@ -0,0 +1,79 @@
+#!/usr/bin/env python2.6
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from unittest import TestCase
+#from Register import Register
+from ambari_agent.Controller import Controller
+from ambari_agent.Heartbeat import Heartbeat
+from ambari_agent.ActionQueue import ActionQueue
+from ambari_agent import AmbariConfig
+from ambari_agent.NetUtil import NetUtil
+import socket, ConfigParser, logging
+import os, pprint, json, sys
+from threading import Thread
+import time
+import Queue
+
+
+BAD_URL = 'http://localhost:54222/badurl/'
+logger = logging.getLogger()
+
+# This example should be copied to ambari-agent/src/main/python/debug_testcase_example.py.
+# After installing the Python plugin and adjusting the test,
+# it can be run in the IntelliJ IDEA debugger.
+
+class TestController(TestCase):
+
+  def setUp(self):
+    #logger.disabled = True
+    self.defaulttimeout = -1.0
+    if hasattr(socket, 'getdefaulttimeout'):
+      # Save the current default socket timeout so tearDown can restore it
+      self.defaulttimeout = socket.getdefaulttimeout()
+
+  def tearDown(self):
+    if self.defaulttimeout is not None and self.defaulttimeout > 0 and hasattr(socket, 'setdefaulttimeout'):
+      # Restore the default socket timeout saved in setUp
+      socket.setdefaulttimeout(self.defaulttimeout)
+      #logger.disabled = False
+
+  def test_custom(self):
+    '''
+      Test that the agent re-registers with the server when it receives a re-register command.
+    '''
+    pass
+
+def main(argv=None):
+  logger.setLevel(logging.INFO)
+  formatter = logging.Formatter(
+      "%(asctime)s %(filename)s:%(lineno)d - %(message)s")
+  stream_handler = logging.StreamHandler()
+  stream_handler.setFormatter(formatter)
+  logger.addHandler(stream_handler)
+
+  test = TestController('test_custom')  # TestCase requires the test method name
+  test.setUp()
+  test.test_custom()
+  test.tearDown()
+
+if __name__ == '__main__':
+  main()
+
+
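As the header comment notes, the example is meant to be copied into place and driven by hand through main(); with TestController inheriting TestCase, as in the corrected listing above, it can equally be driven by the standard unittest machinery. A minimal, hypothetical runner sketch (the module name and import path are assumptions, not part of this change):

# Hypothetical runner -- assumes debug_testcase_example.py is importable
# (e.g. on sys.path) and that TestController subclasses unittest.TestCase.
import unittest
from debug_testcase_example import TestController

if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(TestController)
    unittest.TextTestRunner(verbosity=2).run(suite)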

Some files were not shown because too many files changed in this diff