
Created branch-1.2.3. (yusaku)

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/branches/branch-1.2.3@1481258 13f79535-47bb-0310-9956-ffa450edef68
Yusaku Sako · 12 years ago · commit 8c75621ea4
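
The git-svn-id trailer above records where this mirrored commit came from: the branch path in the Apache Subversion repository and the revision (r1481258) that created it. As a sketch, the same branch can be checked out directly from the SVN origin with a stock svn client (the target directory name here is illustrative, not part of the commit):

  # Check out branch-1.2.3 at the revision recorded in the git-svn-id trailer
  svn checkout -r 1481258 \
      https://svn.apache.org/repos/asf/incubator/ambari/branches/branch-1.2.3 \
      ambari-branch-1.2.3
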
100 changed files with 10966 additions and 0 deletions
  1. + 1 - 0  .gitattributes
  2. + 20 - 0  .gitignore
  3. + 2935 - 0  CHANGES.txt
  4. + 16 - 0  DISCLAIMER.txt
  5. + 198 - 0  KEYS
  6. + 262 - 0  LICENSE.txt
  7. + 10 - 0  NOTICE.txt
  8. + 172 - 0  ambari-agent/conf/unix/ambari-agent
  9. + 55 - 0  ambari-agent/conf/unix/ambari-agent.ini
  10. + 18 - 0  ambari-agent/conf/unix/ambari-env.sh
  11. + 41 - 0  ambari-agent/etc/init.d/ambari-agent
  12. + 363 - 0  ambari-agent/pom.xml
  13. + 36 - 0  ambari-agent/src/examples/query_with3jobs.txt
  14. + 35 - 0  ambari-agent/src/examples/query_with6jobs.txt
  15. + 226 - 0  ambari-agent/src/examples/tpcds_ss_tables.sql
  16. + 27 - 0  ambari-agent/src/main/package/rpm/postinstall.sh
  17. + 24 - 0  ambari-agent/src/main/package/rpm/preinstall.sh
  18. + 25 - 0  ambari-agent/src/main/package/rpm/preremove.sh
  19. + 48 - 0  ambari-agent/src/main/puppet/manifestloader/site.pp
  20. + 68 - 0  ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp
  21. + 23 - 0  ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp
  22. + 21 - 0  ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp
  23. + 76 - 0  ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp
  24. + 28 - 0  ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp
  25. + 97 - 0  ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb
  26. + 37 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh
  27. + 62 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh
  28. + 34 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh
  29. + 71 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init
  30. + 196 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh
  31. + 71 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init
  32. + 540 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh
  33. + 170 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py
  34. + 47 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh
  35. + 141 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh
  36. + 57 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh
  37. + 73 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh
  38. + 62 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh
  39. + 43 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh
  40. + 54 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh
  41. + 41 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh
  42. + 28 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh
  43. + 79 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp
  44. + 43 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_monitor.pp
  45. + 44 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_server.pp
  46. + 36 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp
  47. + 36 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp
  48. + 55 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp
  49. + 153 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp
  50. + 90 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp
  51. + 35 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp
  52. + 230 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp
  53. + 25 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb
  54. + 24 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb
  55. + 62 - 0  ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb
  56. + 62 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh
  57. + 132 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/files/task-log4j.properties
  58. + 65 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb
  59. + 51 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb
  60. + 56 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp
  61. + 101 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
  62. + 79 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp
  63. + 36 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp
  64. + 80 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp
  65. + 42 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp
  66. + 83 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp
  67. + 387 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
  68. + 94 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp
  69. + 29 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp
  70. + 75 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp
  71. + 230 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
  72. + 57 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp
  73. + 28 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp
  74. + 44 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp
  75. + 187 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
  76. + 120 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
  77. + 24 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp
  78. + 27 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp
  79. + 27 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp
  80. + 46 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp
  81. + 98 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp
  82. + 94 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp
  83. + 25 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb
  84. + 3 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb
  85. + 104 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
  86. + 37 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
  87. + 37 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb
  88. + 118 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb
  89. + 196 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb
  90. + 3 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb
  91. + 20 - 0  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
  92. + 26 - 0  ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh
  93. + 39 - 0  ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
  94. + 56 - 0  ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp
  95. + 143 - 0  ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp
  96. + 24 - 0  ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp
  97. + 66 - 0  ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp
  98. + 102 - 0  ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp
  99. + 73 - 0  ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/regionserver.pp
  100. + 76 - 0  ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/service.pp

+ 1 - 0
.gitattributes

@@ -0,0 +1 @@
+* text=auto
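
The single rule above, * text=auto, asks git to detect text files and normalize their line endings in the repository. A quick way to confirm the effective attribute for a given path, assuming a git version that ships check-attr:

  # Report the effective "text" attribute for a tracked file
  git check-attr text -- CHANGES.txt
  # CHANGES.txt: text: auto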

+ 20 - 0
.gitignore

@@ -0,0 +1,20 @@
+.classpath
+.project
+.settings
+.idea/
+.iml/
+.DS_Store
+target
+/ambari-server/derby.log
+/ambari-server/pass.txt
+/ambari-web/npm-debug.log
+/ambari-web/public/
+/ambari-web/node_modules/
+*.pyc
+*.py~
+*.iml
+.hg
+.hgignore
+.hgtags
+derby.log
+pass.txt
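
This ignore list mixes anchored patterns (a leading "/" limits the match to the repository root, e.g. /ambari-server/derby.log) with unanchored ones (derby.log matches at any depth), so some paths are covered by two rules. A small sketch of checking which rule decides for a path, assuming git 1.8.2 or later where check-ignore exists:

  # Show the .gitignore line that decides whether a path is ignored
  git check-ignore -v ambari-server/derby.log
  # .gitignore:19:derby.log    ambari-server/derby.log
  # (the later, unanchored pattern is reported, since git applies the last matching rule)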

+ 2935 - 0
CHANGES.txt

@@ -0,0 +1,2935 @@
+Ambari Change Log
+
+Notes:
+ - Committers should be listed using their login and non-committers
+should be listed by their full name.
+ - Please keep the file to a max of 80 characters wide.
+ - Put latest commits first in each section.
+
+Trunk (unreleased changes):
+
+ INCOMPATIBLE CHANGES 
+
+ NEW FEATURES
+
+ AMBARI-2031. Add clover code coverage profile. 
+ (Giridharan Kesavan via swagle)
+
+ AMBARI-2048. Create ambari agent scripts for historyserver. (swagle)
+
+ AMBARI-2046. Create ambari agent scripts for Hadoop 2.0 installation, node 
+ manager. (swagle)
+
+ AMBARI-2047. Create ambari agent scripts for yarn client. (swagle)
+
+ AMBARI-1679. Create ambari agent scripts for Hadoop 2.0 installation, 
+ configuration and management. (swagle)
+
+ AMBARI-1680. Add Hadoop 2.0 stack definition to Ambari. (swagle)
+
+ AMBARI-1908. HDFS Mirroring: Add Bread Crumbs and Validation. (Arun Kandregula
+ via yusaku)
+
+ AMBARI-1558. Script to add host components to existing hosts.
+ (yusaku via jaimi)
+
+ AMBARI-1936. Support for installing on mixed OS versions install + mgmt.
+ (swagle)
+
+ AMBARI-1924. Allow for users to customize Ganglia gmetad + gmond user 
+ accounts. (Sumit Mohanty via swagle)
+
+ AMBARI-1923. Allow for users to customize Nagios user accounts. 
+ (Sumit Mohanty via swagle)
+
+ AMBARI-1922. Support non-root ssh via a user that can sudo in as root. 
+ (Sumit Mohanty via swagle)
+ 
+ AMBARI-1914. Add Nagios alerts for Hue service. (swagle)
+
+ AMBARI-1895. Refactor ajax requests. (srimanth)
+
+ AMBARI-1868. Include stack version as a parameter in manifest. (swagle)
+
+ AMBARI-1847. Make single PUT call for multiple host overrides. (srimanth)
+
+ AMBARI-1857. Capacity Scheduler: field order for Add/Edit popup. (yusaku via
+ srimanth)
+
+ AMBARI-1855. Capacity Scheduler: when adding a new queue, populate 
+ fields. (yusaku via srimanth)
+ 
+ AMBARI-1850. Update unit tests. (yusaku via srimanth)
+
+ AMBARI-1829. HDFS Mirroring: Display Status and handle maintenance operations 
+ like Stop, Suspend, Activate etc. (Arun Kandregula via srimanth)
+
+ AMBARI-1840. For global properties show restart for appropriate services
+ only. (srimanth)
+
+ AMBARI-1800. Add "Admin > Misc" section to Ambari Web to show service user
+ accounts. (yusaku)
+
+ AMBARI-1756. Add ability to install and edit HUE as a service. (srimanth via 
+ yusaku)
+
+ AMBARI-1742. HDFS Mirroring: Edit/Delete Cluster. (Arun Kandregula via yusaku)
+
+ AMBARI-1723. HDFS Mirroring: Edit/Delete Data Set. (srimanth via yusaku)
+
+ AMBARI-1717. Add ability to start and stop all services from Services
+ page. (Xi Wang via yusaku)
+
+ AMBARI-1716. HDFS Mirroring: Add a cluster. (Arun Kandregula via yusaku)
+
+ AMBARI-1710. HDFS Mirroring: Edit/Delete Data Set. (srimanth via yusaku)
+
+ AMBARI-1699. HDFS Mirroring: Side Panel of individual jobs page.
+ (yusaku)
+
+ AMBARI-1698. Host Detail page needs to allow upgrade for host components
+ that failed to upgrade. (yusaku)
+
+ AMBARI-1696. Capacity Scheduler configuration UI. (yusaku)
+
+ AMBARI-1693. HDFS Mirroring: Display Jobs table. (yusaku)
+
+ AMBARI-1691. Add filtering by host-level status on Step 9 of Installer.
+ (Xi Wang via yusaku)
+
+ AMBARI-1668. HDFS Mirroring: Add Data Set Popup. (Arun Kandregula via
+ yusaku)
+
+ AMBARI-1650. Add Oracle and MySQL option for Oozie during Ambari cluster
+ install. (Xi Wang via yusaku)
+
+ AMBARI-1610. Expose ability to customize Hive Metastore log dir.
+ (yusaku)
+
+ AMBARI-1729. Creating smoke test for Hue service. (swagle)
+
+ AMBARI-1776. ZooKeeper Servers need to store correct kerberos principal 
+ in zookeeper_jaas.conf. (swagle)
+
+ AMBARI-1424. Upgrade enhancements for Ambari 1.3.0. (smohanty)
+
+ AMBARI-1763. Integrate Frontend security work to enable security on
+ HBase and ZooKeeper. (jaimin)
+
+ AMBARI-1754. Add support to ensure that Ambari Server/Agent/Store are all of 
+ compatible version. (smohanty)
+
+ AMBARI-1752. Backend support for MySQL and Oracle for Oozie and Hive. (swagle)
+
+ AMBARI-1751. Ambari oracle-linux as a supported OS type. (swagle)
+
+ AMBARI-1728. Cleanup INFO Logging at the ambari agent to make it more useful 
+ and less verbose. (swagle)
+
+ AMBARI-1676. Ambari upgrade to 1.3.0 (core support). (smohanty)
+
+ AMBARI-1708. Remove all hardcoded ports from agent scripts to read from 
+ configs. (swagle)
+
+ AMBARI-1692. Make changes to agent scripts to support secure HBase and Zk. (swagle)
+
+ AMBARI-1707. Upgrade should check if another upgrade request is active as well as 
+ if any MASTER components have not stopped. (Sumit Mohanty via swagle)
+
+ AMBARI-1673. Configuring Hue to work with a secure HDP cluster and making changes 
+ to the Enable Security feature. (swagle)
+
+ AMBARI-1663. Allow adding host components to existing hosts. (Xi Wang via
+ yusaku)
+
+ AMBARI-1653. HDFS Mirroring: Display DataSets table. (yusaku)
+
+ AMBARI-1658. Implement API/Service Provider for HDFS mirroring. (tbeerbower)
+
+ AMBARI-1704. Add ability for host components to provide their current actual configs. (ncole)
+
+ AMBARI-1422. Allow client to specify a "context" value for asynchronous requests (jspeidel)
+
+ AMBARI-1599. Add ability to report actual configuration applied to a host. (ncole)
+
+ AMBARI-1647. Integrate server and agent changes for upgrade on cluster. 
+ (Sumit Mohanty via swagle)
+
+ AMBARI-1626. API support to upgrade host component. (Sumit Mohanty via swagle)
+
+ AMBARI-1601. Server level action support. (Sumit Mohanty via swagle)
+
+ AMBARI-1620. Add heatmaps for Host and Hbase section. (jaimin)
+ 
+ AMBARI-1634. Integrate Frontend Security work to enable security on
+ Oozie, Hive, and WebHCat Server. (jaimin)
+
+ AMBARI-1633. Reassign Master Wizard - Step 5. (yusaku)
+
+ AMBARI-1585. Creating the agent scripts for Hue server installation and 
+ configuration on the Hue host. (swagle)
+
+ AMBARI-1618. HDFS Mirroring: Create Mapper, Model, Mock Data for Cluster.
+ (Arun Kandregula via yusaku)
+
+ AMBARI-1607. HDFS Mirroring: Create Mapper, Model and Mock Data.
+ (Arun Kandregula via yusaku)
+
+ AMBARI-1602. Edit User - drop the requirement to specify the old 
+ password. (swagle)
+
+ AMBARI-1406. Provide API support for including query string in http message body. (jspeidel)
+
+ AMBARI-1592. Change how configurations are propagated (ncole)
+
+ AMBARI-1593. Change host override JSON to include version tag (ncole)
+
+ AMBARI-1555. Upgrade should validate that the from->to version is an allowed 
+ combination. (Sumit Mohanty via swagle)
+
+ AMBARI-1568. Update the version of ambari artifacts to 1.3.0 snapshot (ncole)
+
+ AMBARI-1563. API Support:  Host-component resource should include its current 
+ HA active/passive status. (Sumit Mohanty via swagle)
+
+ AMBARI-1560. Upgrade action/task support in server. (Sumit Mohanty via swagle)
+
+ AMBARI-1553. List cluster-level configurations with host-level, if any (ncole)
+
+ AMBARI-1557. Adding Hue service to the HDP stack definition along with the 
+ necessary configuration properties. (swagle)
+
+ AMBARI-1554. API support for current version of Stack and available versions 
+ to upgrade (Sumit Mohanty via swagle) 
+
+ AMBARI-1511. Add ability to override configurations at the host level (ncole)
+
+ AMBARI-1550. Modify existing puppet manifests to allow installing/configuring 
+ multiple masters. (swagle)
+
+ AMBARI-1545. Integrate Frontend Security work to enable security on HDFS
+ and MapReduce installed cluster. (jaimin)
+
+ AMBARI-1528. Upgrade request support at Ambari. (Sumit Mohanty via swagle)
+
+ AMBARI-1541. Upgrade task support in agent. (Sumit Mohanty via swagle)
+
+ AMBARI-1540. Reassign Master Wizard - Steps 3 and 4 (reconfigure
+ component and review). (yusaku)
+
+ AMBARI-1538. Stack Upgrade Wizard - Step 3 (Progress and Retry). (yusaku) 
+
+ AMBARI-1509. Frontend: For service configurations provide ability to 
+ enter host level exceptions (srimanth)
+
+ AMBARI-1508. Introduce a new section "Capacity scheduler" under MapReduce
+ Service in step7 (Configure services) of Installer and Service 
+ Reconfig. (jaimin)
+
+ AMBARI-1490. Implement initial layout for "Add security" wizard. (jaimin)
+
+ AMBARI-1483. Reassign Master Wizard - Step 2. (yusaku)
+
+ AMBARI-1482. Reassign Master Wizard - Step 1. (yusaku)
+
+ AMBARI-1481. Stack Upgrade Wizard - Step 2 (confirm and check all master
+ components are running). (yusaku)
+
+ AMBARI-1469. Allow user to add multiple HBase masters in Install Wizard.
+ (yusaku)
+
+ AMBARI-1468. Stack Upgrade Wizard - Step 1 (show services and versions).
+ (yusaku)
+
+ AMBARI-1459. Add Admin > Cluster page. (yusaku)
+
+ AMBARI-1454. Service page: add "Reassign <Master Component>" action items
+ to "Maintenance" pulldown. (yusaku)
+
+ AMBARI-1349. Expose host-specific Nagios alerts in Ambari Web. (yusaku)
+
+ AMBARI-1294. Add isEmpty() query operator support. (jspeidel)
+
+ AMBARI-1280. Support explicit predicate grouping in API queries. (jspeidel)
+
+ AMBARI-1180. Display host check status results given by the agent as part
+ of host registration. (yusaku)
+
+ AMBARI-1252. Fetch Nagios alerts through Ambari Server and not directly
+ from Nagios Server. (srimanth via yusaku)
+
+ AMBARI-1237. Expose Nagios alerts via Rest API. (Nate Cole via jspeidel)
+
+ AMBARI-1163. During agent registration and heartbeat, send information about
+ various hadoop artifacts back to Ambari. (Nate Cole via mahadev)
+
+ AMBARI-1194. API support for cascade delete of a specified cluster
+ (Tom Beerbower via mahadev)
+
+ AMBARI-1255. Make the agent hostname determination scriptable. 
+ (mahadev)
+
+ AMBARI-1267. Store example Hive Queries somewhere in Ambari that's easily
+ accessible for demo/test purposes. (mahadev)
+
+ AMBARI-1447. Report current Stack version for all host components.
+ (Sumit Mohanty via mahadev)
+
+ IMPROVEMENTS
+
+ AMBARI-2110. Update hive-site.xml, set fs.file.impl.disable.cache=true.
+ (mahadev)
+
+ AMBARI-2070. Changing service directories should popup a confirmation/warning
+ dialog upon save. (yusaku)
+
+ AMBARI-2061. HBase Heatmaps: clean up labels and units. (yusaku)
+
+ AMBARI-2042. Update Ambari logo. (yusaku)
+
+ AMBARI-2040. Customize Services page: reduce padding to prevent tabs from
+ wrapping. (yusaku)
+
+ AMBARI-1388. Document management API. (tbeerbower)
+
+ AMBARI-2030. Make frontend changes to account for the host component status
+ UNKNOWN. (yusaku)
+
+ AMBARI-2028. Customize Services: make the services display consistent.
+ (yusaku)
+
+ AMBARI-2027. Add validation checks for Add Property on custom site configs.
+ (yusaku)
+
+ AMBARI-2017. Admin Misc page tweaks. (yusaku)
+
+ AMBARI-2022. Service Component metric collection API takes over a minute
+ on large cluster. (tbeerbower)
+
+ AMBARI-2005. When adding a component to a host (after cluster deployment),
+ UI should warn that nagios server need to be restarted. (yusaku)
+
+ AMBARI-2004. Background Operation Popup needs label/styling fixes.
+ (yusaku)
+
+ AMBARI-2003. Hosts tab: clicking on red badge should not toggle "Alerts"
+ filter. (yusaku)
+
+ AMBARI-2002. Dashboard: remove "Version" row from HDFS, MapReduce, and
+ HBase dashboard summary. (yusaku)
+
+ AMBARI-1982. Disallow editing Hadoop log/PID directories post install.
+ (yusaku)
+
+ AMBARI-1981. Expose ability to customize the username for running
+ Ganglia daemons. (yusaku)
+
+ AMBARI-1987. Add unit tests for admin/cluster page and cluster loading.
+ (yusaku)
+
+ AMBARI-1929. Make the default stack and version configurable via mvn build.
+ (yusaku)
+
+ AMBARI-1967. Add smoke test user customization toggle via App.supports.
+ (yusaku)
+
+ AMBARI-1964. Add ability to toggle on/off assigning masters for new hosts
+ being added via Add Host wizard. (yusaku)
+
+ AMBARI-1958. Update alert icon legend on Hosts page. (yusaku)
+
+ AMBARI-1955. Add ability to turn on/off HUE support. (yusaku)
+
+ AMBARI-1941. Create a flag to toggle the support for multiple
+ HBase Masters. (jaimin)
+
+ AMBARI-1940. Create a flag to toggle the support for additional databases 
+ in Hive and Oozie. (jaimin)
+
+ AMBARI-1928. Make experimental Ambari Web features toggleable
+ via mvn build. (yusaku via jaimin)
+ 
+ AMBARI-1906. Make experimental Ambari Web features toggleable 
+ (turned on/off via config file). (yusaku via jaimin)
+
+ AMBARI-1905. Test mode is broken. (yusaku via jaimin)
+
+ AMBARI-1904. Update default stack version to 1.3.0. (yusaku via jaimin)
+
+ AMBARI-1921. Change tasks order in popup on deploy page. (jaimin)
+
+ AMBARI-1913. Security Wizard - "Kerberos security is disabled on the cluster"
+ should not be shown in red. (jaimin)
+
+ AMBARI-1403. Remove SPI dependencies on other code. (tbeerbower)
+
+ AMBARI-1892. Restrict user on a Disable security popup while the process
+ is in progress. (jaimin)
+
+ AMBARI-1879. Show error message when hostname is undefined for quick_links.
+ (yusaku)
+
+ AMBARI-1877. Reassign Master Wizard, Step 2: prevent proceed next without
+ changing target host. (yusaku)
+
+ AMBARI-1876. Capacity Scheduler: implement user/group and admin user/group
+ validation rules. (yusaku)
+
+ AMBARI-1864. Remove package dependency on mocha-phantomjs. (yusaku)
+
+ AMBARI-1844. Need ability to update multiple hosts in 1 PUT call.
+ (tbeerbower)
+
+ AMBARI-1845. Server log is being flooded with log messages.
+ (tbeerbower)
+
+ AMBARI-1825. Minor label change for the buttons to start/stop all services
+ in the side nav. (yusaku)
+
+ AMBARI-1824. Background operations popup: progress bar styling. (yusaku)
+
+ AMBARI-1823. Add hover tooltip for the green host health status icon in
+ Hosts page and Host Detail page. (yusaku)
+
+ AMBARI-1815. After modifying custom configs, properties are shown as blank,
+ leading to corrupt core-site.xml upon save. (yusaku)
+
+ AMBARI-1814. Refactor configurations functionality. (yusaku)
+
+ AMBARI-1809. Specify request context for Smoke test. (yusaku)
+
+ AMBARI-1808. Add ability to customize the smoke test user (frontend changes).
+ (yusaku)
+
+ AMBARI-1807. Disallow $ from usernames created via Misc in Install Wizard.
+ (yusaku)
+
+ AMBARI-1805. Minor text change in the Assign Masters page. (yusaku)
+
+ AMBARI-1798. Explicitly state that FQDNs need to be used when specifying
+ hosts to add to the cluster. (yusaku)
+
+ AMBARI-1785. Provide restart service indicator to all services. (srimanth
+ via yusaku)
+
+ AMBARI-1773. Integrate MySQL/Oracle options for Hive and Oozie. (Xi Wang via
+ yusaku)
+
+ AMBARI-1758. Reassign Master Wizard: Integration and hardening. 
+ (srimanth via yusaku)
+
+ AMBARI-1743. Stack Upgrade Wizard - integrate with Ambari 1.3.0 repo.
+ (srimanth via yusaku)
+
+ AMBARI-1741. Exceptions thrown when using host overrides during install.
+ (srimanth via yusaku)
+
+ AMBARI-1740. Provide restart message in service config page. (srimanth via
+ yusaku)
+
+ AMBARI-1722. Frontend support for LDAP Group to Ambari Role Mapping.
+ (srimanth via yusaku)
+
+ AMBARI-1718. If there is no HBase master in active status, show proper
+ error message. (jaimin via yusaku)
+
+ AMBARI-1701. Stack Upgrade Wizard: Integrate with API. (yusaku)
+ 
+ AMBARI-1700. HDFS Mirroring: Display Jobs table (refactor). (yusaku)
+
+ AMBARI-1697. UI changes on HBase service page for HBase Multi-master/HA.
+ (yusaku)
+
+ AMBARI-1782. Security wizard navigation: Restrict user on step3 until
+ the decision of the step is reached. (jaimin)
+
+ AMBARI-1652. Background operation display enhancements. (yusaku)
+
+ AMBARI-1686. Implement Test IvoryService to functional test mirroring API.
+ (tbeerbower)
+
+ AMBARI-1672. Security Wizard - integrate with cluster-level config API. (jaimin)
+
+ AMBARI-1669. Security Wizard UI tweaks. (jaimin)
+
+ AMBARI-1630. Expose HUE config parameters through Ambari Web. (yusaku)
+
+ AMBARI-1595. Add Oracle option for Hive Metastore during Ambari
+ cluster install. (Xi Wang via yusaku)
+
+ AMBARI-1632. Provide property filtering capability on service config
+ sections. (yusaku)
+
+ AMBARI-1631. Security Wizard - integrate host progress popup. (yusaku)
+
+ AMBARI-1604. Refactor wizard classes. (yusaku)
+
+ AMBARI-1583. Add unit tests for various Ambari Web components. (yusaku)
+
+ AMBARI-1491. Add task plots to job swimlane diagram. (billie via yusaku)
+
+ AMBARI-1584. Stack Upgrade Wizard - integrate host progress popup.
+ (yusaku)
+
+ AMBARI-1581. Host progress popup - generic component for showing progress
+ on async operations. (yusaku)
+
+ AMBARI-1542. Provide remove, restore default, and cancel actions for
+ service config properties. (srimanth via yusaku)
+
+ AMBARI-1580. Stack Upgrade Wizard - resume upon page refresh / login.
+ (yusaku)
+
+ AMBARI-1578. Add host wizard - support assignment of "ZooKeeper Server"
+ and "HBase Master". (yusaku)
+
+ AMBARI-1546. Improve Cluster Management loading screen. (Xi Wang via yusaku)
+
+ AMBARI-1890. Add configuration to test_api.sh script (ncole)
+
+ AMBARI-2026. Add WebHCat to live status checks. (ncole)
+
+ AMBARI-1975. Add Clover coverage (ncole)
+
+ AMBARI-1537. Constrain the width of all wizard popups. (Xi Wang via yusaku)
+
+ AMBARI-1536. Hosts page layout fixes. (Xi Wang via yusaku)
+
+ AMBARI-1502. Add the ability to assign configuration to a cluster. (Nate Cole via tbeerbower)
+
+ AMBARI-1505. Hosts page: add filtering by host status. (yusaku)
+
+ AMBARI-1496. Make all service properties reconfigurable. (jaimin)
+
+ AMBARI-1477. Improve performance for App.statusMapper. (yusaku)
+
+ AMBARI-1484. Reintroduce client-side paging for Hosts table. (yusaku)
+
+ AMBARI-1473. Further optimization of querying host information from the
+ server. (yusaku)
+
+ AMBARI-1472. Update HBase service/host health status. (yusaku)
+
+ AMBARI-1471. Refactor ajax calls. (yusaku)
+
+ AMBARI-1461. Optimize query for getting service and host component status back
+ from the server. (yusaku)
+
+ AMBARI-1460. Optimize query call for retrieving host information. (yusaku)
+
+ AMBARI-1470. Refactor confirmation popups. (yusaku)
+
+ AMBARI-1414. Expose fine grained HDFS capacity metrics in API. (tbeerbower)
+
+ AMBARI-1444. Make install, service & host configuration pages reuse same
+ configuration code. (srimanth via yusaku)
+
+ AMBARI-1457. Improve Job Diagnostics. (Billie Rinaldi via yusaku)
+ 
+ AMBARI-1453. Move Ambari Web application config from initialize.js to
+ another config file. (yusaku)
+
+ AMBARI-1450. Remove hard-coded stack version. (yusaku)
+
+ AMBARI-1458. Externalize strings from views to messages.js. (yusaku)
+
+ AMBARI-1437. Update stack version. (yusaku)
+
+ AMBARI-1429. Update API docs. (jspeidel)
+
+ AMBARI-1430. Increase UI timeout for long running API operations. (yusaku)
+
+ AMBARI-1427. Add ability to increase the time range for the zoomed-in graphs
+ beyond last one hour. (yusaku) 
+
+ AMBARI-1375. Remove text from templates (main). (jaimin)
+
+ AMBARI-1374. Add filter by alerts on the Hosts page. (jaimin)
+
+ AMBARI-1373. Since there is the ability to log in to Ambari Web as 
+ different users, the current user should be indicated. (jaimin)
+
+ AMBARI-1366. Nagios alert tweaks. (jaimin)
+
+ AMBARI-1365. Make Hosts table update dynamically. (jaimin)
+
+ AMBARI-1361. Install progress dialog WARN icon + color. (jaimin)
+
+ AMBARI-1347. Expose host-level alerts via nagios_alerts.php with associated
+ service component names. (yusaku)
+   
+ AMBARI-1348. Externalize strings to messages.js. (yusaku)
+
+ AMBARI-1342. Hive client is not installed on Nagios server host.
+ (jaimin)
+
+ AMBARI-1341. Add Hosts: update the API call for new operator precedence.
+ (yusaku) 
+
+ AMBARI-1340. Enhance Install/Start/Test progress display. (yusaku) 
+
+ AMBARI-1339. Validate usernames in Misc section of Customize Services step
+ in Install Wizard. (yusaku)
+
+ AMBARI-1335. Show validation error when the user specifies target hosts that
+ are already part of the cluster. (yusaku)
+
+ AMBARI-1337. Refactor Job Browser filter. (yusaku)
+
+ AMBARI-1336. Externalize text to messages.js. (yusaku)
+
+ AMBARI-1334. Show hosts that have failed install tasks as "red" to allow the
+ user to easily identify source of failure. (yusaku)
+
+ AMBARI-1333. Add username validation for Ambari local users. (yusaku) 
+
+ AMBARI-1329. Adjust job browser column sizing. (yusaku)
+ 
+ AMBARI-1327. Add Hosts. Remove existing hosts display. (Alexandr Antonenko via jspeidel)
+
+ AMBARI-1326. Remake clearFilters function in app_view (part 3). (srimanth)
+ 
+ AMBARI-1305. Make sure that Ambari Web renders all elements correctly when
+ the browser width is 1024px or narrower (refactor). (Arun Kandregula via 
+ yusaku) 
+
+ AMBARI-1312. Remake clearFilters function in app_view (part2). (Arun Kandregula
+ via yusaku) 
+ 
+ AMBARI-1309. Remove all text from Apps views, controllers, templates to 
+ messages.js. (Arun Kandregula via yusaku)
+
+ AMBARI-1308. Properly display Apps page aggregate summary and data table when
+ there are no data to be shown. (Arun Kandregula via yusaku)
+
+ AMBARI-1306. Change color of rack_local_map to #66B366. (yusaku)
+
+ AMBARI-1311. Host health indicator should have a tooltip showing a few details - 
+ refactoring. (Arun Kandregula via yusaku)
+
+ AMBARI-1303. Remake clearFilters function in app_view. (Arun Kandregula via
+ yusaku)
+
+ AMBARI-1302. Minor label cleanup on Jobs Charts popup. (Arun Kandregula via
+ yusaku)
+
+ AMBARI-1296. Task log popup footer should be statically placed; only the
+ content should scroll vertically. (Jaimin Jetly via yusaku)
+
+ AMBARI-1295. Move cluster name display from the main nav to the top nav.
+ (Jaimin Jetly via yusaku)
+
+ AMBARI-1268. Improve DAG UI. (billie via yusaku)
+
+ AMBARI-1289. App page: remove old code and fix test mode. (srimanth via
+ yusaku)
+
+ AMBARI-1279. Make sure that Ambari Web renders all elements correctly when
+ the browser width is 1024px or narrower. (srimanth via yusaku)
+
+ AMBARI-1274. Shrink top nav height. (srimanth)
+ 
+ AMBARI-1272. Controller javascripts need comments. (srimanth)
+ 
+ AMBARI-1271. On Confirm Hosts page, add a link to show the Host Checks popup 
+ in the success message. (yusaku via srimanth)
+
+ AMBARI-1193. If Install fails, allow user to go back to any previous step so 
+ that the user can retry install with different configuration parameters.
+ (yusaku via srimanth)
+
+ AMBARI-1265. Job Browser - Filter by Input, output and duration. (yusaku)
+
+ AMBARI-1263. Refactoring of User Management code. (yusaku)
+
+ AMBARI-1254. Modify App Browser to use server-side paging/sorting/filtering.
+ (yusaku)
+
+ AMBARI-1258. Minor refactoring of User Management code. (yusaku)
+
+ AMBARI-1253. Use ember-precompiler-brunch npm plugin. (yusaku)
+
+ AMBARI-1236. Display a progress bar during deploy prep. (yusaku)
+
+ AMBARI-1249. Update mock data to make App.testMode work. (yusaku)
+
+ AMBARI-1239. Host health status should show orange when there is at least one
+ slave component on the host with state!=STARTED. (yusaku)
+
+ AMBARI-1248. Refactoring of update, services and hosts mapper. (yusaku)
+
+ AMBARI-1247. Disable links for previous steps in left nav on Summary step.
+ (yusaku)
+
+ AMBARI-1246. Add user minor improvements. (yusaku)
+
+ AMBARI-1245. Do not let the user go back to the previous step while host
+ bootstrap is in progress. (yusaku)
+
+ AMBARI-1244. Install Options - line up the Target Hosts section with the rest
+ of the page. (yusaku)
+
+ AMBARI-1235. Host health indicator should have a tooltip showing details.
+ (yusaku)
+ 
+ AMBARI-1234. On Heatmap host hover, include a list of components running.
+ (yusaku)
+
+ AMBARI-1229. Dashboard - make disk usage pie chart in HDFS summary easier
+ to understand. (yusaku)
+
+ AMBARI-1228. During Install, show "warn" on hosts that have tasks cancelled.
+ (yusaku)
+
+ AMBARI-1225. Add Hosts wizard popup is too small. (yusaku)
+
+ AMBARI-1224. Drop the "all" option from Hosts > Component Filter and
+ Jobs > Users Filter. (yusaku)
+
+ AMBARI-1223. Confirm Hosts page: It looks like hosts disappear if you are
+ on "Fail" filter and click on "Retry Failed" button. (yusaku)
+
+ AMBARI-1222. DAG, Jobs Timeline, and Tasks graphs UI cleanup. (yusaku)
+
+ AMBARI-1221. There is no default sort order on Hosts table and the order
+ changes on every page refresh - should sort by hostname. (yusaku)
+
+ AMBARI-1220. Oozie service summary update. (yusaku)
+
+ AMBARI-1218. Refactor Job Browser User filter. (yusaku)
+
+ AMBARI-1217. Tighten up spacing for the rows in the Hosts table. (yusaku)
+
+ AMBARI-1216. Add filters module. (yusaku)
+
+ AMBARI-1215. Refactor hostComponent isSlaves and isMaster and add update
+ methods for server mapper. (yusaku)
+
+ AMBARI-1214. If any start fails, "warn" the host and the overall install.
+ (yusaku)
+
+ AMBARI-1204. Install Wizard: Re-enable configuration of user/group names for
+ master component daemons. (yusaku)
+
+ AMBARI-1197. Refactor code for graphs. (yusaku)
+
+ AMBARI-1196. Automatically update host-level popup info/logs. (yusaku)
+
+ AMBARI-1189. Add App.Job class. (yusaku)
+
+ AMBARI-1188. Refactor isClient computed property for HostComponent class.
+ (yusaku)
+
+ AMBARI-1186. Add Run class to represent a job run. (yusaku)
+
+ AMBARI-1185. Refactor the method to check if the user is an admin.
+ (yusaku)
+
+ AMBARI-1183. Directories in the service config textarea should not wrap.
+ (yusaku)
+
+ AMBARI-1182. Clean up table header UI for sorting and filter clear "x" for
+ Jobs table. (yusaku)
+
+ AMBARI-1181. Clean up table header UI for sorting and filter clear "x" for
+ Hosts table. (yusaku)
+
+ AMBARI-1198. Ambari API Performance: Parsing of Ganglia json data is slow.
+ (jspeidel via mahadev)
+
+ AMBARI-1213. Cleanup python test cases and introduce third party library for
+ mock testing python code. (mahadev)
+
+ AMBARI-1206. Expose missing metrics on host components. (tbeerbower via
+ mahadev)
+
+ AMBARI-1205. Cannot persist service configuration when service is started
+ (Siddharth Wagle via mahadev)
+
+ AMBARI-1262. Apache Ambari point to dev url, need fix in pom.xml. 
+ (mahadev)
+
+ AMBARI-1207. Remove /hdp as the httpd conf for any of the nagios urls -
+ should replace it with ambarinagios or something else.
+ (mahadev)
+
+ AMBARI-1277. Failing build due to url moved on Suse. (mahadev)
+
+ AMBARI-1288. Change "authorization" to "authentication" in props setup for
+ LDAP. (mahadev)
+
+ AMBARI-1269. Refactor ResourceProvider SPI. (tbeerbower)
+ 
+ AMBARI-1270. Add predicate objects for checking empty resource category.
+ (tbeerbower)
+
+ AMBARI-1286. Set version number property in gsInstaller cluster resource
+ provider. (tbeerbower)
+
+ AMBARI-1287. Monitor for component/service state for gsInstaller resource provider. (tbeerbower)
+
+ AMBARI-1260. Remove hard coded JMX port mappings. (Siddharth Wagle via
+ mahadev)
+
+ AMBARI-1411. Missing unit test coverage for resource providers. (tbeerbower)
+
+ AMBARI-1433. Allow capacity scheduler to be configurable via the API's.
+ (mahadev)
+
+ AMBARI-1435. L2 Cache does not work due to Eclipse Link exception.
+ (Sid Wagle via mahadev)
+
+ AMBARI-1436. Threads blocking on ClustersImpl.getHost for several minutes.
+ (Sid Wagle via mahadev)
+
+ AMBARI-1438. Add new stack definition for new stacks. (mahadev)
+
+ AMBARI-1448. Enabling stack upgrade via Ambari Server. (mahadev)
+
+ AMBARI-1439. rrd file location should be read from global config. (Siddharth
+ Wagle via mahadev).
+
+ AMBARI-1357. Smoke Tests failing on secure cluster. (Siddharth Wagle via
+ mahadev)
+
+ AMBARI-1343. Service Check fails after secure install due to wrong kinit
+ path. (Siddharth Wagle via mahadev) 
+ 
+ AMBARI-1465. Minimize Read and Write locks for createHosts. (Siddharth Wagle
+ via mahadev)
+
+ AMBARI-1466. Optimize ganglia rrd script to be able to respond within
+ reasonable time to queries made by the UI. (mahadev)
+
+ AMBARI-1474. Upgrade stack definition for HBase for 1.2.2 since the version
+ is upgraded. (mahadev)
+
+ AMBARI-1475. Update the version of ambari artifacts to 1.2.2 snapshot.
+ (mahadev)
+
+ AMBARI-1489. Add hadoop-lzo to be one of the rpms to check for before
+ installation. (mahadev)
+
+ AMBARI-1642. Add ability for maintenance mode in Host Role Component in
+ Ambari. (mahadev)
+
+ AMBARI-1797. For global site properties, need a property-to-affected-services
+ map. (mahadev)
+
+ AMBARI-1384. WorkflowJsonService service doesn't use the API framework and is
+ inconsistent with other API's. (billie)
+
+ AMBARI-1871. ambari-agent RPM does not claim ownership of
+ /var/lib/ambari-agent. (Matthew Farrellee via mahadev)
+
+ AMBARI-1870. ambari-agent RPM claims ownership of /usr/sbin. (Matthew
+ Farrellee via mahadev)
+
+ BUG FIXES
+
+ AMBARI-2109. Sanitize KEYS and NOTICE.txt on trunk. (yusaku)
+ 
+ AMBARI-2108. Fix apache rat check issues for ambari (top-level dir).
+ (yusaku)
+
+ AMBARI-2105. Assign Slaves page allows the user to specify a host with no
+ components on it. (jaimin)
+
+ AMBARI-2106. Fix apache rat check issues for ambari-server and ambari-agent.
+ (swagle)
+
+ AMBARI-2102. Confusing message "ls: cannot access /usr/share/java/*oracle*:
+ No such file or directory". (smohanty)
+
+ AMBARI-2104. Fix apache rat check issues for ambari-web. (yusaku)
+
+ AMBARI-2100. HBase throws AccessDeniedException. (yusaku)
+
+ AMBARI-2099. Cluster install failed due to timeout and the user can proceed
+ to cluster management; the user was not presented an option to retry install.
+ (yusaku)
+
+ AMBARI-2101. Hive service check (still) failing with file permissions.
+ (swagle)
+
+ AMBARI-2095. It's possible to get into a state where install retry is not
+ possible if the agent stops heartbeating. (jaimin via yusaku)
+
+ AMBARI-2091. Custom JDK path not used when adding new hosts. (yusaku)
+
+ AMBARI-2089. Post Ambari upgrade, Hive and Oozie fail to start after
+ reconfigure. (Xi Wang via yusaku)
+
+ AMBARI-2084. Wrong host mapping in Assign Masters step. (yusaku)
+
+ AMBARI-2098. Customizing webhcat pid run directory fails service status. 
+ (swagle)
+
+ AMBARI-2076. DataNode install failed with custom users. (swagle)
+
+ AMBARI-2085. UI allows user to set empty value for configs in
+ Advanced category. (jaimin)
+
+ AMBARI-2087. Tasks are not filtered by parent request id. (smohanty)
+
+ AMBARI-2086. Agent on host with clients and DATANODE only seems to schedule 
+ STATUS commands for several other services. (swagle)
+
+ AMBARI-2088. Cluster installation times out at server side too fast. (swagle)
+
+ AMBARI-2083. Upgrade fails on Sles. (smohanty)
+
+ AMBARI-2082. Oozie service check fails. (jaimin)
+
+ AMBARI-2081. changeUid.sh failing during installation. (swagle)
+
+ AMBARI-2079. Can't change service configuration if heartbeat lost from
+ service component host. (yusaku)
+
+ AMBARI-2075. Admin role can't be assigned to LDAP user. (yusaku)
+
+ AMBARI-2080. Cluster name and Background operations indicator should
+ disappear on logout. (jaimin)
+
+ AMBARI-2078. Hive Metastore host not changing on Assign Masters page. (jaimin)
+
+ AMBARI-2077. Update stack mock data to make testMode functional on step4 of
+ installer wizard. (jaimin)
+
+ AMBARI-2076. DataNode install failed with custom users. (smohanty)
+
+ AMBARI-2074. Deployment of HDP 1.2.1 fails on Sles. (smohanty)
+
+ AMBARI-2073. After Ambari upgrade to 1.2.3, MapReduce service check fails 
+ because uid of ambari_qa changed. (swagle)
+
+ AMBARI-2067. hive-site.xml cannot be readonly for clients. (swagle)
+
+ AMBARI-2068. "Preparing to install <component>" message needs spacing.
+ (yusaku)
+
+ AMBARI-1979. Last HeartBeat time and heartbeat status for agent take around 2-3 
+ minutes to update on a server restart. (swagle)
+
+ AMBARI-1983. Add new parameters to improve HBase MTTR. HDPLocal fixes. 
+ (swagle)
+
+ AMBARI-2066. HDFS shortcircuit skip checksum should be removed. (smohanty)
+
+ AMBARI-2056. Show proper error message while user tries to save configurations 
+ of partially stopped service. (srimanth)
+
+ AMBARI-2064. Legend for zoomed-in graphs do not render properly in IE9.
+ (yusaku)
+
+ AMBARI-2063. Admin features not available for user with admin rights under
+ certain conditions. (yusaku)
+
+ AMBARI-2060. Initiate a recommission, on success, the operations dialog says
+ decommission, not recommission. (yusaku)
+
+ AMBARI-2058. Host Detail page: if the host component is in INSTALL_FAILED
+ state, we should let the user reinstall it. (yusaku)
+
+ AMBARI-2055. Oozie reconfig forces the user to enter bogus values for two
+ parameters in order to save any changes. (yusaku)
+
+ AMBARI-2054. If "Install from Local Repository" selected in install wizard,
+ Add Host wizard not working. (yusaku)
+
+ AMBARI-2053. Align "add hosts" button vertically with host health filter.
+ (yusaku)
+
+ AMBARI-2052. Fix delete user popup. (yusaku)
+
+ AMBARI-2065. Hadoop group customization does not take effect. (smohanty)
+
+ AMBARI-2062. Service versions shown during install don't match installed
+ versions. (smohanty)
+
+ AMBARI-2038. Services links on Dashboard connected to incorrect pages.
+ (yusaku)
+
+ AMBARI-2059. Add dependency for Nagios server on Hive Client install. (swagle)
+
+ AMBARI-2044. hive-site.xml permission denied exception. (swagle)
+
+ AMBARI-2057. Gmond left in init after install. (smohanty)
+
+ AMBARI-2051. Remove hard-coded ports from agent scripts - Nagios. (swagle)
+
+ AMBARI-2045. Add Unit test to verify, client re-install for install failed 
+ client. (swagle)
+
+ AMBARI-2041. If a host has a service client installed and the host is down, 
+ service start will fail. (swagle)
+
+ AMBARI-2039. Service check should be scheduled on a client that is on
+ a host in HEALTHY state - use correct state enum. (smohanty)
+
+ AMBARI-2035. "Add local user" button is enabled but nothing happens upon
+ clicking it under certain conditions. (yusaku)
+
+ AMBARI-2034. Disable "Add Component" button in the Host Details page if the
+ host is in UNKNOWN state or !isHeartbeating. (yusaku)
+
+ AMBARI-2033. Decommission DataNode does not have any request context.
+ (yusaku)
+
+ AMBARI-2029. Error when loading /main/services directly. (yusaku)
+ 
+ AMBARI-2039. Service check should be scheduled on a client that is on
+ a host in HEALTHY state. (smohanty)
+
+ AMBARI-2037. Nagios web not installing as expected on Sles11. (swagle)
+
+ AMBARI-1924. Allow for users to customize Ganglia gmetad + gmond user
+ accounts. (smohanty)
+
+ AMBARI-2024. Ambari Server becomes unresponsive after crashing on http reads 
+ on jersey. (swagle)
+
+ AMBARI-2020. Incorrect behavior of "Services" page. (yusaku)
+
+ AMBARI-2018. Hosts page: no filter selection is shown after clicking on
+ "Alerts" filter, navigating away, and coming back to Hosts page. (yusaku)
+
+ AMBARI-2016. Hide Maintenance pulldown if no operation can be performed.
+ (yusaku)
+
+ AMBARI-2015. Host component start/stop causes "Uncaught TypeError: Cannot call
+ method 'call' of undefined". (yusaku)
+
+ AMBARI-2011. Add Hosts gets stuck at 33% (some hosts in the cluster were
+ down). (yusaku)
+
+ AMBARI-2014. Install Wizard/Add Host Wizard Review page: local repo option
+ is always displayed as "No", even when it is enabled. (yusaku)
+
+ AMBARI-2019. Cannot decommission data node (ensure recommission also works).
+ (swagle)
+ 
+ AMBARI-2021. Hadoop installation on cluster with SUSE-11 failed. (smohanty)
+
+ AMBARI-2010. Tasks do not timeout for failed hosts. (swagle)
+
+ AMBARI-2012. Check Ambari-agent process - nagios alert is only being
+ configured on the nagios-server host. (smohanty)
+
+ AMBARI-2001. Filtering on Jobs table does not work under certain situations.
+ (yusaku)
+
+ AMBARI-2000. Undo links still remain after the config changes are saved.
+ (yusaku)
+
+ AMBARI-1999. Clicking on Cancel on the Service Config page should not reload
+ the entire app. (yusaku)
+
+ AMBARI-1998. Action buttons on host details page not formatted properly on
+ Firefox. (yusaku)
+
+ AMBARI-1997. Filtered hosts get out of sync with the filter selection. (yusaku)
+
+ AMBARI-2009. task-log4j.properties file ownership should not be
+ root. (smohanty)
+
+ AMBARI-2008. Using mixed OS overwrites ambari.repo during install. (smohanty)
+
+ AMBARI-1952. hadoop dependency version for ambari-log4j is hardcoded, making
+ it regular expression based to pick latest from the repository. (smohanty)
+
+ AMBARI-2007. Decom DataNode throws JS error. (smohanty)
+
+ AMBARI-1994. Adding component to Host should wire up + adjust
+ associated Nagios alerts. (smohanty)
+
+ AMBARI-1753. Puppet parameter configuration not working as expected. (swagle)
+
+ AMBARI-1978. Deploying HDP-1.3.0 results in several alerts - is it related to 
+ hard-coded port. Incremental update. (swagle)
+
+ AMBARI-1990. After successful registration, going back to the Confirm Hosts
+ or re-installing agents from Install Options page causes host registration
+ to fail. (smohanty)
+
+ AMBARI-1991. Remove unused python files from ambari-agent. (smohanty)
+
+ AMBARI-1984. WebHCat log and pid dirs configs should be under WebHCat >
+ Advanced. (yusaku)
+
+ AMBARI-1989. Add component shows the same component again even if the
+ component is already added/installed/started. (yusaku)
+
+ AMBARI-1988. Hostname pattern expression is broken. (yusaku)
+
+ AMBARI-1986. HDFS General section has disappeared from Customize Services 
+ step of the Install Wizard. (yusaku)
+
+ AMBARI-1985. Incorrect behavior of "Undo" button for password fields. (yusaku)
+
+ AMBARI-1702. Ambari/GSInstallers need to set the value of 
+ mapred.jobtracker.completeuserjobs.maximum. New recommended value. (swagle)
+
+ AMBARI-1983. Add new parameters to improve HBase MTTR. (swagle)
+
+ AMBARI-1978. Deploying HDP-1.3.0 results in several alerts - is it related to 
+ hard-coded port. (swagle)
+
+ AMBARI-1974. BootStrapTest is failing on the master build. (smohanty)
+
+ AMBARI-1968. Hadoop Classpath is being overridden which causes hive
+ server/metastore to fail. (smohanty)
+
+ AMBARI-1973. log4j Appender for RCA should be able to write to the same
+ database being used for Ambari Server (oracle/MySql). (smohanty)
+
+ AMBARI-1972. Stacks2 API implementation using the standard framework is not
+ complete - does not show configuration tags. (smohanty)
+
+ AMBARI-1954. Dashboard does not come up if the upgrade stack does not contain
+ a service with the same name. (yusaku)
+
+ AMBARI-1953. On Add Hosts, the request context for the start phase shows up
+ as "Request Name Not Specified". (yusaku)
+
+ AMBARI-1966. Client install tasks are shown twice in progress popup during
+ start phase of install wizard (update API call to include
+ params/reconfigure_client). (yusaku)
+
+ AMBARI-1965. core-site properties are incorrectly populated in Advanced/
+ General category of MapReduce service. (yusaku)
+
+ AMBARI-1963. Deploying progress bar shows 0 tasks after installation failure
+ and going back to a previous step to retry. (yusaku)
+
+ AMBARI-1962. Host Check popup keeps the "rerun check" button disabled even
+ after it is done and it's hard to know if it's actually run or not. (yusaku)
+
+ AMBARI-1961. Select Services: clicking on "all" selects HUE even when HUE
+ support is toggled off. (yusaku)
+
+ AMBARI-1960. "Back" button can be pressed while host registration is taking
+ process, even though the button seems disabled. (yusaku)
+
+ AMBARI-1959. Cannot login to Ambari after login failure. (yusaku)
+
+ AMBARI-1957. Hosts table: whether the alert filter is in effect or not is
+ not clear. (yusaku)
+
+ AMBARI-1956. Wrong install status shown in Add Service Wizard. (yusaku)
+
+ AMBARI-1951. Ambari agent setup during bootstrap should install the same
+ version of agent as the server. (smohanty)
+
+ AMBARI-1950. Hadoop install failed on SUSE-11.1sp1 cluster with all 
+ services except Hue. (smohanty)
+
+ AMBARI-1949. Reconfiguration of Services has issues and the configurations 
+ save button does not take effect. (srimanth)
+
+ AMBARI-1948. System logs are not present on tasktracker. (swagle)
+
+ AMBARI-1947. Oozie Smoke test fails with errors on the start services/install 
+ page. (swagle)
+
+ AMBARI-1946. Heatmap memory should not include cached memory as part of
+ "used". (Jeff Sposetti via yusaku)
+
+ AMBARI-1944. All Service Smoke tests fail when run with service start. (swagle)
+
+ AMBARI-1939. Make service restart feedback based on supports functionality. 
+ (srimanth)
+
+ AMBARI-1943. Properties that do not map to any global property are not being
+ sent to server. (jaimin)
+
+ AMBARI-1937. Ambari-web installer wizard doesn't work in test mode. (jaimin)
+
+ AMBARI-1927. In background operations popup, requests with same context
+ are showing hosts/tasks info from last request. (yusaku via jaimin)
+
+ AMBARI-1907. Service check commands are not getting created on a
+ cluster install -> start. (yusaku via jaimin)
+
+ AMBARI-1942. Nagios server failed to start. (swagle)
+
+ AMBARI-1938. Update mock data for stack HDP-1.3.0. (jaimin)
+
+ AMBARI-1934. Security vulnerability with Ganglia and Nagios. (smohanty)
+
+ AMBARI-1933. Test failure : testCascadeDeleteStages. (smohanty)
+
+ AMBARI-1931. params/run_smoke_test=true is not taking effect. (smohanty)
+
+ AMBARI-1919. JobTracker History Server failed to come up on 1.3.0 stack
+ and the request for service start is stalled. (smohanty)
+
+ AMBARI-1900. Update the DDL update script to modify the table to include
+ ph_cpu_count. (smohanty)
+
+ AMBARI-1926. One HBase master should have active HA status at all times. 
+ (smohanty)
+
+ AMBARI-1925. Remove "hadoop_deploy" user. (smohanty)
+
+ AMBARI-1915. Client install tasks are shown twice in install progress 
+ popup. (swagle)
+
+ AMBARI-1916. Filter for showing only properties which need restart is 
+ broken. (srimanth)
+
+ AMBARI-1918. Set correct Oozie property for security instead of deprecated
+ property. (jaimin)
+
+ AMBARI-1917. Ambari Core-Site.xml Missing Property for LZO (enabled) -
+ io.compression.codecs. (jaimin)
+
+ AMBARI-1889. Added documentation for configuration (ncole)
+
+ AMBARI-1912. HBase master doesn't come up after disabling security. (jaimin)
+
+ AMBARI-1902. RegionServer does not start in secure cluster. (jaimin)
+
+ AMBARI-1903. Host Exception Popup layout and cosmetic issues. (srimanth)
+
+ AMBARI-1901. Add additional tests for verifying request behavior based on 
+ host role command results. (smohanty)
+
+ AMBARI-1899. ambari-reset does not respect -s. (swagle)
+
+ AMBARI-1898. Update stack definitions for 1.3.0. (smohanty)
+
+ AMBARI-1886. Derived properties not being overridden for hosts. (srimanth)
+
+ AMBARI-1896. Disable editing Capacity Scheduler on host configs. (srimanth)
+
+ AMBARI-1894. Refactor configs of Capacity Scheduler category. (srimanth)
+
+ AMBARI-1893. Parsing new alerts format fails. (srimanth)
+
+ AMBARI-1891. Unable to scroll metric window after browser width 
+ changes. (srimanth)
+
+ AMBARI-1880. stacks2 API uses "type" to refer to config tags and no longer
+ exposes "filename" as a property. (srimanth via yusaku)
+
+ AMBARI-1873. HUE pid and log dir labels are flip flopped. (yusaku)
+
+ AMBARI-1878. Host overrides functionality broken in wizard Step7 controller.
+ (yusaku)
+
+ AMBARI-1875. Restart Service tooltip overlaps another tooltip. (yusaku)
+
+ AMBARI-1874. Add Service Wizard: remove the ability to install master
+ components for already installed services. (yusaku)
+
+ AMBARI-1872. Ambari FE is not setting proper value for 
+ fs.checkpoint.edits.dir (jaimin)
+
+ AMBARI-1869. Permission on agent site.pp files needs to be 660. (swagle)
+
+ AMBARI-1867. Processing API requests takes too long. (swagle)
+
+ AMBARI-1856. Queries for metrics to populate the dashboard graphs don't work
+ with updated Ganglia. (tbeerbower)
+
+ AMBARI-1862. Nagios credentials are freely available at ambari-agent.log.
+ (smohanty)
+
+ AMBARI-1726. It seems upgrades available at the FE are hard-coded to 1.3.0. 
+ (yusaku via srimanth)
+
+ AMBARI-1854. Wizards available for a non-administrator user. (yusaku via srimanth)
+
+ AMBARI-1852. Upon clicking Services > Service > Config, a call to 
+ "configurations resource is made and the server throws 400. (yusaku via srimanth)
+
+ AMBARI-1851. Ambari Web behaves strangely when there is no Active HBase 
+ Master. (yusaku via srimanth)
+
+ AMBARI-1849. Cosmetic problems on HBase Dashboard. (yusaku via srimanth)
+
+ AMBARI-1848. Install Wizard, Step 7: Oozie Database Derby option should say 
+ "New Derby Database", not "Current Derby Database". (Xi Wang via srimanth)
+
+ AMBARI-1860. Master broken - Cannot deploy services. (smohanty)
+
+ AMBARI-1859. Cannot load Nagios Alerts due to 400 Bad Request. (smohanty)
+
+ AMBARI-1842. Collapsible service restart message section should have pointer 
+ cursor. (srimanth)
+
+ AMBARI-1841. Properties that should be exposed in Advanced category
+ are populated in Custom categories. (jaimin)
+
+ AMBARI-1837. Few core-site properties vanished after seemingly benign 
+ reconfiguration. (jaimin)
+
+ AMBARI-1838. Cluster Management > Services > MapReduce > Config throws JS error
+ and the page comes up blank. (jaimin)
+
+ AMBARI-1836. Remove hard-coded ports from agent scripts. (swagle)
+
+ AMBARI-1834. Reduce the number of states that a host component can be in.
+ (smohanty)
+
+ AMBARI-1789. Stopping and then Starting all services doesn't start 
+ NameNode. (smohanty)
+
+ AMBARI-1822. Hue service link points to wrong URL and no smoke test drop
+ down is shown. (yusaku)
+
+ AMBARI-1821. Upgrading component is not very clear and Upgrade action
+ is not available. (yusaku)
+
+ AMBARI-1820. Installer Step 7 - DataNode hosts, TaskTracker hosts, and
+ RegionServer hosts not displayed correctly. (yusaku)
+
+ AMBARI-1819. Ambari Installer: page refreshes upon hitting enter in text
+ fields (Step 1 and Step 7). (yusaku)
+
+ AMBARI-1813. The back button seems disabled during host registration (step 3),
+ but you can actually click it to go back. (yusaku)
+
+ AMBARI-1812. Unable to re-configure core-site. (yusaku)
+
+ AMBARI-1811. Start/Stop service doesn't work. (yusaku)
+
+ AMBARI-1810. Security Wizard - Progress popup is not filtering tasks
+ correctly. (yusaku)
+
+ AMBARI-1806. Maintenance checks issued from frontend do not have request
+ context set appropriately. (yusaku)
+
+ AMBARI-1804. Reassign master should show only the hosts that do not have
+ another instance of the master for HBase. (yusaku)
+
+ AMBARI-1803. Reassign HBase master menu displays multiple entries with no
+ distinction when there are multiple HBase masters. (yusaku)
+
+ AMBARI-1802. Install wizard and subsequent reconfig screens lose 'confirm'
+ password content and show up as red even if the user is not editing these
+ fields. (yusaku)
+
+ AMBARI-1801. After adding hosts successfully, you need to refresh the hosts
+ page manually to see the new hosts. (yusaku)
+
+ AMBARI-1799. On service reconfig, Save button can be clicked even when there
+ are validation errors. (yusaku)
+
+ AMBARI-1796. Specific custom configs do not display after reload.
+ (srimanth via yusaku)
+
+ AMBARI-1768. Cluster install wizard does not succeed at service start.
+ (yusaku)
+
+ AMBARI-1755. Provide context for background operations. (srimanth via yusaku)
+
+ AMBARI-1744. isAdmin doesn't switch after login/out. (srimanth via yusaku)
+
+ AMBARI-1709. When all hosts are assigned a master component, the last host
+ should have all slave components and clients (Step 6). (srimanth via yusaku)
+
+ AMBARI-1695. Customize Services page - validation error count is not reflected
+ in the service tab for host exceptions. (yusaku)
+
+ AMBARI-1675. ASF license header missing from
+ app/templates/main/admin/security/add/step2.hbs. (yusaku)
+
+ AMBARI-1670. Changing service user names from the web UI should also change
+ configuration properties that depend on those user names. (jaimin via yusaku)
+
+ AMBARI-1826. Use service stop and start for Nagios/Ganglia/MySQL rather than
+ puppet artifacts for starting/stopping these services. (smohanty)
+
+ AMBARI-1818. HBase master shuts down immediately after start in a secure 
+ cluster. (swagle)
+
+ AMBARI-1816. Security wizard: Add missing secure configs to the HBase service
+ and make "zookeeper" the default primary name for the zookeeper principal. (jaimin)
+
+ AMBARI-1791. Can not specify request context for smoke test request. (swagle)
+
+ AMBARI-1788. JMX getSpec error filling up server logs. (swagle)
+
+ AMBARI-1787. Nagios script causes Datanode error. (swagle)
+ 
+ AMBARI-1674. Jobtracker metric for maps_completed shows wrong value.
+ (tbeerbower)
+
+ AMBARI-1786. Ambari server start fail after reset. (smohanty)
+
+ AMBARI-1784. MapReduce service damaged after hadoop installation with a
+ custom MapReduce user which contains the symbol '-'. (smohanty)
+
+ AMBARI-1774. Ambari does not push the config updates to the client/gateway 
+ node. (swagle)
+
+ AMBARI-1780. POSTing new cluster returns 500 exception. (smohanty)
+
+ AMBARI-1781. Ambari Server should work with MySQL and Oracle where the 
+ Ambari Server data might be stored. (smohanty)
+
+ AMBARI-1775. Security wizard - Javascript error is thrown when ZooKeeper
+ is included as a secure service. (jaimin)
+
+ AMBARI-1771. On clicking the master component host on the Oozie and Hive
+ service pages, a javascript error is encountered. (jaimin)
+
+ AMBARI-1767. Add ability to customize "ambari_qa" user. (smohanty)
+
+ AMBARI-1770. Hue installation fails due to manifest errors. (swagle)
+
+ AMBARI-1764. Unable to get all tasks from more than one request_id in one
+ request. (tbeerbower)
+
+ AMBARI-1766. Hide Java Home option on step-7 of Installer wizard. (jaimin)
+
+ AMBARI-1765. Enable the Ganglia rrd files location to be configurable
+ when Ganglia is selected as a service. (jaimin)
+
+ AMBARI-1762. SUSE: Unable to start hive. (swagle)
+
+ AMBARI-1761. Update the DDL update script to modify the table to
+ include ph_cpu_count. (smohanty)
+
+ AMBARI-1759. Error in creating host component. (smohanty)
+
+ AMBARI-1757. Add support for Stack 1.2.2 to Ambari. (smohanty)
+
+ AMBARI-1749. Set default heap size for zookeeper. (swagle)
+
+ AMBARI-1748. The JDK option on the UI, when used, is not passed on to the
+ global parameters. (srimanth)
+
+ AMBARI-1747. Added executable permission to the generate monitor/server scripts.
+ (smohanty)
+
+ AMBARI-1747. File ownership needs more consistency for those installations 
+ where root access is hard to get. (smohanty)
+
+ AMBARI-1561. API should return nagios_alerts as a JSON, not a stringified 
+ JSON. (smohanty)
+
+ AMBARI-1507. Should not install HDPHBaseMaster, HDPNameNode and HDPJobTracker
+ ganglia configs on every node. (smohanty)
+
+ AMBARI-1746. Backend support for LDAP Group to Ambari Role Mapping. 
+ (smohanty)
+
+ AMBARI-1506. Installs HBase ganglia configs when HBase not installed.
+ (smohanty)
+
+ AMBARI-1739. HBase and Zk failed to start on secure install. (swagle)
+
+ AMBARI-1732. Oozie service check fails in secure cluster. (jaimin)
+
+ AMBARI-1733. Add service/component specific upgrade puppet files. (smohanty)
+
+ AMBARI-1731. WebHcat smoke test fails for the secure cluster. (jaimin)
+
+ AMBARI-1730. Hive service check fails in a non-secure cluster. (jaimin)
+
+ AMBARI-1724. Agent has it hard-coded that HDP repo file can only be 
+ downloaded once. (smohanty)
+
+ AMBARI-1715. Ambari Agent Unit Test Failure: TestFileUtil.py. (smohanty)
+
+ AMBARI-1533. Add Nagios check for ambari-agent process for each host in 
+ the cluster. (smohanty)
+
+ AMBARI-1713. Need to delete the private ssh key from
+ /var/run/ambari-server/bootstrap/* on the Ambari Server after bootstrap is
+ complete. (swagle)
+
+ AMBARI-1711. Trunk is broken due to an invalid argument to the puppet custom
+ function hdp_default. (swagle)
+
+ AMBARI-1706. Security wizard: "Done" and "back" buttons on Apply step 
+ should be disabled while step is in progress. (jaimin)
+
+ AMBARI-1705. Remove redundant API calls to update service configuration
+ while disabling security. (jaimin)
+
+ AMBARI-1661. For custom advanced properties, a new config with an empty key
+ can be added. (yusaku)
+
+ AMBARI-1659. Arrows often do not show up on config category expander. (yusaku)
+
+ AMBARI-1645. Undo should not be allowed on component hosts. (yusaku)
+
+ AMBARI-1644. Service summary page flickers. (yusaku)
+
+ AMBARI-1689. 500 Exception creating service component during install. (Sumit 
+ Mohanty via swagle)
+
+ AMBARI-1504. Hosts show physical CPUs, instead of cores. (Sumit Mohanty 
+ via swagle)
+
+ AMBARI-1685. Remove running of smoke tests by default when services or 
+ master components are started. (Sumit Mohanty via swagle)
+
+ AMBARI-1688. API support to return 10 most recent requests. (swagle)
+
+ AMBARI-1439. rrd file location should be read from global config. 
+ New patch for reopened bug. (swagle)
+
+ AMBARI-1667. Starting all services fails on secure cluster (excluding 
+ HBase and ZooKeeper). (swagle)
+
+ AMBARI-1666. Oozie properties for principal and keytab not read from 
+ oozie-site. (swagle)
+
+ AMBARI-1660. Server seems to ignore failures if the prior stage has failed 
+ before the next iteration of the scheduler. (Sumit Mohanty via swagle)
+
+ AMBARI-1657. User directories on HDFS do not get created with custom names 
+ provided from Ambari UI. (swagle)
+
+ AMBARI-2072. Fix to remove actual_configs from cluster response. (ncole)
+
+ AMBARI-2036. Fix to send global configs with status_commands to agents. (ncole)
+
+ AMBARI-2025. Fix to restrict how UNKNOWN is assigned to a host-component. (ncole)
+
+ AMBARI-2013. Fix to delete cluster with components in unknown state. (ncole)
+
+ AMBARI-1977. Honor service configs when there are no matching cluster configs. (ncole)
+
+ AMBARI-1976. When host expires, update each component for host with unknown state. (ncole)
+
+ AMBARI-1980. Fix for nagios_alerts element when there is an error. (ncole)
+
+ AMBARI-1865. Fix for upgrade script to copy configurations. (ncole)
+
+ AMBARI-1703. Fix for smoke tests getting configurations. (ncole)
+
+ AMBARI-1678. Fix when there are no service overrides. (ncole)
+
+ AMBARI-1655. DELETE is not successful against ClusterStateEntity. (ncole)
+
+ AMBARI-1439. rrd file location should be read from global config. (swagle)
+
+ AMBARI-1648. Hue configuration - DB properties cannot be empty. (swagle)
+
+ AMBARI-1641. Some map and reduce task metrics are missing for the
+ tasktrackers in the API. (tbeerbower)
+
+ AMBARI-1640. Erroneous property is not highlighted while landing on step 7
+ of the Installer wizard. (jaimin)
+
+ AMBARI-1637. JCE test for policy files fails during secure install. (swagle)
+
+ AMBARI-1621. Config/Reconfig UI should not allow certain configs to have
+ host-level overrides. (yusaku)
+
+ AMBARI-1597. Templeton smoke test fails for secure cluster. (swagle)
+
+ AMBARI-1600. Make component naming consistent. (yusaku)
+
+ AMBARI-1625. Oozie start fails on secure cluster. (swagle)
+
+ AMBARI-1627. Fix to remove host configuration overrides. (ncole)
+
+ AMBARI-1592. Fix configuration propagation.
+
+ AMBARI-1619. Fix for category path separators.
+
+ AMBARI-1616. Error during upgrading Ambari Server from 1.2.0/1.2.1 to 
+ 1.2.2. (Sumit Mohanty via swagle)
+
+ AMBARI-1603. JCE install on ambari-server fails if /tmp/HDP-artifacts does
+ not exist. (swagle)
+
+ AMBARI-1612. Parameterizing nagios and ganglia monitoring rpm version.
+ (Ashish Singh via yusaku)
+
+ AMBARI-1586. Upgrade of Ambari DB on upgrade to 1.2.2 should restore/keep 
+ the configuration data for MAPREDUCE. (Sumit Mohanty via swagle)
+
+ AMBARI-1594. Ambari UI shows failed services while processes are running 
+ on the server. (swagle) 
+
+ AMBARI-1582. Cannot start hadoop services after hdfs re-configuration
+ and ambari server restart. (swagle)
+
+ AMBARI-1570. Dashboard - missing translations. (Xi Wang via yusaku)
+
+ AMBARI-1569. Add AMBARI-1536 and 1537 back. (Xi Wang via yusaku)
+
+ AMBARI-1579. Admin page side nav does not use the correct style and does
+ not highlight selection. (yusaku)
+
+ AMBARI-1552. Missing translations on Dashboard. (Xi Wang via yusaku)
+
+ AMBARI-1549. Cluster name displayed incorrectly in the top nav. (Xi Wang
+ via yusaku) 
+
+ AMBARI-1559. Jobs failed count always returns 0 in the jobtracker API metrics.
+ (tbeerbower)
+
+ AMBARI-1577. Apply stage of security wizard throws javascript error 
+ for loadStep function. (jaimin)
+
+ AMBARI-1575. Service should be shown red when any of its master components
+ is in START_FAILED status. (jaimin)
+
+ AMBARI-1565. Ambari server throws EntityExistsException on transitioning 
+ from INIT to INSTALLED state. (Sumit Mohanty via swagle)
+
+ AMBARI-1564. TestActionManager is failing on master branch. (swagle)
+
+ AMBARI-1561. API should return nagios_alerts as a JSON, not a 
+ stringified JSON. (swagle)
+
+ AMBARI-1492. Add init.d scripts for Ambari server + agent. (swagle)
+
+ AMBARI-1548. Implement Stacks API using the consistent API framework in 
+ Ambari Server with all the get/predicates working. (swagle)
+
+ AMBARI-1544. AmbariManagementControllerTest has extra import that cannot 
+ be resolved. (swagle)
+
+ AMBARI-1539. Stage creation takes on average 1.5 minutes on a large
+ cluster. (swagle)
+
+ AMBARI-1485. Server throws exception when trying to stop a service which is 
+ in stopping state. (swagle) 
+
+ AMBARI-1526. State fields are not returned by default for services.
+ (tbeerbower)
+
+ AMBARI-1527. Allow loading of custom configurations in
+ step 7: "Customize Services" (regression). (jaimin)
+
+ AMBARI-1525. ambari.properties file has an invalid character causing
+ ambari-server setup to fail. (jaimin)
+
+ AMBARI-1524. Service summary for Hive does not display clients properly.
+ (yusaku)
+
+ AMBARI-1520. Alerts take around 20-30 seconds to show up every time you
+ refresh the dashboard. (srimanth via yusaku)
+
+ AMBARI-1523. Ambari API: Resources don't always honor partial response
+ fields restrictions. (tbeerbower)
+
+ AMBARI-1519. Ambari Web goes back and forth between frozen and usable states
+ periodically on a large cluster. (yusaku)
+
+ AMBARI-1499. Add hosts is broken. (yusaku)
+
+ AMBARI-1501. Nagios alerts do not update automatically. (yusaku)
+
+ AMBARI-1503. Ajax call sometimes adds multiple question marks in the query
+ causing bad requests. (yusaku)
+
+ AMBARI-1463. State of HBase region server not updated when instance is shut
+ down on a cluster not installed via Ambari. (tbeerbower)
+ 
+ AMBARI-1494. Browser memory consumption issues. (jaimin)
+
+ AMBARI-1480. Comparison predicate should account for null values. (tbeerbower)
+
+ AMBARI-1467. UI should block on cluster metric api call before making
+ subsequent one. (yusaku)
+
+ AMBARI-1462. PB (petabytes) is shown as "undefined". (yusaku)
+
+ AMBARI-1455. Setting App.testMode=true, alwaysGoToInstaller=true does not
+ render the Dashboard properly upon login. (yusaku)
+
+ AMBARI-1452. Graphs look broken when network connectivity is lost between
+ Ambari Web and Ambari Server. (yusaku)
+
+ AMBARI-1441. Validation for username used in service configs is broken.
+ (yusaku)
+
+ AMBARI-1456. Cannot proceed after bootstrapping in some cases due to a
+ run-time error while running host checks. (yusaku)
+
+ AMBARI-1449. Failure popup shown for reconfiguring HDFS when MapReduce 
+ is not selected. (jaimin)
+
+ AMBARI-1445. Redirect to main app page when testMode flag is set True and
+ alwaysGoToInstaller flag is set False. (jaimin)
+
+ AMBARI-1439. rrd file location should be configurable through UI. (jaimin)
+
+ AMBARI-1479. Query Lexer sometimes fails to properly parse query strings with
+ ignored properties such as 'fields' present. (jspeidel)
+
+ AMBARI-1446. URL used by API to invoke Ganglia rrd script may exceed max length
+ for query string for large clusters. (jspeidel)
+
+ AMBARI-1431. Hosts table no longer allows sorting. (yusaku)
+
+ AMBARI-1376. Wrong calculation of duration filter on apps page. (jaimin via
+ yusaku)
+
+ AMBARI-1165. Change the dashboard graph for HBase since it's using cumulative
+ metrics. (yusaku)
+
+ AMBARI-1372. Three sorting states on jobs table. (jaimin)
+ 
+ AMBARI-1350. UI screen shifts left-right depending on scrollbar. (jaimin)
+
+ AMBARI-1367. Job# for Mapreduce jobs is seen as x. (jaimin)
+
+ AMBARI-1363. Graphs jump around upon loading. (jaimin)
+
+ AMBARI-1362. Alerts for the hosts with ZooKeeper Server grows on every poll. (jaimin)
+
+ AMBARI-1360. Mouse cursor hover behavior is strange on Job Browser. (jaimin) 
+
+ AMBARI-1359. App Browser rows colours should alternate from dark grey to light 
+ grey and back. (jaimin)
+
+ AMBARI-1356. Error in filtering configuration properties maintained in the UI
+ for the WebHcat service. (jaimin)
+
+ AMBARI-1352. Host-level alert badges should only show the total number
+ of CRIT and WARN alerts for the host excluding OK. (jaimin)
+
+ AMBARI-1355. Inconsistent casing and component name for alert title. (jaimin)
+
+ AMBARI-1354. "No alerts" badge on the Host Detail page should be green, not red. (jaimin)
+
+ AMBARI-1353. "Missing translation" shown in Job Browser. (jaimin)
+
+ AMBARI-1351. Provide consistent ordering of hosts in heatmap. (jaimin)
+
+ AMBARI-1344. mapred.tasktracker.reduce.tasks.maximum in mapred-site.xml is not
+ taking effect. (yusaku)
+
+ AMBARI-1345. Alerts are not showing up at all in Service pages. (yusaku)
+
+ AMBARI-1346. The number of task trackers does not reflect the actual number
+ in MapReduce service summary after new TaskTrackers have been added until
+ page refresh. (yusaku)
+
+ AMBARI-1331. Step 8 hangs on deploy task 2 of 59; server has an exception. (tbeerbower)
+
+ AMBARI-1164. Disk info ganglia metrics is broken for some OS. (Dmytro Shkvyra via jspeidel)
+
+ AMBARI-1325. Left border is missing from the main nav. (srimanth)
+ 
+ AMBARI-1324. Job Browser default sort order should be Run Date DESC. (srimanth)
+ 
+ AMBARI-1323. Job Browser's column sizing needs to be improved on Firefox. (srimanth)
+
+ AMBARI-1321. Switching out of Jobs page does not launch popup anymore.
+ (srimanth via yusaku) 
+
+ AMBARI-1313. Alert time jumps between 'less than a minute ago' and 'about a
+ minute ago'. (srimanth via yusaku) 
+
+ AMBARI-1304. When switching jobs in timeline + tasks charts, blank charts show.
+ (Arun Kandregula via yusaku) 
+
+ AMBARI-1317. Deploy progress returns to deploy screen (momentarily).
+ (Arun Kandregula via yusaku) 
+
+ AMBARI-1316. Vertical scrollbar shows regardless of how tall the browser height
+ is (content height is always slightly taller than viewport). (Arun Kandregula
+ via yusaku)
+
+ AMBARI-1315. Inconsistent error/warning status in Deploy step; install
+ stalls. (Arun Kandregula via yusaku)
+
+ AMBARI-1281. Heatmap does not show up if the cluster was installed by going
+ back to a previous step from the Deploy step after an install failure.
+ (yusaku)
+
+ AMBARI-1300. Service status / host component status can get stuck in the
+ green blinking state if stop fails - no further operation can be performed.
+ (srimanth via yusaku) 
+
+ AMBARI-1297. Edit User: if "old password" is not specified and "new
+ password" is specified, password update silently fails. (Jaimin Jetly via
+ yusaku)
+
+ AMBARI-1282. Admin user can lose its own admin privilege. (Jaimin Jetly
+ via yusaku)
+
+ AMBARI-1292. Add hosts should skip host checks on existing list of cluster
+ nodes. (srimanth via yusaku)
+
+ AMBARI-1290. Left border is missing from the summary section on Jobs page.
+ (srimanth via yusaku)
+
+ AMBARI-1278. Cannot proceed from Step 3 to Step 4 in App.testMode (Next 
+ button is disabled). (srimanth)
+
+ AMBARI-1276. Job Graphs need to show x-axis ticks for elapsed time since 
+ submission. (srimanth)
+
+ AMBARI-1275. Incorrect display of the "Background operations" window after 
+ changing the state of a component. (srimanth)
+
+ AMBARI-1273. Edit User: No error message is shown when the user does not 
+ enter the correct "old password". (srimanth)
+
+ AMBARI-1172. Alert status change does not change time for the alerts.
+ (srimanth via yusaku) 
+
+ AMBARI-1264. Service graphs refresh with spinners. (yusaku)
+
+ AMBARI-1257. Separator missing in between Oozie and ZooKeeper. (yusaku)
+
+ AMBARI-1251. Fix routing issues on Add Host Wizard. (yusaku)
+
+ AMBARI-1230. There is a big gap in the lower part of the Jobs table header.
+ (yusaku)
+
+ AMBARI-1212. After successful install with Ambari, the user is taken to the
+ welcome page of the Install Wizard upon browser relaunch if the HTTP session
+ is expired. (yusaku)
+
+ AMBARI-1227. Host-level task popup is not showing the display name for
+ components. (yusaku)
+
+ AMBARI-1226. On Dashboard, links to host components are missing. (yusaku)
+
+ AMBARI-1219. After adding hosts, the number of live TaskTrackers is not
+ updated. (yusaku)
+
+ AMBARI-1176. In some cases, once the Add Hosts wizard has run once, it requires
+ a log out before the Add Hosts wizard can be run again. (yusaku)
+
+ AMBARI-1203. mapred-site.xml default system directory is not set
+ to /mapred/system. (yusaku)
+
+ AMBARI-1200. On some clusters, Nagios alerts show up about 30 seconds after
+ page load, while on others the alerts show up immediately. (srimanth via
+ yusaku)
+
+ AMBARI-1190. Detailed log view dialogs are not center-aligned. (yusaku)
+
+ AMBARI-1187. Dashboard > MapReduce mini chart sometimes shows partial graph and hides recent data. (yusaku)
+
+ AMBARI-1184. After adding hosts, the host count shown in the Dashboard is
+ incorrect. (yusaku)
+
+ AMBARI-1178. Fix use of IP address for JMX metrics request. (tbeerbower
+ via mahadev)
+
+ AMBARI-1191. Datatable API needs work. (Billie Rinaldi via mahadev)
+
+ AMBARI-1211. Ability to configure the same username for all the services in
+ Ambari. (mahadev)
+
+ AMBARI-1231. Replace sudo with su in the ambari setup script since ambari
+ server setup is already run as root. (mahadev)
+
+ AMBARI-1201. Improve Agent Registration and Heartbeat json. (Nate Cole via
+ mahadev)
+
+ AMBARI-1238. AmbariMetaInfoTest getServices() acceptance test failure. 
+ (Siddharth Wagle via mahadev)
+
+ AMBARI-1243. Remove unwanted import causing the builds to fail on linux.
+ (mahadev)
+
+ AMBARI-1233. Directory permissions on httpd /var/www/cgi-bin should not be
+ touched by Ambari. (mahadev)
+
+ AMBARI-1170. For live status checks we should only look at the run
+ directories that we get from the server (only for hadoop and its ecosystem)
+ and not all. (mahadev)
+
+ AMBARI-1250. Upgrade the postgres connector to 9.1. (mahadev)
+
+ AMBARI-1259. Fix the host roles live status not going back to INSTALLED if it
+ was in START_FAILED state. (mahadev)
+
+ AMBARI-1210. Allow capacity scheduler to be attached to host role configs for
+ CS configurability in the APIs. (mahadev)
+
+ AMBARI-1256. Host registration can fail due to mount point info not fitting
+ ambari.hosts::disks_info column. (Sumit Mohanty via mahadev)
+
+ AMBARI-1266. Agent checks packages as part of host check but doesn't tell
+ which ones are needed or conflicting. (mahadev)
+
+ AMBARI-1291. Incorrect directory for MySQL component on SLES-11.1sp1.
+ (mahadev)
+
+ AMBARI-1301. Live status checks don't get triggered on server restart.
+ (mahadev)
+
+ AMBARI-1285. Some host Ganglia metrics may be missing in some cases. (tbeerbower)
+
+ AMBARI-1310. Get rid of mvn warnings. (Arun Kumar via mahadev)
+
+ AMBARI-1314. Hostname test is failing in some environments. (Nate Cole via
+ mahadev) 
+
+ AMBARI-1330. Cluster missing hosts after successful install and restart.
+ (mahadev)
+
+ AMBARI-1358. Clean up alert messages. (Yusaku Sako via mahadev)
+
+ AMBARI-1432. Ambari Agent registration hangs due to Acceptor bug in Jetty for
+ not reading through accepted connections. (mahadev)
+
+ AMBARI-1434. Change state to installed from start_failed if there is any
+ issue in starting a host component. (mahadev)
+
+ AMBARI-1476. Change webhcat-env.sh to export HADOOP_HOME. (mahadev)
+
+ AMBARI-1486. Fix TestHostName to take care of issues when gethostname and
+ getfqdn do not match. (mahadev)
+
+ AMBARI-1495. Out of Memory Issues on Ambari Server when server is running on
+ single core. (mahadev)
+
+ AMBARI-1487. Fix host-level alerts not to alert for tasktrackers not running
+ when MapReduce is not selected. (mahadev)
+
+ AMBARI-1488. Nagios script causes unwanted Datanode logs. (mahadev)
+
+ AMBARI-1497. Fix start up option for ambari-server where there is a missing
+ space. (mahadev)
+
+ AMBARI-1498. Hive service check fails on secure HDP cluster. (Siddharth Wagle
+ via mahadev)
+
+ AMBARI-1299. Bootstrap can hang indefinitely. (mahadev)
+
+ AMBARI-1547. Fix ambari agent test cases that are failing due to missing
+ directory. (mahadev)
+
+ AMBARI-1617. Host check is broken because of changing the serialization from
+ jackson to gson. (mahadev)
+
+AMBARI-1.2.0 branch:
+
+ INCOMPATIBLE CHANGES
+ 
+ NEW FEATURES
+
+ AMBARI-1108. PUT call to change the state on host_components collection
+ returns 200 (no op), even though GET with the same predicate returns a number
+ of host_components. (Tom Beerbower via mahadev)
+
+ AMBARI-1114. BootStrap fails but the api says that it's done and the exit
+ status is 0. (Nate Cole via mahadev)
+
+ AMBARI-1136. Add gsInstaller resource provider. (Tom Beerbower via mahadev)
+
+ AMBARI-1202. Unnecessary use of the xml tree python library in ambari-server
+ setup. It's not being used. (Siddharth Wagle via mahadev)
+
+ AMBARI-1769. Python REST client to invoke REST calls. (Subin M via mahadev)
+
+ IMPROVEMENTS
+
+ BUG FIXES
+
+ AMBARI-1613. ConfigurationResourceProvider doesn't properly handle OR
+ predicate. (jspeidel)
+
+ AMBARI-1179. ambari-web does not compile due to less-brunch package update.
+ (yusaku)
+
+ AMBARI-1126. Change SUSE lzo dependency to only lzo-devel. (Nate Cole via
+ mahadev)
+
+AMBARI-666 branch:
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  AMBARI-1147. Handling Hive/HCat/WebHCat configuration parameters with
+  Ambari Web. (yusaku)
+
+  AMBARI-946. Support retrieving information for multiple requests.
+  (hitesh via mahadev)
+
+  AMBARI-1065. Provide Datanode decommission & recommission capability in UI.
+  (Srimanth Gunturi via mahadev)
+
+  AMBARI-985. Support OR in API query. (Tom Beerbower via mahadev)
+
+  AMBARI-1029. Add api support for updating multiple host_component resources
+  for multiple hosts in a single request. (John Speidel via mahadev)
+
+  AMBARI-1018. Add API support for creating multiple sub-resources to
+  multiple resources in a single request. (John Speidel via mahadev)
+
+  AMBARI-950. Provide API support for 'OR' predicate. (John Speidel via
+  mahadev)
+
+  AMBARI-935. Provide API support for updates of multiple resources in a
+  single request. (John Speidel via mahadev)
+
+  AMBARI-926. Provide API support for asynchronous requests.
+  (John Speidel via mahadev)
+
+  AMBARI-1054. Implement retrying of bootstrap on confirm host page.
+  (Jaimin Jetly via yusaku)
+
+  AMBARI-1048. Integrate slave configuration parameters with respective
+  service on step7 of installer wizard. (Jaimin Jetly via yusaku)
+
+  AMBARI-1031. Check for host registration at step3 of installer wizard 
+  and retrieve information for RAM and no. of cores. (Jaimin Jetly via
+  yusaku)
+
+  AMBARI-1022. Integrate Heatmap UI to backend API. (Srimanth
+  Gunturi via yusaku)
+
+  AMBARI-1015. Create HBase summary section in Dashboard & Service
+  pages. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1014. Hook service summary sections in service pages to API.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1008. Populate dashboard>MapReduce section with API data.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1006. Populate dashboard>HDFS section with API data.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1004. Allow properties entered in custom config
+  (ex: hdfs-site.xml) to override existing or create new properties.
+  (Jaimin Jetly via yusaku)
+
+  AMBARI-1002. Integrate Installer with config APIs. (Jaimin Jetly
+  via yusaku)
+
+  AMBARI-989. Show task logs for each host in the Deploy step of the
+  wizard. (yusaku)
+
+  AMBARI-976. Hook HDFS/MapReduce/HBase/Host graphs to backend API.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-964. Implement summary page of installer wizard. (Jaimin Jetly
+  via yusaku)
+
+  AMBARI-974. Decommissioning of datanodes. (Jitendra Pandey via mahadev)
+
+  AMBARI-975. Fix support for cascading updates to configs. (Hitesh Shah
+  via mahadev)
+
+  AMBARI-971. Add api support for creating multiple resources in a single
+  request. (John Speidel via mahadev)
+
+  AMBARI-970. Add additional Ganglia metrics and JMX properties. (Tom
+  Beerbower via mahadev)
+
+  AMBARI-967. Enhance predicate comparison. (Tom Beerbower via mahadev)
+
+  AMBARI-954. Support installation of Ganglia master and slaves via
+  Ambari Web. (yusaku)
+
+  AMBARI-980. Allow installation of various service components. (yusaku)
+
+  AMBARI-949. Provide metric graphs for individual hosts. (Srimanth
+  Gunturi via yusaku)
+
+  AMBARI-948. Invoke service starts after services are installed in the
+  wizard. (yusaku)
+
+  AMBARI-942. Integrate Install and Start APIs with the installer wizard.
+  (Jaimin Jetly via yusaku)
+
+  AMBARI-936. Provide HBase service specific graphs. (Srimanth Gunturi
+  via yusaku)
+
+  AMBARI-933. Provide service-specific alerts in the service summary
+  pages. (Srimanth Gunturi via yusaku)
+
+  AMBARI-938. Hardcode service name and client component mapping while
+  awaiting meta data integration. (hitesh)
+
+  AMBARI-927. Provide metrics graphs on the MapReduce services page. 
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-916. Provide metrics graphs in HDFS services page. (Srimanth
+  Gunturi via yusaku)
+
+  AMBARI-908. Add ui option to either create a Postgres database for Hive
+  and Oozie or choose existing database. (Jaimin Jetly via yusaku)
+
+  AMBARI-915. Implement App Browser for displaying and navigating Pig/Hive
+  workflows. (yusaku)
+
+  AMBARI-907. Add support for getting multiple objects in controller.
+  (hitesh)
+
+  AMBARI-906. Util to extract hosts for various components. (jitendra)
+
+  AMBARI-903. Various fixes for config handling integration. (Hitesh Shah via 
+  mahadev)
+
+  AMBARI-900. Add configuration mapping support. (Nate Cole via mahadev)
+
+  AMBARI-895. Need name consistency for metrics. (Tom Beerbower via mahadev)
+
+  AMBARI-893. Provide api support for temporal queries. (John Speidel via 
+  mahadev)
+
+  AMBARI-897. Operations request object and skeleton management methods.
+  (jitendra)
+
+  AMBARI-894. TestHeartBeatMonitor fails intermittently. (jitendra)
+
+  AMBARI-892. Add puppet executor at the agent to be able to run various
+  commands from the server. (mahadev)
+
+  AMBARI-887. Ability to save configuration. (Nate Cole via mahadev)
+
+  AMBARI-877. Refactor resource provider implementation for changes to
+  management interface. (Tom Beerbower via mahadev)
+
+  AMBARI-876. Put metrics under metrics category. (Tom Beerbower via 
+  mahadev)
+
+  AMBARI-890. Add client library option to step6 (Assign slaves) of
+  installer wizard. Also add indicator and popovers for hosts with
+  master component. (Jaimin Jetly via yusaku)  
+
+  AMBARI-889. Provide cluster metric graphs on Ambari main dashboard.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-886. Support filters in controller get* apis. (hitesh)
+
+  AMBARI-880. Implement Review Page (Step 8) for the Ambari Installer
+  Wizard. (Jaimin Jetly via yusaku)
+
+  AMBARI-872. Hookup Nagios alerts section in Ambari UI to backend
+  server. (Srimanth Gunturi via yusaku)
+
+  AMBARI-871. Integrate basic set of rest APIs with ambari-web
+  installer wizard. (Jaimin Jetly via yusaku)
+
+  AMBARI-884. Implement Dashboard/Service summary. (yusaku)
+
+  AMBARI-882. Group-based DataNode/TaskTracker/RegionServer overrides.
+  (yusaku)
+
+  AMBARI-881. Implement Add Hosts Wizard. (yusaku)
+
+  AMBARI-869. Util to deserialize ExecutionCommand. (jitendra)
+
+  AMBARI-874. Fix hostinfo reporting at the server and add a unit test for
+  deserialization of the host information from the agent. (mahadev)
+
+  AMBARI-873. Support for multiple objects' updates in controller. (hitesh)
+
+  AMBARI-870. Support metric types other than string. (Tom Beerbower via
+  mahadev)
+
+  AMBARI-868. Clean up site.pp generation on the agent and remove the imports
+  in the sample site.pp. (mahadev)
+
+  AMBARI-862. API query against /clusters doesn't return any data.
+  (John Speidel via mahadev)
+
+  AMBARI-866. Add ORM layer for the FSM's in the server. (mahadev)
+
+  AMBARI-853. Add more complete JMX metrics. (Tom Beerbower via mahadev)
+
+  AMBARI-852. Improve REST API functionality regarding query and partial
+  response. (John Speidel via mahadev)
+
+  AMBARI-865. Add unit test for action queue on the agent. (mahadev)
+
+  AMBARI-851. Hook up Ganglia property provider. (Tom Beerbower via mahadev)
+
+  AMBARI-863. Fix mvn tests to be able to run the python tests cleanly.
+  (mahadev)
+
+  AMBARI-849. Stage planner implementation. (jitendra)
+
+  AMBARI-860. Remove code that adds fake stages for testing. (jitendra)
+
+  AMBARI-856. Add cluster heatmap. (yusaku)
+
+  AMBARI-855. Create the skeleton for a custom data adapter in Ambari Web.
+  (yusaku)
+
+  AMBARI-854. Serve ambari-web from jetty. (Jaimin Jetly via yusaku)
+ 
+  AMBARI-850. Flatten ExecutionCommand structure. (jitendra)
+  
+  AMBARI-848. Various tests for FSM and Controller impl. (hitesh)
+
+  AMBARI-847. Run pyunit tests from the maven test target and also auto-generate
+  the openssl passphrase. (mahadev)
+
+  AMBARI-845. Fix NPE in the server to be able to run the server api's.
+  (mahadev)
+
+  AMBARI-844. Mock JMX provider for manual tests. (Tom Beerbower via mahadev)
+
+  AMBARI-841. Fix comparison predicates in case where resource does not have
+  property value. (Tom Beerbower via mahadev)
+
+  AMBARI-833. Add missing Path annotation to rest services for
+  put/post/delete. (John Speidel via mahadev)
+
+  AMBARI-838. HACK: Add a thread in server to inject requests for testing.
+  (Jitendra via mahadev)
+
+  AMBARI-835. Update JMXPropertyProvider. (Tom Beerbower via hitesh)
+
+  AMBARI-832. Merge ambari-api with ambari-server. (mahadev)
+
+  AMBARI-822. Implement an agent simulator for unit testing. (jitendra)
+  
+  AMBARI-829. Add unit tests for ResourceProviderImpl. (Tom Beerbower via
+  mahadev)
+
+  AMBARI-831. Move manifest generation into the ambari agent directory.
+  (mahadev)
+
+  AMBARI-828. Manifest generation for various actions from the server.
+  (mahadev)
+
+  AMBARI-827. Add clusterName to the status of the commands run by the agent.
+  (mahadev)
+
+  AMBARI-824. Provide basic management functionality (create/update) in the
+  rest api. (John Speidel via mahadev)
+
+  AMBARI-826. Bug in processing command reports. (jitendra)
+
+  AMBARI-825. Controller layer implementation part 3. (hitesh)
+
+  AMBARI-823. Fix security filter on the server agent ports and remove
+  duplication on servlet contexts for certs signing. (mahadev)
+
+  AMBARI-821. Implement basic service state update and verify flow to
+  ActionManager. (hitesh)
+
+  AMBARI-812. In API, improve partial response support to drill down n levels.
+  (John Speidel)
+
+  AMBARI-791. Add unit tests and java docs for SPI code. (Tom Beerbower)
+
+  AMBARI-820. Remove JAXB dependencies in Server Agent protocol and move to
+  POJO based jackson serializer. (mahadev)
+
+  AMBARI-819. Management controller implementation work. (hitesh)
+
+  AMBARI-811. Bug fix in jaxb serialization for maps. (jitendra)
+
+  AMBARI-810. Controller layer implementation part 1. (hitesh)
+
+  AMBARI-807. Fix Action scheduler tests because of fsm interface changes.
+  (jitendra)
+
+  AMBARI-806. Remove State object as configs/stack version/running state are
+  handled as standalone entities. (hitesh)
+
+  AMBARI-805. Add requestId tracking objects for management spi. (hitesh)
+
+  AMBARI-803. FSM initial refactoring for eventual live/desired objects. (hitesh)
+
+  AMBARI-800. Hack to add a stage for testing in in-memory db. (jitendra)
+  
+  AMBARI-801. Fix heartbeat message from the agent which is causing NPE at the
+  server. (mahadev)
+
+  AMBARI-778. Ensure data flows across all steps in installer wizard.
+  (Jaimin Jetly via yusaku)
+
+  AMBARI-799. Prototype for management spi part 3. (hitesh)
+
+  AMBARI-797. Prototype for management spi interface continued. (hitesh)
+
+  AMBARI-795. Fix failing tests for AgentResource and BootStrap. (mahadev)
+
+  AMBARI-793. Make MapReduce, Nagios, and Ganglia optional during cluster
+  install. (yusaku)
+
+  AMBARI-794. Add log4j properties for logging at the server. (mahadev)
+
+  AMBARI-790. OK in registration response. (jitendra)
+
+  AMBARI-787. Registration throws HostNotFoundException for new hosts. (jitendra)
+  
+  AMBARI-788. Fix server and agent startup for end to end testing. (mahadev)
+
+  AMBARI-785. Action response unit test. (jitendra)
+
+  AMBARI-783. Fix guice injection in the server. (mahadev)
+
+  AMBARI-784. Add Resource download API on the server. (mahadev)
+
+  AMBARI-781. Registration unit test. (jitendra)
+
+  AMBARI-754. Heartbeat handler: Registration response should query component 
+  status. (jitendra)
+
+  AMBARI-755. Heartbeat handler: Update state as reported in heartbeat. 
+  (jitendra)
+
+  AMBARI-756. Heartbeat handler: Handle heartbeat timeout. (jitendra)
+
+  AMBARI-767. Add bootstrap script to ssh in parallel and setup agents on a
+  list of hosts. (mahadev)
+
+  AMBARI-764. Integrate REST API. (Tom Beerbower via mahadev)
+
+  AMBARI-762. Implement Confirm Hosts page for Ambari installer.
+  (Jaimin Jetly via yusaku)
+
+  AMBARI-763. Implement Installer Step 6 (Assign Slaves). (yusaku)
+
+  AMBARI-760. Fix injection in data access objects to use guice provider.
+  (mahadev)
+
+  AMBARI-759. Add puppet scripts to the agent for configuring/installing
+  various services and add security aspects to api's and server/agent.
+  (mahadev)
+
+  AMBARI-749. Complete Java side implementation of bootstrapping agent hosts.
+  (mahadev)
+
+  AMBARI-757. Implement Installer Step 4 (Select Services). (yusaku)
+
+  AMBARI-751. Re-structure servicecomponenthost fsm layout. (hitesh)
+
+  AMBARI-732. Action scheduler unit tests. (jitendra)
+
+  AMBARI-739. Cluster fsm implementation. (hitesh)
+
+  AMBARI-738. s/Node/Host/g. (hitesh)
+
+  AMBARI-737. ServiceComponentNode FSM implementation. (hitesh)
+
+  AMBARI-722. Action scheduler implementation. (jitendra)
+  
+  AMBARI-733. Add Jersey Resource for BootStrapping and JAXB elements for API
+  entities. (mahadev)
+
+  AMBARI-730. Add unit tests for jersey apis on the server. (mahadev)
+
+  AMBARI-725. Add commandstatus/result/error objects into the rest API between
+  server and agent. (mahadev)
+
+  AMBARI-723. Implement Installer Welcome page and Install Options page.
+  (Jaimin Jetly via yusaku)
+
+  AMBARI-726. ORM-based data access layer for new design. (Jaimin Jetly via hitesh)
+
+  AMBARI-728. Initial work on ServiceComponentNode FSM. (hitesh)
+
+  AMBARI-724. Add tabs, dynamic form generation, validation errors, and info
+  popovers for the Customize Services page in the Installer. (yusaku)
+
+  AMBARI-714. Job FSM Impl and tests. (hitesh)
+
+  AMBARI-721. Remove the Hardwareprofile class since it's not needed anymore.
+  (mahadev)
+
+  AMBARI-720. Tweak basic styles for Installer. (yusaku)
+
+  AMBARI-719. Enable Responsive Design. (yusaku)
+
+  AMBARI-716. Add back TestNodeImpl and fix memory types and disk info
+  serialization. (mahadev)
+
+  AMBARI-717. Starter implementation for Installer Customize Services page.
+  Stylize top nav and implement static footer.  Stylize login page. (yusaku)
+
+  AMBARI-711. Create utility functions related to localStorage for first two
+  steps: cluster name and Install options. Also develop view logic with
+  preliminary validations for these two steps. (Jaimin Jetly via yusaku)
+
+  AMBARI-715. Integrate domain objects and Rest serialized objects. (mahadev)
+
+  AMBARI-713. Initial work on Job FSM. (hitesh)
+
+  AMBARI-712. Action manager skeleton. (jitendra)
+
+  AMBARI-710. Basic registration and heartbeat protocol implementation between
+  the server and the agent. (mahadev)
+
+  AMBARI-709. Getting hardware info on disks/cpu/others using facter and using
+  it during registration. (mahadev)
+
+  AMBARI-707. More work on Node FSM and additional tests/cleanup. (hitesh)
+
+  AMBARI-706. Basic tests for Node FSM. (hitesh)
+
+  AMBARI-705. Initial work on Node FSM. (hitesh)
+
+  AMBARI-703. Heartbeat handler classes. (jitendra)
+
+  AMBARI-702. Add skeleton for Ambari agent that talks to the server and
+  collects information for host. (mahadev)
+
+  AMBARI-696. Add interface for ActionManager to access live state. (hitesh)
+
+  AMBARI-698. Add a simple server and artifact generation to run a server with
+  a simple api check. (mahadev)
+
+  AMBARI-697. Ambari Web (browser-based UI) skeleton. (Jaimin Jetly and yusaku)
+
+  AMBARI-695. More basic class restructuring for new design. (hitesh)
+
+  AMBARI-694. Class to encapsulate stage. (jitendra)
+
+  AMBARI-691. More basic classes for new design. (hitesh)
+
+  AMBARI-693. Classes for request objects. (jitendra)
+
+  AMBARI-685. Basic classes. (hitesh via jitendra)
+
+  AMBARI-676. Separate directory for ambari-server. (jitendra)
+
+  IMPROVEMENTS
+
+  AMBARI-1159. Check the log/run dir locations to make sure they are absolute
+  paths. (yusaku)
+
+  AMBARI-1156. Dashboard > HDFS pie chart should hover with details. (yusaku)
+
+  AMBARI-1154. The check boxes to check/uncheck one of the members in a
+  multi-artifact graph are not very readable. It should be more apparent which
+  one the user clicked on. (yusaku)
+
+  AMBARI-1106. User-specified custom configs (such as hdfs-site.xml overrides)
+  should be persisted to maintain what the user specified.
+  (Jaimin Jetly via yusaku)
+
+  AMBARI-1103. Need to be able to reliably recover from the case when the browser
+  is closed during deploy (Step 8 post submission, Step 9) of the wizard.
+  (Arun Kandregula via yusaku)
+
+  AMBARI-1099. Hive Service Summary needs to show service components better.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1097. Restrict user from proceeding ahead of step 7 (Service
+  configuration) when properties specified in custom-box are already exposed
+  on the page. (Jaimin Jetly via yusaku)
+
+  AMBARI-1102. Error handling when errors are encountered during preparation
+  for deploy. (Arun Kandregula via yusaku)
+
+  AMBARI-1096. Create heatmap legend entries for missing data/invalid hosts.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1078. Improve graph message when data is not available.
+  (Srimanth Gunturi via yusaku)
+ 
+  AMBARI-1146. Exclude hosts and include hosts config parameters need 
+  clarification. (yusaku)
+  
+  AMBARI-1074. CPU Usage chart needs better idle time display. (Srimanth
+  Gunturi via yusaku)
+
+  AMBARI-1072. Change text on alerts "about XX hours ago". (Srimanth Gunturi
+  via yusaku)
+
+  AMBARI-1145. Cluster Management refactoring. (yusaku)
+
+  AMBARI-984. Add support for exposing filesystem type for mount points
+  at host level. (hitesh via mahadev)
+
+  AMBARI-973. Ensure zookeeper service check run after zookeeper start.
+  (hitesh via mahadev)
+
+  AMBARI-965. Stop events should be handled at all valid points for safe
+  recovery. (hitesh via mahadev)
+
+  AMBARI-959. Handle passing repo information from server to agent.
+  (hitesh via mahadev)
+
+  AMBARI-951. Integrate meta data to be able to run service checks after
+  a start of a service. (hitesh via mahadev)
+
+  AMBARI-932. Add initial hooks to trigger smoke tests on service starts.
+  (hitesh via mahadev)
+
+  AMBARI-924. Rename job to action. (hitesh via mahadev)
+
+  AMBARI-922. Use stack name and version in fsm layer. (hitesh via mahadev)
+
+  AMBARI-982. Add ability to set rack info for a host.
+  (Nate Cole via mahadev)
+
+  AMBARI-981. Add configuration to host_component request.
+  (Nate Cole via mahadev)
+
+  AMBARI-931. Support for /hosts end point. (Nate Cole via mahadev)
+
+  AMBARI-912. Test case for ConfigurationService.
+  (Nate Cole via mahadev)
+
+  AMBARI-1021. Ambari-agent init script needs to be aware of already
+  running/not running agent process. (Pramod Thangali via mahadev)
+
+  AMBARI-1019. Add methods to get postgres user name/password or any
+  regex validated string input from user.
+  (Pramod Thangali via mahadev)
+
+  AMBARI-1007. Add aggregate IO stats to workflow data web service.
+  (Pramod Thangali via mahadev)
+
+  AMBARI-1000. Use FQDN instead of hostname when registering an agent with
+  Ambari server. (Pramod Thangali via mahadev)
+
+  AMBARI-1066. Rename Charts section to Heatmaps. (Srimanth Gunturi via
+  mahadev)
+
+  AMBARI-1056. Expose CapacityRemaining JMX metric to NAMENODE
+  ServiceComponentInfo. (Tom Beerbower via mahadev)
+
+  AMBARI-1055. Refactor SPI Request interface to remove PropertyId.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-1049. Tune Ganglia request. (Tom Beerbower via mahadev)
+
+  AMBARI-1037. Implement an efficient way to provide Ganglia data for
+  heatmap and other cluster visualization tools.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-1001. Cluster level Network, Load, CPU and Memory metrics in API
+  needed for dashboard page. (Tom Beerbower via mahadev)
+
+  AMBARI-996. Expose metrics and properties for UI. (Tom Beerbower via mahadev)
+
+  AMBARI-972. Refactor resource provider implementation to move inner classes.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-943. Add Host metrics. (Tom Beerbower via mahadev)
+
+  AMBARI-929. Show HBASE_REGIONSERVER metrics. (Tom Beerbower via mahadev)
+
+  AMBARI-928. Enable end to end testing of Request and Task resources.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-925. Return RequestStatus through ClusterController.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-923. ResourceProvider changes for Request and Task resources.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-911. Implement an efficient way to provide Ganglia data for heatmap
+  and other cluster visualization tools. (Tom Beerbower via mahadev)
+
+  AMBARI-930. Map update to PUT and create to POST.
+  (John Speidel via mahadev)
+
+  AMBARI-1053. Dashboard page loads very slowly due to the hosts?fields=* API
+  call. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1051. Dashboard page takes a long time to load. (Srimanth Gunturi via
+  yusaku)
+
+  AMBARI-1041. Additional metrics need to be added to Heatmap UI. (Srimanth
+  Gunturi via yusaku)
+
+  AMBARI-1040. Cluster heatmap: green should always mean "good". (Srimanth
+  Gunturi via yusaku)
+
+  AMBARI-1039. Improve Nagios alerts time display. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1036. Service Info/Quick Links do not display external hostnames.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1035. Aggregate creation of multiple services and assignment of host
+  to cluster. (Jaimin Jetly via yusaku)
+
+  AMBARI-1034. Metric Charts - display local time rather than UTC.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1033. Nagios and Ganglia links should use public host names in URLs.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1030. Metrics links in web ui should link to Ganglia UI. (Srimanth
+  Gunturi via yusaku)
+
+  AMBARI-1025. Display total install and start services time on summary page
+  and polish summary page ui. (Jaimin Jetly via yusaku)
+
+  AMBARI-1023. Dashboard page should handle API sending JSON as strings and
+  object. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1011. Create 2 missing HDFS service graphs. (Srimanth Gunturi via
+  yusaku)
+
+  AMBARI-1003. Nagios sections should use backend API to populate. (Srimanth
+  Gunturi via yusaku)
+
+  AMBARI-1062. Convert Apache license header comment style in Handlebars files
+  to Handlebars comments rather than JavaScript comments. (yusaku)
+
+  AMBARI-1061. Data loading refactoring for cluster management. (yusaku)
+
+  AMBARI-1060. Data loading for App Browser. (yusaku)
+
+  AMBARI-993. Hook up login with server authentication. (yusaku)
+
+  AMBARI-1059. Refactor cluster management. (yusaku)
+
+  AMBARI-1058. Implement data loading. (yusaku)
+
+  AMBARI-956. On unavailability of non-master components, the host with the
+  least number of master components should install all slave and client
+  components. (Jaimin Jetly via yusaku)
+ 
+  AMBARI-990. Refactor App Browser. (yusaku)
+
+  AMBARI-979. More refactoring of App Browser code. (yusaku)
+
+  AMBARI-947. Make it easier to test Deploy (Install, Start + Test) step
+  of the wizard. (yusaku)
+
+  AMBARI-978. Refactor App Browser code. (yusaku)
+
+  AMBARI-977. Refactor Wizard and Cluster Management code. (yusaku)
+
+  AMBARI-941. More refactoring of Wizards in Ambari Web. (yusaku)
+
+  AMBARI-919. Partial refactoring and consolidation of code for various
+  wizards. (yusaku)
+
+  AMBARI-918. Update styles in Cluster Management. (yusaku)
+
+  AMBARI-917. Update layout and flow for App Browser. (yusaku)
+
+  AMBARI-888. Add more tests for controller implementation. (hitesh)
+
+  AMBARI-891. Initial work to refactor the Wizards in Ambari Web. (yusaku)
+
+  AMBARI-883. Improve user interactions on Confirm Hosts page of the
+  Installer. (yusaku)
+
+  AMBARI-859. Tighten up the layout for the Install page of the Installer.
+  (yusaku)
+
+  AMBARI-857. Refactor Install Options page for the Install Wizard. (yusaku)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+  AMBARI-1628. Tasktracker remains in STARTING state, preventing Ambari
+  from displaying proper status. (Sumit Mohanty via swagle)
+
+  AMBARI-1160. Cannot add a hostname that has a number next to a '.' in it.
+  (yusaku)
+
+  AMBARI-1139. Disable Misc section in Customize Services page of the Install
+  Wizard. (Srimanth Gunturi via yusaku) 
+
+  AMBARI-1158. Filters are not working correctly on the Hosts page. (yusaku)
+
+  AMBARI-1157. Host component operation causes lags in status/action pulldown
+  update. (yusaku)
+
+  AMBARI-1144. Cannot save changes to ZooKeeper configuration.
+  (Arun Kandregula via yusaku)
+
+  AMBARI-1155. Change "Save and apply changes" button on configs section to
+  "Save". (yusaku)
+
+  AMBARI-1153. Host hangs in status 'Preparing' if the host name is wrong.
+  (Arun Kandregula via yusaku)
+
+  AMBARI-1132. Stopping service doesn't cause blinking status until refresh.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1143. tmpfs filesystem being added to the list in the dir used by
+  Ambari. (Arun Kandregula via yusaku) 
+
+  AMBARI-1142. On Notification Popup, clicking "go to nagios UI" doesn't
+  load nagios UI. (Arun Kandregula via yusaku)
+
+  AMBARI-1125. Graphs "degrade" over time. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1124. Boxes on services page need min height or something to keep
+  it from visually cutting off info. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1123. Ambari heatmaps and host information shows infinity for disk
+  space used. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1141. In some cases, clicking "Register and Confirm" button does
+  not do anything. (Arun Kandregula via yusaku)
+ 
+  AMBARI-1140. Resuming deploy for Installer/Add Hosts does not work if the
+  browser is shut down during the start phase of deploy.
+  (Arun Kandregula via yusaku)
+  
+  AMBARI-1120. Key spikes in HDFS IO missing from IO summary graphs.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1119. Service Summary pages no longer show service-specific info.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1118. Dashboard > HDFS title's free capacity doesn't match summary.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1115. Host component live status is broken. (Srimanth Gunturi via
+  yusaku)
+
+  AMBARI-1113. Install Wizard: Confirm host stuck at Preparing stage.
+  (Arun Kandregula via yusaku)
+
+  AMBARI-1112. Add hosts fails second time around. (Srimanth Gunturi via
+  yusaku)
+
+  AMBARI-1111. Install wizard step 9 gets stuck at 0% and other issues on
+  page refresh. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1110. After clicking the deploy button on the Add Hosts wizard, the
+  user is always taken to the Installer Wizard Step 8 upon login.
+  (Arun Kandregula via yusaku)
+
+  AMBARI-1152. Add Hosts wizard - Retry button does not trigger call to
+  backend. (yusaku)
+
+  AMBARI-1104. Webhcat configuration not setting templeton-libjars.
+  (Jaimin Jetly via yusaku)
+
+  AMBARI-1151. Reconfigure fails silently; it's not firing any API calls due
+  to a JS error. (yusaku)
+
+  AMBARI-1098. Switching services does not update various UI elements.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1150. Installer Wizard - Retry feature in Deploy step (Step 9) is
+  broken. (yusaku)
+
+  AMBARI-1092. dashboard > Summary > capacity pie chart keeps changing colors.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1091. 2 parallel requests for service information resulting in JS
+  exception. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1090. Restrict user from applying service configuration when custom box
+  properties are already exposed on the management config page.
+  (Jaimin Jetly via yusaku)
+
+  AMBARI-1149. HIVE_METASTORE needs to be started as a Hive component.
+  (yusaku)
+
+  AMBARI-1088. HDFS capacity chart numbers are incorrect. (Srimanth Gunturi
+  via yusaku)
+
+  AMBARI-1084. Heatmap displays NaN. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1081. HDFS disk capacity on dashboard is seen as negative number.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1148. Fix incorrect labels for configuration parameters. (yusaku)
+
+  AMBARI-1080. Host disk & memory graphs have incorrect values.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1077. The value for dead nodes is not getting populated on UI.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1073. Remove cluster name entry from top titlebar. (Srimanth Gunturi
+  via yusaku)
+
+  AMBARI-1071. Nagios alerts not updating in UI. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1063. Workflow Web Service potentially leaks DB connections upon
+  exceptions. (yusaku)
+
+  AMBARI-962. Update of host components fail when service name is not passed
+  in. (hitesh via mahadev)
+
+  AMBARI-945. Fix 500 errors on get resources api. (hitesh via mahadev)
+
+  AMBARI-944. Fixes for meta info layer. (hitesh via mahadev)
+
+  AMBARI-913. Fix all apis to return correctly filled status response.
+  (hitesh via mahadev)
+
+  AMBARI-999. RUBYLIB env variable expanding forever. (Pramod Thangali via
+  mahadev)
+
+  AMBARI-1069. HDFS Disk Capacity in HDFS Service Summary is totally off.
+  (Srimanth Gunturi via mahadev)
+
+  AMBARI-1068. Dashboard cluster level graphs showing only 45 minutes of data.
+  (Srimanth Gunturi via mahadev)
+
+  AMBARI-1067. Service > MapReduce map slots reserved / occupied are backwards.
+  (Srimanth Gunturi via mahadev)
+
+  AMBARI-1057. Can't reset ambari-server due to failing drop/create database DDLs.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-1052. UnsupportedPropertyException thrown from update.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-1050. Host metric values coming in with 0 values.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-1047. Create Configuration API call is throwing 500:
+  UnsupportedPropertyException. (Tom Beerbower via mahadev)
+
+  AMBARI-1044. API is not returning Ganglia metrics for one of the hosts
+  in the cluster. (Tom Beerbower via mahadev)
+
+  AMBARI-1043. Updates with queries that contain non primary key fields
+  may update resources that don't satisfy the query.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-1038. Ganglia setup does not allow for subsequent changes to the
+  cluster. (Tom Beerbower via mahadev)
+
+  AMBARI-1027. Fix missing block metrics for NAMENODE.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-1026. Resolve overlap between JMX and Ganglia metrics.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-1020. Start time, end time and resolution not set correctly for
+  rrd.py call in Ganglia property. (Tom Beerbower via mahadev)
+
+  AMBARI-1016. Initial API calls after Ambari Web install resulted in 500
+  (ArrayIndexOutOfBoundsException); Ambari Web stuck at "Loading..."
+  (Tom Beerbower via mahadev)
+
+  AMBARI-1012. Fix race condition in DefaultProviderModule.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-1010. Fix extra comma in rrd.py output. (Tom Beerbower via mahadev)
+
+  AMBARI-1005. No Ganglia/JMX metrics data are coming through.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-997. Fix HBASE JMX properties. (Tom Beerbower via mahadev)
+
+  AMBARI-994. Host metrics API servlet not filtering on given fields.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-988. Update resource drops property values.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-969. GET on temporal data throws 500. (Tom Beerbower via mahadev)
+
+  AMBARI-968. Task resources not returned under request resources.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-961. Sub-resources and metrics through API are broken.
+  (Tom Beerbower via mahadev)
+
+  AMBARI-1046. Heatmap with no numbers on the hover. (Srimanth Gunturi via
+  yusaku)
+
+  AMBARI-1045. Service summary sections have incorrect values displayed.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1042. Heatmap UI fluctuates between white and green colors
+  intermittently. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1032. Host hover in cluster heatmap showing mock data. (Srimanth
+  Gunturi via yusaku)
+ 
+  AMBARI-1028. MapReduce & HDFS summaries should use ServiceComponentInfo
+  values. (Srimanth Gunturi via yusaku)
+
+  AMBARI-1017. Alerts not showing up in Ambari UI due to model refactoring.
+  (Srimanth Gunturi via yusaku)
+
+  AMBARI-1013. Host metrics charts should use live data. (Srimanth Gunturi
+  via yusaku)
+
+  AMBARI-1009. Cluster level graphs need to use API for data. (Srimanth
+  Gunturi via yusaku)
+
+  AMBARI-1064. App Browser fixes. (yusaku)
+
+  AMBARI-995. Deploy logs not shown for failed tasks. (yusaku)
+
+  AMBARI-992. Logout does not clean application state properly. (yusaku)
+
+  AMBARI-957. Adding a host whose hostname is the same as the one the user 
+  is accessing Ambari Web with breaks the Installer. (yusaku)
+
+  AMBARI-953. Fix navigation issues in installer wizard due to regression.
+  (Jaimin Jetly via yusaku)
+
+  AMBARI-899. Use slf4j-api 1.5.5 rather than 1.6.6. (yusaku)
+
+  AMBARI-902. Fix ZooKeeper badge allocation logic for single host and 
+  multiple hosts installation in step 5 of the installer wizard. (Jaimin Jetly
+  via yusaku)
+
+  AMBARI-896. Resolve all navigation related issues for Step6 (Slave and
+  Client component) of installer wizard. (Jaimin Jetly via yusaku)
+
+  AMBARI-914. Fix issues related to Slave Component Group in Installer.
+  (yusaku)
+
+  AMBARI-909. Pass correct cluster info to Action Manager. (hitesh)
+
+  AMBARI-904. Ensure state changes only happen after actionmanager persists
+  actions. (hitesh)
+
+  AMBARI-905. Fix puppet site creation with flattening of execution commands
+  sent from the server. (mahadev)
+
+  AMBARI-885. Fix miscellaneous issues related to Ambari Web. (yusaku)
+
+  AMBARI-879. Installer skips Confirm Hosts page of the wizard when testing
+  locally without Ambari Server. (yusaku)
+
+  AMBARI-878. Various tests for FSM, controller and state objects. (hitesh)
+
+  AMBARI-858. Installer -> Select Services page: warning popups are no longer
+  appearing. (yusaku)
+
+  AMBARI-846. Select Masters Page: make ZooKeeper addition/removal UI more
+  organized. (Jaimin Jetly via yusaku)
+
+  AMBARI-840. Hitting browser refresh should not clear present step data that 
+  had already been persisted to local DB. (Jaimin Jetly via yusaku)
+
+  AMBARI-843. Fix more null pointers for partial request objects. (hitesh)
+
+  AMBARI-842. Fix null pointer exception during adding of hosts to cluster. (hitesh)
+
+  AMBARI-839. Temporary fix for server start order. (hitesh)
+
+  AMBARI-837. Fix basic injection issues for controller impl. (hitesh)
+
+  AMBARI-836. Fix generation of requestId to be unique across restarts. (hitesh)
+
+  AMBARI-834. Use RoleCommand instead of ServiceComponentHostEventType for HostAction
+  in Controller. (hitesh)
+
+  AMBARI-830. Various fixes and tests for controller implementation. (hitesh)
+
+  AMBARI-808. Handle appropriate start/stop/install/.. events at their respective
+  failed states. (hitesh)
+
+  AMBARI-798. Fix import issue due to move of Predicate class. (hitesh)
+
+  AMBARI-780. Make FSM related changes for heartbeat handler. (hitesh)
+
+  AMBARI-774. Re-enable and fix AgentResourceTest. (mahadev)
+
+  AMBARI-773. Change Host FSM as per new requirements of heartbeat handler. (hitesh)
+
+  AMBARI-753. Fix broken compile as a result of re-factor of FSM layout. (hitesh)
+
+  AMBARI-752. Add missing license header to TestServiceComponentHostState. (hitesh)
+
+  AMBARI-718. Fix installer navigation. (yusaku)
+
+  AMBARI-684. Remove non-required dependencies from pom files. (hitesh via jitendra)
+
+  AMBARI-680. Fix pom structure. (hitesh)
+

+ 16 - 0
DISCLAIMER.txt

@@ -0,0 +1,16 @@
+Apache Ambari is an effort undergoing incubation at the Apache Software 
+Foundation (ASF), sponsored by the Apache Incubator PMC. 
+
+Incubation is required of all newly accepted projects until a further review 
+indicates that the infrastructure, communications, and decision making process 
+have stabilized in a manner consistent with other successful ASF projects. 
+
+While incubation status is not necessarily a reflection of the completeness 
+or stability of the code, it does indicate that the project has yet to be 
+fully endorsed by the ASF.
+
+For more information about the incubation status of the Apache Ambari project,
+see the following page:
+
+http://incubator.apache.org/ambari/
+

+ 198 - 0
KEYS

@@ -0,0 +1,198 @@
+pub   4096R/876EF43B 2012-07-02
+uid                  Vikram Dixit <vikram@apache.org>
+sub   4096R/5403F44B 2012-07-02
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v2.0.18 (Darwin)
+
+mQINBE/x5NsBEADYgkrr80ezsl80Zn7D9bNk5IjFMjMLYlUMR1vVcF55UPe0eFce
+ZMWkOsdRxRl9oTYY9fYm3R6ph2gl7GaiDItTfr1NzIM8jwfGGQWD0bt8bMDzGGWh
+gcs/XUYbmYoisoC6LfElAmUktCvZH7H6nVNQjCWFtNcg5EWWAOwnmsUJiJSDbnaN
+XKmqzBCrtel4hJduXGG5aWwDGTJR9qvtKK/ROWho4RUNBBWg3UARCl0PGGcZ3oyE
+uAtbm5xGjmhi1H6JloAm/zQEABIYt8Jaqt6yn8pQWBoTWlt8cVSDCU63VnPC+v7v
+ydnweaLPuKT+tDekaoeLP5OhL72pp5Z+ncRQrbcy2p0XZSUTgUZ7l6LYIFMnDadU
+QRI/mXxtaDQOE6VzwH33GjXMMnweqiUYOWCfMBQUhLL5WGSs16lLOl1OdA7YCYEy
+uWQ8KewGkghcUM0HhTKI9FDAl4skMeDRpadtJmCttV0QpQpvf4AePzkzwZ+nq8Yg
+1scf3O/nVIK2xDxmOarCVVtI67DCkgEMg8evMy7Qi/6CA/arLz0IulczRAQ7+Xp/
+/iWowVbM5dRs9xOWMJLU4pU6EL2HFUyAbbXsvsJ3cX2z50i4yhFR6f9HipyK57M9
+CEReUkW8biwbdmxAl8fNNz9eBBiH0jgveSrTjMNqXwBBivwJHZrUlRmYRQARAQAB
+tCBWaWtyYW0gRGl4aXQgPHZpa3JhbUBhcGFjaGUub3JnPokCOAQTAQIAIgUCT/Hk
+2wIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQqbtzWodu9DvXXRAAsb/T
+N3u0rFaMqBre1+l53yRqUpSfFbgz83gZdJnT5UcqlPReyKKpFCDSkkFGL9jtre3z
+5NRKKyCnqkiWrfVWiBN4S30ByWo3nFY8Tjk2sve32PbHRzyPF7UiPoXf1DgNHGsu
+0FGjhHrNV8+Bfs11JCRAbZD3FMmplUTjxpRXgHnR3Rj1ZSfynDwNfdmFbwezZ7VB
+8VR4m/YYejU4q0YRhN8PkNqBuNJaNaSNJLr2f7MUVSJBfZ/Mim87o7/FM41QyZwC
+GYCJngTNwVYursbMrXwHlhFT4AdGZEzohTL95TGMGCz2ENddIJchhO94W/Qi7i++
+seDndFjucZC5f69f8ao/ztutdC8KFZm5TnrKOjs1j5L23PKENsgjuSDPLy+hFFSl
+wasQ2QF9XhCMd7X09ApvxUKig7uZqxHWIopIpJNiL2zMfykcNIbgGrq1YoMDJsAC
+TYuo9nlzG9MiYv6n+8waMgdISNh9S0fM2xqhTmtSkenh3gstpYbQrh9YIv8N//RO
+GselFgcefgo8f0cG8ke/LQXVMFaB7Rc0N48NvszHJdUh4+qFwcWLRCS12tLNK0Y6
+sRGCn+4AJucyLNE/TtNFqKyzsTkFaFiMhGJ+n/tC6t0ZXpVN9VOEhTXMBopBImrC
+jS2PuQl7wBt4vhq3YFJwvVnpTBcqeOND4ytLBAaJAhwEEAECAAYFAk/x5qIACgkQ
+vlqqC6IQwJWG4BAAoSPx09cWfTaOxdsXKIJH3rFyU1bMSBstAu3mE88DcnxSdcCl
+z00BKMSW9ErOMjGP5t3heQM3ojkF7Z5Gs+jLFRimWWztgo8n7Dr6Advs+vLRBc/z
+AXgfmCZlWwe+OiSQfu0WFjVCGPa9NYGv2X6b537XkCAkpfK0pCR3EHCzjt8DRyup
+Itllb6Yf4n0MoEBX+8XGGXpybpP5oy/xti96Qm/EtSIOdZTYUIEIHIgxqICKfa43
+WpL+Dw4uWMtrB4O7aUZ4bT7w/UKpSAyJgDf6SaDM9h/KZsNGPi5+YLNIVaL6XtHu
+nsn/MMVssE2UjFQf2kErkh5cfUqkRT9G7HCNVlrRFmaxTL5ZzVmqpgUnVmJSJeWU
+hyN7CM7UPY8NhZwgPr8T1/JA+L66bwdfx+WIHqFVwTf1qU2nLNFV8MDz2Tf2fgB4
+R4tEwF1jgNXmwaZqKNttR4EwROy0vv1vbxOfjtczrneiDBKKF+g/ygH8t6/q+t3p
+uvg+7M0wV6oPQgWqThTcA6geM+NJBa/pwUVJVPUDOiouybJpG4wacxYc39YXEgbQ
+K+tSOu37ETmmWcW9ytu4UdZSHn+L+hf9SznRd7hmUKi2AIf+8w7jXn2WRkB8FC/E
+1z+8oNLcJJaWfhfVLXrMPXsvirG7tfdPYmSDtUimbjIOQ6e7PzaO7l7mw+G5Ag0E
+T/Hk2wEQAKKeSNhAfQWWBRWm0T6dRk+oA3OZs6wNUWM138sFdivKGP+r6RotLjV6
+OKBJ1RlVMnCSdQ2e0UKcu+X4HfpFannbgAxbP33/B8xRZssIgGQ7yEimTyVcSWsf
+6gOFnN62ahvpU0BSdlFv0dxb6zgmJK9YWrWzPHDpu1EwPE6BbDGF81Hp1xTuiCkW
+Cy7ZD8tZne/mPksh1zePHL+e4QzScgEJFgfEDfwXNKnkFVoI7NkTfvDBQgZnVZeI
+QUQv5xItFidW8clJRJTuq3t6JVlrjSE/5fhrbCM77r1LK1G7FUfX222i1sPP5UZJ
+rYNpXoDSR4HKF31t3q0qWl8xWLOfYlL7ZkPTC4nunjQzLd2DCXX6Ej3pgP2m9KBw
+5L3kNvv9BUrHLHZHJ21kfxu0hhJ5vKDFtmHcx2K4pi4MqZ6TL5mzTwllevKbdfV3
+yITzFbWby4sEhPEpSCDTzbr4KO0SyaS26r5MmOy/OVArFrS+R88NkidTnoMMi7Rz
+fvnIm/6s0zfYw1/vH/ROaLKqAzXJ+GL8xpxP78TOoXdBXVxgAnXM2QwqKSjuYMbW
+sSF7RnJ/sUjHderRjFqEdxB+04SMT9rzmOgnd24+pvkioXQ3p1WzbHErBRVZOqGf
+DZLYFbWG6EimusKTHrwIIcrJ9QvMbu/ecIIVBv+ykAc62vUhrpRfABEBAAGJAh8E
+GAECAAkFAk/x5NsCGwwACgkQqbtzWodu9Dsw9xAAttckPz9L024vOlBhXNexWKX9
+UPkuR+3T8y0DqJryTMFt/3YEiQhmkc/0qywHUt9kO+J6cutTBjJfiZuOW8BBI5pl
+oE9rYLXpoamAOaE7ZzDq6/qKbo9f1Iz/H8SOh2qwrtfZT/8A66/mBJRL5j5H9RsD
++HYWHNEDs2Dp94RGI0k74NHJ5j0ui4e/X97D43Nk3Pvdo272dGAlF5r4dJ03MVYK
+HPa4NsscRFafjvzQEo7rIHJduplrllfEuqitJkVAZVwdecwZP0DSMCQCcP71CExN
+hYs8wkRjayfEDLU/KW3C31CS/9fJzNE+3NLDa4mcovK8nMm3T8+pz6wzYGo6nH0H
+P9AbVlLqhaG8MfSOG8p6MsxcfV+WP5ITVmisHy70v+pY2y/rPwZD74s2WxNt3mU2
+WQ9DhbbensIyZnrqmUOStipGy7diMS9wt6s34vYVfMQudpWDBlkZZP+1bXbbNkMy
+zWxIjrDoB4QNJAcUqdsPoYo26731gQhpfc0WRHnXWb4AX6nOCgCmCPpUnWB8fzzV
+q8NjvW09GT7Ls9llrf4IXG8kjX2PZRIfaGSa556PJjdD3xJWgTEP78i0zJTWQLku
+9Iu+B/4xqFS/wF4572o7n3K4b+zA3ZIN6h5SlT+DpMwy8+rymDgkf7Gk3whvl9Hq
+IWC8DKsT+np6ZGfoE58=
+=zb0j
+-----END PGP PUBLIC KEY BLOCK-----
+
+pub   4096R/8EE2F25C 2011-10-24
+uid                  Mahadev Konar (CODE SIGNING KEY) <mahadev@apache.org>
+sub   4096R/1F35DF4C 2011-10-24
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: SKS 1.1.0
+
+mQINBE6lnjMBEADP0KP2/sPGL/wDz9a8zYB0z1jyMxCXg9s5FFRNEbzPb9UGfyPtvkhC4yay
+Mn72Adq4XO+PbEWCG/aNopGC28kFYOCFDurx8LkwIQQ7/JiBZymvEcUNhgP4NNK770MKUNqZ
+FD5XJGNqJXLbNhrzJxTeqmuqjFy8uwiIvB2DNKWeW5kpxuzI9sAYpDZ3VDLsxl2lFINk8+PZ
+Fc531iqL0lOBR1q6deBL6bAP4os/Qz3Tx58GBv5MS9yg6GjAubCHD9WokAoOw7DECROOsoZA
+WQo5rVCQC8h10Az7nzJSS1W/f7XcK7BkMMN2oNDG+VTDqaohw/cGfFiQMnoIdgD9PZHF9Z8O
+Um1jLKHjGFS/gQW0i1F7IlwCsWDa7xBnnWZpo2Y+QKbI4UBDL8yfss/3m82xKeeODn7mhXsj
+iVOohAWUHczv3QgsIB6VXjloO7FelnR0QUxqvOfBdpuApLa/eSBGz0WD/OMXRPzNMz9xNfGm
+zbJMWcCZbD/m/f8OQxEJq3Psg6j1cWtzgUgeB/E6wt5zd/6IJEt7yqa03iR+0F6lwcN5NE5b
+AnAIbQUWZNNeDCdGP9dyDHtl95ATzbZo1vNC70YGNR2Xolt2luZ3YcL3ml/enrEpbYuk7WFS
+BNPJNHe+Yz3+Z/TI+8X/4CrY4X4OZ/q2SPGllLZhf3Pjgnjp9QARAQABtDVNYWhhZGV2IEtv
+bmFyIChDT0RFIFNJR05JTkcgS0VZKSA8bWFoYWRldkBhcGFjaGUub3JnPokCHAQQAQoABgUC
+TqX/iAAKCRD60FwqxSaUuIOmEACJcD7+qzpWYUBjfbRtUUs8y2H7D286UkPprDmHof7Gp2L8
+T57jXans/g1cxBVX8YBBGV1LE3fIjpW02xFElGAKBJzlT8ovTHvJk2bDQTfjPeK8Kdd0a61x
+u2Zuij7YVkyIZzdTOTz+g/QeSzH60jkEg7z5n0pNwLUbF2q2mM2RYLaIwSuGyP1Z2Rx2kC7I
+9/v7/RHUuSjT1QKZSOLf7IRR+v8InlkYGKFGdpmr4Gc3CI7xTGP89nn7eV1Y4/s0vE2WqoPV
+Pt+NLbEoPTlM6EI4Ejuz5SwmcB6AvRt53haBpOyCyjz9jn+dgSSay/hBeTQzrtmWa5ZEx8gg
+RntGhHx3cnNfs4UrXXmZ79tLaEpjJf3TnRUeEI8rfapSmITq0ZudWk8oidHaoNqOgKJdTELd
+AS94Cx6flqHt/I1XiNH7+VwKLMLqQYYjZ9H8dJSbwVqGy9E2H2hkiSGIcaiv9UrFoakhoFyd
+TGMnH9e9Q/puDcEmCZBvHZJwLpmog/XorgoNH3aenOZvhMFEaSopXR4JWmLRkrdtF2mOa1N1
++6/NNuEIcGgswjIdVN0TCcns/yD7Oj3yiiLVpfZ4ropker75f6jKvOGyVCP8lrnw38lTPHgz
+xTI3Sw91dHeUEfloiIkzArOBeKXAG1lHvIVUydveYewcXl6nwQ3mU6r7ra17EIkCHAQQAQoA
+BgUCTqZWogAKCRASCefxPQySuRaQD/9Gq/qthzQjsVdNnFMNzcm14fQngyP0aJTHhSVAv+G5
+5pTdh8Xwgc4RNoNKzr3jCcRyDakGwkKgpcS86AbKnFXF4+bHftCEZvXGB2a2RxOmwaDt/cyR
+QzMW5FoxMFN3Wan9hIAMzya9KL6zf5Ok+IgBJ4vZvbtamkSCokLtVH74CvxvaoCiFcPLe7Io
+pkvmXXM2W25V3HC0hUG+e1DvJO4AFOnOluyxp56mLL2AoDxZKKCqq8GWYsn1wfE6YYeg3yzs
+FFpTM6D7YVhjz4LnGBkesbSUXffzR0O44CP/R94Rh4ZzisRFKB+2TivMtneWXZ+xIOgDoki5
+JnZezGeEADPjs+cXCZVlL7een8xVkexO42/NBRHFnXFmAfg8aEWC5ECIOYELJX6WKmRCVJZs
+uqbTNjUcdSLXXgG8Kk5Q1zPoCRvkICaQ0ZsMz/fop1zalsGau+GozicDD6ETW5yEC+ZejvOc
+zEEET0gTpWYUXE1b/SXw2/30X3MPj2XToj/5BYbMUvRLSF3D5srNYisDw/jXXxNkmNkklDsQ
+sIMfdyzh/8ePfBtw2ozn+QhYh1aq8PX4ykGbpiI4gIcPNIbiMJfb2Pza1PA+kdq3qF7EhVqm
+jrhIRUkoHs8AErPIAdaR91ZtOLKR7DZpzls7EYf1OYw4JnevRR9sLY/xHbZ0GNDpSokCOAQT
+AQIAIgUCTqWeMwIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQDf9JLY7i8lxC/Q//
+SDU98cEzcAOj+i4aM5HYQlixtX5twk/bc/UnteklN2Kj/v+a12JVZO/PUxEIl+Bjo2Vqg4i0
+lQq6izHQTSesy1X9/XB3KqKujfVzSxtxgJ8TadcqYF02jt+dtg9DxlnOSU6D+KJJiYXMCsWD
+7cRMA7IRckXjuM+4adfgrYKYbIQaxNkzgwuUVMf++iHo40jM/xz/GrWaqqLiKhoCKlCtoW7/
+TsA174BxbtcYo2733uAiGbV3fFjS6fJ5uF1qKNWqpnMIacVPj2K+rHXEbauYN76fCqjU8PsK
+oO/M4FjnQHXp2LfOaVEhg6NTI1/kwTuiNHartGzUPi2kEZALml3/H9rcaSDnN6iifynMgzHh
+gFg1HCticWHuDtdPSVUnGRvTZ6aLjNd2KdVezy8yOTh1UFVNwRC1uRMqGZilXXL3azhl29T3
+c1OLcK+/QdyRSo27Q55NdHR1F+UJy65o1w8VsXwO1FQ8GFWWCcGTmPMTj33on12ujImCrPEE
+NzZXbj4K1QvpnuIXN9mnK4YGzhdxCc0aPUwXn5bD/SMurKn0WR0MUC63N9dVzZYqJNQ0Uoq1
+HwJfXVaOYKIr46ca4R8QGsMWjMKm4Dfp9R95Tw4a79WL/GU1+fSejYcJL6uExLTNlMohQFEw
+3c2UvsPM2QeF3sWKCFLcc2sSN7EuZj2dXhO5Ag0ETqWeMwEQAO0w6KIsF8ktj1If0x4NoNdV
+RN8/zHuVbmCOxa9djvYQJVi+Rj1iBypH7ndTSPZc+61wARfBGolLZwOKSOy5OEeXFCHhI6Px
+ony0nD/r+t6IL5m1l8CJgzAL0H2cEiLCPP+PkDNfpAOeOnyD/jBzy6+sHR7bIUAilGeDGO9T
+V3PGrNhKGeXhqbCbTyNjrRA7wdFJzRU3UZr3Yh393BVLAhOvG1zvXWuOehNutfEJ5lpeblEV
+PN1ik46uRqXQIxn/2bqh7Y3PmZEPe0/X9p1AFqGKDOlbT4EmiSLzYEhtuyK3FgAZi7N4jkNC
+AaNO0o9obu22ZsVtXkEb77ilomgCUsUSll32YYJOJXE4Jkne4fb9F5WUxVmRPyI19X4v2kMO
+x8/DnH12vvZXO/9W3yL/PJSn00ZJYJ4ZgFw1XTMelZLSVm2ddZAuontWcBWFUh01/KWgCV5g
+oz7jwYmEfr+TAFpqG2EpG6h0f76AP0RdzB+BK/SmErauAR9p8Jjlc0G8U7iqi/qD78f88DPf
+G4PChir31Ir6h8yJUPWZawlwesYrMsTEXiQ1QZUjK+xGIHCJGLa0crzXRxeQC7rDe5s/gn/d
+/qUcqLc8MeiOYdJosag2XydE589Up3vHvsKlIzZcSwti5D0ygKO+T/T8PHBxLGGRrFzzxSEP
+iq8NsTCHU1HfABEBAAGJAh8EGAECAAkFAk6lnjMCGwwACgkQDf9JLY7i8lx/kw//Xm3rS74H
++mBGJVZVMn6MRnOzemPCgh7xe54mm/G2yZUf+xMfngL8axFEce7BeHSVTvmXnzGbcLgW0L4E
+uboSQE1xPbJgH91/6NS6qMXk4dqi71asSW4lEW73PcI6ndCoh7XIcBmD4jv78KktNThNIIW1
+eVj4EeCceJjQ3tkwyDeny5WYeS/qk8BFnBaDR8A6n5AToR/SdsDIFTbQlsaweFjDUt1fEr2x
+7euZm9VzblMXle0FCzNVrIHpi/uE+QuI1dbXKML06VhQ25FdBe3rKtYcxXs0+OumvdpKDP/b
+DhKGfjhYvsPlechIAUntL3QDQzGIJT8DaI1m+laSwL5hwULtNB1kWtXwsGzdnqKaGYVSfKzt
+LKq8cMsd1I+a1TMmF+G85Rs6/ehy0B3P7IA5z9HUd2/jWck6GyaXoek9gILMZRvJB/mW+rKR
+PDjMykmAhIrc7XeE91gH9mAuxRDfzRL8NSS6afjYE6YBFHuZvmfJD1PDyWo1Bx/hPIe0+7aT
+dBoiSpylJQu8D/iHUrPCqPKgcPk20I55bZi0A56mxKu0hTr6a1dFLXBKQdw668SilPzSKoYS
+M1W8Qb/rtySBESobUQYeTceavNl+hXBoGBDQXnSWSIHMFdVc+euwLPgHKVLiXqMtOha/XnE+
+7kmI6Gh95bLf1om+Bc8ajpm9Zsw=
+=OYSX
+-----END PGP PUBLIC KEY BLOCK-----
+
+pub   4096R/3ABE18B3 2013-03-21
+uid                  Yusaku Sako (CODE SIGNING KEY) <yusaku@apache.org>
+sub   4096R/F74E5802 2013-03-21
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG/MacGPG2 v2.0.18 (Darwin)
+Comment: GPGTools - http://gpgtools.org
+
+mQINBFFKjM8BEACsAehi8WEZQP+PCWOIpHNjnDTWpw9sKysh67Hyl4WixfDpBFdH
+3C631y1FZU+XUD5v+mr0o3tS10zwq5KMIxE/roDZ4GZyQsGW2NPuZ4PN6rouMg7c
+jYMFcEemr/WrhSlaTnCwfyzxGGiSW7BWnEGjBsHnjzNVEQAjcxNTLeG1dDK6TqTN
+LRfNDliC+zFyBVs0jq6p/MLfxa9Y5U3CNTzcReax7txH/EaLNNUSnAWAodHiTXIE
+19OPjO9ivpq5xnsdbUp6EUp3O0wMkhZjsROT+cj/XyaoRu38kPMULC+YNQnrkiFs
+6bfx+yCaw3v8V6JB/pi0SFcLxIp5FxERgVaumQG8wcNQQW7JRayYmevj0r6mPJWS
++0ks5U/BE30MYBWk8YEfSBUXGbahBgSN9NbVGiUT2GNAz5Md1jyLqUcpt5oPHpMy
+WbbLhRtbI+VWEPoL42WX6G3nJXuBT85do15agycR9insW8y4jW5C8sAJsCJG2K2s
+9DB7W141W7isU6430c0KOLs0dGWbdWAcHiGGHmmvUvRBZGZzcD7EL45ODqzkTmUD
+Mkhg9ErvlhZBi81GqdHURj2QIjsustFgq2ZQjFRzS3Joc2yR057VpTfymqS2fUvs
+Zcw2tiA6SEm12X/W91fre0Uh/BbRIpJKW3LxdxKyGbotnY3w6BaAf/qJqwARAQAB
+tDJZdXNha3UgU2FrbyAoQ09ERSBTSUdOSU5HIEtFWSkgPHl1c2FrdUBhcGFjaGUu
+b3JnPokCNwQTAQIAIQIbAwIeAQIXgAUCUUqOXwULCQgHAwUVCgkICwUWAgMBAAAK
+CRBMOqJyOr4Ys0eUD/9n+aEFiWlCWJtO9aLLKaVXJ0f1IcMktChg6NeD6eNMGzMH
+Gc9W8XaTWT0M1Rfs5mJRqwEAFgDArYlbvlBeRxfs8P7gG15lSWUvEdsD3P+yt8CT
+qK5Z/gtwfQqvuokyPICrAqNuUcsHuixzc91poQjMfTDIVWwtIITWUR/bPrnwdZmd
+oF1yDoytMtktR9uJo3zLMj/u2BNO6A5brE2HMhZOfhnNQbbI4ckMPGigolt5JG/Y
+2797AZk+svF2E8dCCIifrigCMNqNhsQszeeAC8EsqUOLQod3j1vvPQlUuk/xrLnf
+YQlE1FsUzOHoPxP/PZNmUrdZ8bKgh1fo+ESyLnMdCzmnmiZex7l7/ifF7iVcV3MB
+or8WhwYtmMeCQXBT9+tXWEWtVaS6CLE1B9QLP12UdUhkKOgiPNRWXy7N3Bk3CnVn
+EAp0dpJy5qTyXeiYwntt7UV8JYwlt9QkgzptGFTi4SiA75Y2PutySWs2NyHD/cdl
+m+QySnLyn6evlVeCT48keFwNqTvQAiNlm7kEIvupkURjZXzuON9qpzQqkvEsRmfT
+HscUgztb4QWGJ2ONuWHGonn8/Q+ISh1pbq3u11m4Ak5lfZEH35dBpLWz3FfgH1Cs
+0CfW1Wam75/wk8UXCPDUD+MpNzNo2O7URivnd7co/e1+YuC1r61RnlWoUV7gf7kC
+DQRRSozPARAA2Fe1LNgN8q3fx1pbwZXA8wVds5ryVno3HAXf7tYvOSL7lHfYTSGj
+8KIucQA16vz11H2h3YagGkhc+JrdomwSdp1GupK2WcTr3ineS4zym0QXOJcfLKqo
+hkMCepDb4dIEPMqG/ACneSxSBzekT2FueIPFSk30Y2TqVMsiOR2ewo/FNQxLXrb7
+b7UdFotwiKfQRoedjATMZf8xXTfFrIDuyGZYUOfwH5BRAawVm96yfongp/6UYqdY
+h0Voo0f2kRGfORT+9D6Y7ezG9Y+RcxXxHrZe2Fzy0kiJ1/sXuvzEQdaeboi7g3Rs
+s8B94kvwrxfRdkB7XQzxc0IilUhfBbanRirpb3b+lTzeTrSBVf3kj5k5YlQeGr5n
+crzf5JfjuRyOZhpYZkSV2QPSjLqhuQY8S/TRQurd6DfNZDCaJIptZFIc4ucDcPn7
+KGh4mw93RB429aiv/JcuQ77HZgnxYLFhHhzXjPeMVKR9WTz4EXhuc87VB8yJuq+G
+AT0VBagqbSo5+Tako7fuSTagzGUOA0JE3XR8kfXptuwF4kYc5/WxHWsGR/Eti6ib
+9ls1mKD/13yT4rdWSyBo1WLlk8q/g5tTwy471OcHuzouj4je74zzC/xcURnMHM91
+y6PsG1ECAq0rmeGTQxjuLaOaJfj3NAGRMHgpWW6ar2ULL/zKep3am+8AEQEAAYkC
+HwQYAQIACQUCUUqMzwIbDAAKCRBMOqJyOr4YswWWD/9QFyHVOC9yvjY9TI66zfgj
+g1scibK/x41DqMeo3h8F+lkmydrsWBXny3WUv2g9MqliOXsMDZiVs1oHIis1JJgj
+YcvTP6AXQExd+WFOxzW+tZT0ZlJlQ32nrjGvnsCpGEOKMzDf1xLQZ+0VGQpVsgk5
+zQQ6AxRwXB3diYDHRsC6z1seGF8CJgmIRP+Fd5HehvLiyKI+87slUEsMIgSpmg/X
+G2xhXjiLFpR902BWECFJOfpYg3PG+7k5WfcVLdvPvt0WoYal+Wiv8bKqroC73gwA
+avcJWATNpaNvl8sZyaEIh2tHDmLudBxz1F7aloyIymAoWtqNfuHlrlfq3bPrj/tU
+7u8qxzPiPdtSl8d57B8EXShXN4HW+oSyWGbZ17jJEt+xWlE+bwHmW1hXSeU6h4e9
+NK6910/cgyJq1VgAeE5HG82cR7W+2oOaDLs3Eu71X3bphVS7zlD56GviRnZFyjM3
+6FtAsKKyeEQLzwEzTGMGijgaQfre/Fy7XrsaEcSEDTFWEs1oFtQm0Ny4auC2Tr+z
+VCaqH5uFM5jJUgxSBK30zn/MqT3u48/Ow3mTdoMEudaw7pffe4TavSHGSeh25iuk
+pE7N7T0mS0q7k/k2yMjcc5D6sWqnWPqjxNvgVkz4OZx/Zii8oAYqIoMStZFiNNAd
+Ww+qLwIsYPC6XQNrVJpbzg==
+=dBi7
+-----END PGP PUBLIC KEY BLOCK-----
+

+ 262 - 0
LICENSE.txt

@@ -0,0 +1,262 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+APACHE AMBARI SUBCOMPONENTS:
+
+The Apache Ambari project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses. 
+
+For the stdlib in puppet modules 
+
+Copyright (C) 2011 Puppet Labs Inc
+
+and some parts:
+
+Copyright (C) 2011 Krzysztof Wilczynski
+
+Puppet Labs can be contacted at: info@puppetlabs.com
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+For ambari-common/src/test/python:
+
+Copyright (c) 2003-2012, Michael Foord
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+

+ 10 - 0
NOTICE.txt

@@ -0,0 +1,10 @@
+Apache Ambari
+Copyright 2011-2013 The Apache Software Foundation
+
+This product includes software developed at The Apache Software
+Foundation (http://www.apache.org/).
+
+The component ambari-common/src/test/python is under the following copyright:
+
+Copyright (c) 2003-2012, Michael Foord
+All rights reserved.

+ 172 - 0
ambari-agent/conf/unix/ambari-agent

@@ -0,0 +1,172 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# description: ambari-agent daemon
+# processname: ambari-agent
+
+# /etc/init.d/ambari-agent
+
+export PATH=/usr/lib/ambari-server/*:$PATH
+export AMBARI_CONF_DIR=/etc/ambari-server/conf
+
+AMBARI_AGENT=ambari-agent
+PIDFILE=/var/run/ambari-agent/$AMBARI_AGENT.pid
+LOGFILE=/var/log/ambari-agent/ambari-agent.out
+AGENT_SCRIPT=/usr/lib/python2.6/site-packages/ambari_agent/main.py
+OK=1
+NOTOK=0
+
+
+if [ -a /usr/bin/python2.6 ]; then
+  PYTHON=/usr/bin/python2.6
+fi
+
+if [ "x$PYTHON" == "x" ]; then
+  PYTHON=/usr/bin/python
+fi
+
+# Try to read the passphrase from the environment
+if [ ! -z $AMBARI_PASSPHRASE ]; then
+  RESOLVED_AMBARI_PASSPHRASE=$AMBARI_PASSPHRASE
+fi
+
+# Reading the environment file
+if [ -a /var/lib/ambari-agent/ambari-env.sh ]; then
+  . /var/lib/ambari-agent/ambari-env.sh
+fi
+
+# If the passphrase is not defined yet, use the value from the env file
+if [ -z $RESOLVED_AMBARI_PASSPHRASE ] && [ ! -z $AMBARI_PASSPHRASE ]; then
+  RESOLVED_AMBARI_PASSPHRASE=$AMBARI_PASSPHRASE
+elif [ -z $RESOLVED_AMBARI_PASSPHRASE ]; then
+  # Passphrase is not defined anywhere, set the default value
+  RESOLVED_AMBARI_PASSPHRASE="DEV"
+fi
+
+export AMBARI_PASSPHRASE=$RESOLVED_AMBARI_PASSPHRASE
+
+#echo $AMBARI_PASSPHRASE
+
+# check for version
+check_python_version ()
+{
+  echo "Verifying Python version compatibility..."
+  majversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f1`
+  minversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f2`
+  numversion=$(( 10 * $majversion + $minversion))
+  if (( $numversion < 26 )); then
+    echo "ERROR: Found Python version $majversion.$minversion. Ambari Agent requires Python version > 2.6"
+    return $NOTOK
+  fi
+  echo "Using python " $PYTHON
+  return $OK
+}
+
+case "$1" in
+  start)
+        check_python_version
+        if [ "$?" -eq "$NOTOK" ]; then
+          exit -1
+        fi
+        echo "Checking for previously running Ambari Agent..."
+        if [ -f $PIDFILE ]; then
+          PID=`cat $PIDFILE`
+          if [ -z "`ps ax -o pid | grep -w $PID`" ]; then
+            echo "$PIDFILE found with no process. Removing $PID..."
+            rm -f $PIDFILE
+          else
+            tput bold
+            echo "ERROR: $AMBARI_AGENT already running"
+            tput sgr0
+            echo "Check $PIDFILE for PID."
+            exit -1
+          fi
+        fi
+        echo "Starting ambari-agent"
+        nohup $PYTHON $AGENT_SCRIPT > $LOGFILE 2>&1 &
+        PID=$!
+        sleep 2
+        echo "Verifying $AMBARI_AGENT process status..."
+        if [ -z "`ps ax -o pid | grep -w $PID`" ]; then
+          echo "ERROR: $AMBARI_AGENT start failed for unknown reason"
+          exit -1
+        fi
+        tput bold
+        echo "Ambari Agent successfully started"
+        tput sgr0
+        echo "Agent PID at: $PIDFILE"
+        echo "Agent log at: $LOGFILE"
+        ;;
+  status)
+        if [ -f $PIDFILE ]; then
+          PID=`cat $PIDFILE`
+          echo "Found $AMBARI_AGENT PID: $PID"
+          if [ -z "`ps ax -o pid | grep -w $PID`" ]; then
+            echo "$AMBARI_AGENT not running. Stale PID File at: $PIDFILE"
+          else
+            tput bold
+            echo "$AMBARI_AGENT running."
+            tput sgr0
+            echo "Agent PID at: $PIDFILE"
+            echo "Agent log at: $LOGFILE"
+          fi
+        else
+          tput bold
+          echo "$AMBARI_AGENT currently not running"
+          tput sgr0
+          echo "Usage: /usr/sbin/ambari-agent {start|stop|restart|status}"
+        fi
+        ;;
+  stop)
+        check_python_version
+        if [ "$?" -eq "$NOTOK" ]; then
+          exit -1
+        fi
+        if [ -f $PIDFILE ]; then
+          PID=`cat $PIDFILE`
+          echo "Found $AMBARI_AGENT PID: $PID"
+          if [ -z "`ps ax -o pid | grep -w $PID`" ]; then
+            tput bold
+            echo "ERROR: $AMBARI_AGENT not running. Stale PID File at: $PIDFILE"
+            tput sgr0
+          else
+            echo "Stopping $AMBARI_AGENT"
+            $PYTHON $AGENT_SCRIPT stop
+          fi
+          echo "Removing PID file at $PIDFILE"
+          rm -f $PIDFILE
+          tput bold
+          echo "$AMBARI_AGENT successfully stopped"
+          tput sgr0
+        else
+          tput bold
+          echo "$AMBARI_AGENT is not running. No PID found at $PIDFILE"
+          tput sgr0
+        fi
+        ;;
+  restart)
+        echo -e "Restarting $AMBARI_AGENT"
+        $0 stop
+        $0 start
+        ;;     
+  *)
+        tput bold
+        echo "Usage: /usr/sbin/ambari-agent {start|stop|restart|status}"
+        tput sgr0
+        exit 1
+esac
+
+exit 0
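
The start, status, and stop branches above all detect liveness by grepping "ps ax -o pid" for the recorded PID and treat the PID file as stale when no process matches. A minimal Python sketch of the same stale-PID-file check (an illustration, not code from this commit), assuming the script's PID-file path; os.kill(pid, 0) probes for process existence without delivering a signal:

import errno
import os

PIDFILE = "/var/run/ambari-agent/ambari-agent.pid"  # same path the script uses

def agent_is_running():
    # Missing or unreadable PID file: the agent is treated as not running.
    try:
        with open(PIDFILE) as f:
            pid = int(f.read().strip())
    except (IOError, ValueError):
        return False
    # Signal 0 checks existence/permissions without actually signaling.
    try:
        os.kill(pid, 0)
    except OSError as e:
        # ESRCH means no such process, i.e. the PID file is stale.
        return e.errno != errno.ESRCH
    return True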

+ 55 - 0
ambari-agent/conf/unix/ambari-agent.ini

@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[server]
+hostname=localhost
+url_port=8440
+secured_url_port=8441
+
+[agent]
+prefix=/var/lib/ambari-agent/data
+;loglevel=(DEBUG/INFO)
+loglevel=INFO
+
+[stack]
+installprefix=/var/ambari-agent/
+upgradeScriptsDir=/var/lib/ambari-agent/upgrade_stack
+
+[puppet]
+puppetmodules=/var/lib/ambari-agent/puppet
+ruby_home=/usr/lib/ambari-agent/lib/ruby-1.8.7-p370
+puppet_home=/usr/lib/ambari-agent/lib/puppet-2.7.9
+facter_home=/usr/lib/ambari-agent/lib/facter-1.6.10
+
+[command]
+maxretries=2
+sleepBetweenRetries=1
+
+[security]
+keysdir=/var/lib/ambari-agent/keys
+server_crt=ca.crt
+passphrase_env_var_name=AMBARI_PASSPHRASE
+
+[services]
+pidLookupPath=/var/run/
+
+[heartbeat]
+state_interval=6
+dirs=/etc/hadoop,/etc/hadoop/conf,/etc/hbase,/etc/hcatalog,/etc/hive,/etc/oozie,
+  /etc/sqoop,/etc/ganglia,/etc/nagios,
+  /var/run/hadoop,/var/run/zookeeper,/var/run/hbase,/var/run/templeton,/var/run/oozie,
+  /var/log/hadoop,/var/log/zookeeper,/var/log/hbase,/var/log/templeton,/var/log/hive,
+  /var/log/nagios
+rpms=nagios,ganglia,
+  hadoop,hadoop-lzo,hbase,oozie,sqoop,pig,zookeeper,hive,libconfuse,ambari-log4j
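
The indented lines under the dirs and rpms keys rely on INI line continuation: a stock INI parser joins an indented follow-on line onto the preceding value. A minimal sketch of reading the joined list back, assuming the agent parses this file with Python's standard ConfigParser (the agent-side parsing code is not part of this file):

import ConfigParser  # named configparser on Python 3

config = ConfigParser.RawConfigParser()
config.read("/etc/ambari-agent/conf/ambari-agent.ini")

# Continuation lines arrive joined by newlines; strip() cleans them up
# after splitting the comma-separated value.
raw = config.get("heartbeat", "dirs")
dirs = [d.strip() for d in raw.split(",") if d.strip()]
print(dirs[:3])  # ['/etc/hadoop', '/etc/hadoop/conf', '/etc/hbase']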

+ 18 - 0
ambari-agent/conf/unix/ambari-env.sh

@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+# To change the passphrase used by the agent, adjust the line below. This value is used when no
+# passphrase is given through the AMBARI_PASSPHRASE environment variable.
+AMBARI_PASSPHRASE="DEV"

+ 41 - 0
ambari-agent/etc/init.d/ambari-agent

@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+# chkconfig: 345 20 80
+# description: ambari-agent daemon
+# processname: ambari-agent
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+case "$1" in
+  start)
+        /usr/sbin/ambari-agent $@
+        ;;
+  stop)
+        /usr/sbin/ambari-agent $@
+        ;;
+  status)
+        /usr/sbin/ambari-agent $@
+        ;;
+  restart)
+        $0 stop
+        $0 start
+        ;;
+  *)
+        echo "Usage: $0 {start|stop|status|restart}"
+        exit 1
+esac
+
+exit 0

+ 363 - 0
ambari-agent/pom.xml

@@ -0,0 +1,363 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+  <parent>
+    <groupId>org.apache.ambari</groupId>
+    <artifactId>ambari-project</artifactId>
+    <version>1.3.0-SNAPSHOT</version>
+    <relativePath>../ambari-project</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.ambari</groupId>
+  <artifactId>ambari-agent</artifactId>
+  <packaging>pom</packaging>
+  <version>1.3.0-SNAPSHOT</version>
+  <name>Ambari Agent</name>
+  <description>Ambari Agent</description>
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    <final.name>${project.artifactId}-${project.version}</final.name>
+    <package.release>1</package.release>
+    <package.prefix>/usr</package.prefix>
+    <package.log.dir>/var/log/ambari-agent</package.log.dir>
+    <package.pid.dir>/var/run/ambari-agent</package.pid.dir>
+    <skipTests>false</skipTests>
+    <facter.tar>http://downloads.puppetlabs.com/facter/facter-1.6.10.tar.gz</facter.tar>
+    <puppet.tar>http://downloads.puppetlabs.com/puppet/puppet-2.7.9.tar.gz</puppet.tar>
+    <install.dir>/usr/lib/python2.6/site-packages/ambari_agent</install.dir>
+    <ruby.tar>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6/ruby-1.8.7-p370.tar.gz</ruby.tar>
+    <lib.dir>/usr/lib/ambari-agent/lib</lib.dir>
+    <python.ver>python &gt;= 2.6</python.ver>
+  </properties>
+  <profiles>
+    <profile>
+      <id>suse11</id>
+      <properties>
+        <ruby.tar>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11/ruby-1.8.7-p370.tar.gz</ruby.tar>
+      </properties>
+    </profile>
+    <profile>
+      <id>centos5</id>
+      <properties>
+        <ruby.tar>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5/ruby-1.8.7-p370.tar.gz</ruby.tar>
+      </properties>
+    </profile>
+  </profiles>
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.0</version>
+      </plugin>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <tarLongFileMode>gnu</tarLongFileMode>
+          <descriptors>
+            <descriptor>src/packages/tarball/all.xml</descriptor>
+          </descriptors>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.2</version>
+        <executions>
+          <execution>
+            <configuration>
+              <executable>python2.6</executable>
+              <workingDirectory>src/test/python</workingDirectory>
+              <arguments>
+                <argument>unitTests.py</argument>
+              </arguments>
+              <environmentVariables>
+                <PYTHONPATH>${project.basedir}/../ambari-common/src/test/python:${project.basedir}/src/main/python/ambari_agent:$PYTHONPATH</PYTHONPATH>
+              </environmentVariables>
+              <skip>${skipTests}</skip>
+            </configuration>
+            <id>python-test</id>
+            <phase>test</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+          </execution>
+          <execution>
+            <configuration>
+              <executable>python2.6</executable>
+              <workingDirectory>target/ambari-agent-${project.version}</workingDirectory>
+              <arguments>
+                <argument>${project.basedir}/src/main/python/setup.py</argument>
+                <argument>clean</argument>
+                <argument>bdist_dumb</argument>
+              </arguments>
+              <environmentVariables>
+                <PYTHONPATH>target/ambari-agent-${project.version}:$PYTHONPATH</PYTHONPATH>
+              </environmentVariables>
+            </configuration>
+            <id>python-package</id>
+            <phase>package</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>rpm-maven-plugin</artifactId>
+        <version>2.0.1</version>
+        <executions>
+          <execution>
+            <!-- unbinds rpm creation from maven lifecycle -->
+            <phase>none</phase>
+            <goals>
+              <goal>rpm</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <copyright>2012, Apache Software Foundation</copyright>
+          <group>Development</group>
+          <description>Maven Recipe: RPM Package.</description>
+          <requires>
+            <require>openssl</require>
+            <require>zlib</require>
+            <require>${python.ver}</require>
+          </requires>
+          <postinstallScriptlet>
+            <scriptFile>src/main/package/rpm/postinstall.sh</scriptFile>
+            <fileEncoding>utf-8</fileEncoding>
+          </postinstallScriptlet>
+          <preinstallScriptlet>
+            <scriptFile>src/main/package/rpm/preinstall.sh</scriptFile>
+            <fileEncoding>utf-8</fileEncoding>
+          </preinstallScriptlet>
+          <preremoveScriptlet>
+            <scriptFile>src/main/package/rpm/preremove.sh</scriptFile>
+            <fileEncoding>utf-8</fileEncoding>
+          </preremoveScriptlet>
+
+          <needarch>x86_64</needarch>
+          <autoRequires>false</autoRequires>
+          <mappings>
+            <mapping>
+              <directory>${install.dir}</directory>
+              <sources>
+                <source>
+                  <location>${project.build.directory}/${project.artifactId}-${project.version}/ambari_agent</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>${lib.dir}</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>${project.build.directory}/lib</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>${lib.dir}/examples</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>src/examples</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/var/lib/${project.artifactId}/puppet</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>src/main/puppet</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/var/lib/${project.artifactId}/upgrade_stack</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>src/main/upgrade_stack</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/etc/ambari-agent/conf</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>conf/unix/ambari-agent.ini</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/usr/sbin</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>conf/unix/ambari-agent</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/var/lib/ambari-agent</directory>
+              <filemode>700</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>conf/unix/ambari-env.sh</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>${package.pid.dir}</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+            </mapping>
+            <mapping>
+              <directory>/var/lib/${project.artifactId}/data</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+            </mapping>
+            <mapping>
+              <directory>/var/lib/${project.artifactId}/keys</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+            </mapping>
+            <mapping>
+              <directory>${package.log.dir}</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+            </mapping>
+            <mapping>
+              <directory>/var/ambari-agent</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+            </mapping>
+            <mapping>
+              <directory>/etc/rc.d/init.d</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>etc/init.d/ambari-agent</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
+              <directory>/var/lib/${project.artifactId}/data</directory>
+              <filemode>755</filemode>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>../version</location>
+                </source>
+              </sources>
+            </mapping>
+            <!-- -->
+          </mappings>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>com.github.goldin</groupId>
+        <artifactId>copy-maven-plugin</artifactId>
+        <version>0.2.5</version>
+        <executions>
+          <execution>
+            <id>create-archive</id>
+            <phase>package</phase>
+            <goals>
+              <goal>copy</goal>
+            </goals>
+            <configuration>
+              <resources>
+                <resource>
+                  <targetPath>${project.build.directory}/lib</targetPath>
+                  <file>${ruby.tar}</file>
+                  <unpack>true</unpack>
+                </resource>
+                <resource>
+                  <targetPath>${project.build.directory}/lib</targetPath>
+                  <file>${facter.tar}</file>
+                  <unpack>true</unpack>
+                </resource>
+                <resource>
+                  <targetPath>${project.build.directory}/lib</targetPath>
+                  <file>${puppet.tar}</file>
+                  <unpack>true</unpack>
+                </resource>
+              </resources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>src/examples/*</exclude>
+            <exclude>src/test/python/dummy*.txt</exclude>
+            <exclude>src/main/python/ambari_agent/imports.txt</exclude>
+            <exclude>src/main/puppet/modules/stdlib/**</exclude>
+            <exclude>**/*.erb</exclude>
+            <exclude>**/*.json</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+    </plugins>
+    <extensions>
+      <extension>
+        <groupId>org.apache.maven.wagon</groupId>
+        <artifactId>wagon-ssh-external</artifactId>
+      </extension>
+    </extensions>
+  </build>
+</project>
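
The exec-maven-plugin's python-test execution above runs unitTests.py in src/test/python during mvn test, with PYTHONPATH pointing at ambari-common's test helpers and the agent sources. A minimal sketch of the equivalent manual invocation from the ambari-agent directory (assuming python2.6 is on the PATH, as the pom does):

import os
import subprocess

# Mirror the <environmentVariables> block from the pom above.
env = dict(os.environ)
env["PYTHONPATH"] = os.pathsep.join(filter(None, [
    os.path.abspath("../ambari-common/src/test/python"),
    os.path.abspath("src/main/python/ambari_agent"),
    env.get("PYTHONPATH"),
]))

# Equivalent of the plugin's <workingDirectory> and <arguments>.
subprocess.check_call(["python2.6", "unitTests.py"],
                      cwd="src/test/python", env=env)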

+ 36 - 0
ambari-agent/src/examples/query_with3jobs.txt

@@ -0,0 +1,36 @@
+SELECT 
+   i_item_id,
+   s_state, 
+   avg(ss_quantity) agg1,
+   avg(ss_list_price) agg2,
+   avg(ss_coupon_amt) agg3,
+   avg(ss_sales_price) agg4
+FROM
+   (SELECT /*+ MAPJOIN(item) */ i_item_id, s_state, ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price
+   FROM item
+   JOIN
+      (SELECT /*+ MAPJOIN(customer_demographics) */ s_state, ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price, ss_item_sk
+      FROM customer_demographics
+      JOIN
+         (SELECT /*+ MAPJOIN(store) */ s_state, ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price, ss_item_sk, ss_cdemo_sk
+         FROM store
+         JOIN
+            (SELECT /*+ MAPJOIN(date_dim) */ ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price, ss_item_sk, ss_cdemo_sk, ss_store_sk
+             FROM date_dim
+             JOIN store_sales
+	     ON (store_sales.ss_sold_date_sk = date_dim.d_date_sk) WHERE d_year = 2002) a
+         ON (a.ss_store_sk = store.s_store_sk)
+         WHERE s_state in ('TN', 'SD')) b
+      ON (b.ss_cdemo_sk = customer_demographics.cd_demo_sk)
+      WHERE
+         cd_gender = 'M' and
+         cd_marital_status = 'S' and
+         cd_education_status = 'College') c
+   ON (c.ss_item_sk = item.i_item_sk)) d
+GROUP BY
+   i_item_id,
+   s_state
+ORDER BY
+   i_item_id,
+   s_state
+LIMIT 100;

+ 35 - 0
ambari-agent/src/examples/query_with6jobs.txt

@@ -0,0 +1,35 @@
+SELECT 
+   i_item_id,
+   s_state, 
+   avg(ss_quantity) agg1,
+   avg(ss_list_price) agg2,
+   avg(ss_coupon_amt) agg3,
+   avg(ss_sales_price) agg4
+FROM
+   (SELECT i_item_id, s_state, ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price
+   FROM item
+   JOIN
+      (SELECT  s_state, ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price, ss_item_sk
+      FROM customer_demographics
+      JOIN
+         (SELECT  s_state, ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price, ss_item_sk, ss_cdemo_sk
+         FROM store
+         JOIN
+            (SELECT  ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price, ss_item_sk, ss_cdemo_sk, ss_store_sk
+             FROM date_dim
+             JOIN store_sales  ON (store_sales.ss_sold_date_sk = date_dim.d_date_sk) WHERE d_year = 2002) a
+         ON (a.ss_store_sk = store.s_store_sk)
+         WHERE s_state in ('TN', 'SD')) b
+      ON (b.ss_cdemo_sk = customer_demographics.cd_demo_sk)
+      WHERE
+         cd_gender = 'M' and
+         cd_marital_status = 'S' and
+         cd_education_status = 'College') c
+   ON (c.ss_item_sk = item.i_item_sk)) d
+GROUP BY
+   i_item_id,
+   s_state
+ORDER BY
+   i_item_id,
+   s_state
+LIMIT 100;

+ 226 - 0
ambari-agent/src/examples/tpcds_ss_tables.sql

@@ -0,0 +1,226 @@
+create  table store_sales
+(
+    ss_sold_date_sk           int,
+    ss_sold_time_sk           int,
+    ss_item_sk                int,
+    ss_customer_sk            int,
+    ss_cdemo_sk               int,
+    ss_hdemo_sk               int,
+    ss_addr_sk                int,
+    ss_store_sk               int,
+    ss_promo_sk               int,
+    ss_ticket_number          int,
+    ss_quantity               int,
+    ss_wholesale_cost         float,
+    ss_list_price             float,
+    ss_sales_price            float,
+    ss_ext_discount_amt       float,
+    ss_ext_sales_price        float,
+    ss_ext_wholesale_cost     float,
+    ss_ext_list_price         float,
+    ss_ext_tax                float,
+    ss_coupon_amt             float,
+    ss_net_paid               float,
+    ss_net_paid_inc_tax       float,
+    ss_net_profit             float                  
+)
+row format delimited fields terminated by '|' ;
+
+create  table customer_demographics
+(
+    cd_demo_sk                int,
+    cd_gender                 string,
+    cd_marital_status         string,
+    cd_education_status       string,
+    cd_purchase_estimate      int,
+    cd_credit_rating          string,
+    cd_dep_count              int,
+    cd_dep_employed_count     int,
+    cd_dep_college_count      int 
+)
+row format delimited fields terminated by '|' ;
+
+create  table date_dim
+(
+    d_date_sk                 int,
+    d_date_id                 string,
+    d_date                    timestamp,
+    d_month_seq               int,
+    d_week_seq                int,
+    d_quarter_seq             int,
+    d_year                    int,
+    d_dow                     int,
+    d_moy                     int,
+    d_dom                     int,
+    d_qoy                     int,
+    d_fy_year                 int,
+    d_fy_quarter_seq          int,
+    d_fy_week_seq             int,
+    d_day_name                string,
+    d_quarter_name            string,
+    d_holiday                 string,
+    d_weekend                 string,
+    d_following_holiday       string,
+    d_first_dom               int,
+    d_last_dom                int,
+    d_same_day_ly             int,
+    d_same_day_lq             int,
+    d_current_day             string,
+    d_current_week            string,
+    d_current_month           string,
+    d_current_quarter         string,
+    d_current_year            string 
+)
+row format delimited fields terminated by '|' ;
+
+create  table time_dim
+(
+    t_time_sk                 int,
+    t_time_id                 string,
+    t_time                    int,
+    t_hour                    int,
+    t_minute                  int,
+    t_second                  int,
+    t_am_pm                   string,
+    t_shift                   string,
+    t_sub_shift               string,
+    t_meal_time               string
+)
+row format delimited fields terminated by '|' ;
+
+create  table item
+(
+    i_item_sk                 int,
+    i_item_id                 string,
+    i_rec_start_date          timestamp,
+    i_rec_end_date            timestamp,
+    i_item_desc               string,
+    i_current_price           float,
+    i_wholesale_cost          float,
+    i_brand_id                int,
+    i_brand                   string,
+    i_class_id                int,
+    i_class                   string,
+    i_category_id             int,
+    i_category                string,
+    i_manufact_id             int,
+    i_manufact                string,
+    i_size                    string,
+    i_formulation             string,
+    i_color                   string,
+    i_units                   string,
+    i_container               string,
+    i_manager_id              int,
+    i_product_name            string
+)
+row format delimited fields terminated by '|' ;
+
+create  table store
+(
+    s_store_sk                int,
+    s_store_id                string,
+    s_rec_start_date          timestamp,
+    s_rec_end_date            timestamp,
+    s_closed_date_sk          int,
+    s_store_name              string,
+    s_number_employees        int,
+    s_floor_space             int,
+    s_hours                   string,
+    s_manager                 string,
+    s_market_id               int,
+    s_geography_class         string,
+    s_market_desc             string,
+    s_market_manager          string,
+    s_division_id             int,
+    s_division_name           string,
+    s_company_id              int,
+    s_company_name            string,
+    s_street_number           string,
+    s_street_name             string,
+    s_street_type             string,
+    s_suite_number            string,
+    s_city                    string,
+    s_county                  string,
+    s_state                   string,
+    s_zip                     string,
+    s_country                 string,
+    s_gmt_offset              float,
+    s_tax_precentage          float                  
+)
+row format delimited fields terminated by '|' ;
+
+create  table customer
+(
+    c_customer_sk             int,
+    c_customer_id             string,
+    c_current_cdemo_sk        int,
+    c_current_hdemo_sk        int,
+    c_current_addr_sk         int,
+    c_first_shipto_date_sk    int,
+    c_first_sales_date_sk     int,
+    c_salutation              string,
+    c_first_name              string,
+    c_last_name               string,
+    c_preferred_cust_flag     string,
+    c_birth_day               int,
+    c_birth_month             int,
+    c_birth_year              int,
+    c_birth_country           string,
+    c_login                   string,
+    c_email_address           string,
+    c_last_review_date        string
+)
+row format delimited fields terminated by '|' ;
+
+create  table promotion
+(
+    p_promo_sk                int,
+    p_promo_id                string,
+    p_start_date_sk           int,
+    p_end_date_sk             int,
+    p_item_sk                 int,
+    p_cost                    float,
+    p_response_target         int,
+    p_promo_name              string,
+    p_channel_dmail           string,
+    p_channel_email           string,
+    p_channel_catalog         string,
+    p_channel_tv              string,
+    p_channel_radio           string,
+    p_channel_press           string,
+    p_channel_event           string,
+    p_channel_demo            string,
+    p_channel_details         string,
+    p_purpose                 string,
+    p_discount_active         string 
+)
+row format delimited fields terminated by '|' ;
+
+create  table household_demographics
+(
+    hd_demo_sk                int,
+    hd_income_band_sk         int,
+    hd_buy_potential          string,
+    hd_dep_count              int,
+    hd_vehicle_count          int
+)
+row format delimited fields terminated by '|' ;
+
+create  table customer_address
+(
+    ca_address_sk             int,
+    ca_address_id             string,
+    ca_street_number          string,
+    ca_street_name            string,
+    ca_street_type            string,
+    ca_suite_number           string,
+    ca_city                   string,
+    ca_county                 string,
+    ca_state                  string,
+    ca_zip                    string,
+    ca_country                string,
+    ca_gmt_offset             float,
+    ca_location_type          string
+)
+row format delimited fields terminated by '|' ;
+
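
These statements only define pipe-delimited tables; populating and sanity-checking one of them might look like the following sketch (the HDFS data path is hypothetical):

    # Create all TPC-DS tables, then load pipe-delimited data into store_sales.
    hive -f ambari-agent/src/examples/tpcds_ss_tables.sql
    hive -e "LOAD DATA INPATH '/tmp/tpcds/store_sales.dat' INTO TABLE store_sales;"
    hive -e "SELECT count(*) FROM store_sales;"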

+ 27 - 0
ambari-agent/src/main/package/rpm/postinstall.sh

@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+chmod 755 /usr/lib/ambari-agent/lib/facter-1.6.10/bin/facter /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/filebucket /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/pi /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppet /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppetdoc /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/ralsh /usr/lib/ambari-agent/lib/ruby-1.8.7-p370/bin/*
+
+BAK=/etc/ambari-agent/conf/ambari-agent.ini.old
+ORIG=/etc/ambari-agent/conf/ambari-agent.ini
+
+if [ -f $BAK ];
+then
+  SERV_HOST=`grep -e 'hostname\s*=' $BAK | sed -r -e 's/hostname\s*=//' -e 's/\./\\\./g'`
+  sed -i -r -e "s/(hostname\s*=).*/\1$SERV_HOST/" $ORIG
+  rm $BAK -f
+fi
+exit 0
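
The grep/sed pair above carries the configured server hostname forward from the pre-upgrade config. The same round trip can be tried on scratch files (paths here are illustrative only):

    printf 'hostname=ambari.example.com\n' > /tmp/ambari-agent.ini.old
    printf 'hostname=localhost\n' > /tmp/ambari-agent.ini
    SERV_HOST=`grep -e 'hostname\s*=' /tmp/ambari-agent.ini.old | sed -r -e 's/hostname\s*=//' -e 's/\./\\\./g'`
    sed -i -r -e "s/(hostname\s*=).*/\1$SERV_HOST/" /tmp/ambari-agent.ini
    cat /tmp/ambari-agent.ini   # -> hostname=ambari.example.com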

+ 24 - 0
ambari-agent/src/main/package/rpm/preinstall.sh

@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+getent group puppet >/dev/null || groupadd -r puppet
+getent passwd puppet >/dev/null || /usr/sbin/useradd -g puppet puppet
+
+BAK=/etc/ambari-agent/conf/ambari-agent.ini.old
+ORIG=/etc/ambari-agent/conf/ambari-agent.ini
+
+[ -f $ORIG ] && mv -f $ORIG $BAK
+
+exit 0

+ 25 - 0
ambari-agent/src/main/package/rpm/preremove.sh

@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+# WARNING: This script runs not only on uninstall, but also
+# during a package upgrade. See http://www.ibm.com/developerworks/library/l-rpm2/
+# for details.
+
+if [ "$1" -eq 0 ]; # Action is uninstall
+then
+    mv /etc/ambari-agent/conf /etc/ambari-agent/conf.save
+fi
+
+exit 0
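
In RPM scriptlets, $1 is the number of package instances that will remain after the transaction: 0 on a true erase, 1 or more on an upgrade, which is why the config directory is only moved aside when $1 is 0. Both paths can be exercised outside of rpm in a throwaway environment:

    sh preremove.sh 0   # uninstall path: moves /etc/ambari-agent/conf to conf.save
    sh preremove.sh 1   # upgrade path: leaves the config directory in place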

+ 48 - 0
ambari-agent/src/main/puppet/manifestloader/site.pp

@@ -0,0 +1,48 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+class manifestloader () {
+    file { '/etc/puppet/agent/modules.tgz':
+      ensure => present,
+      source => "puppet:///modules/catalog/modules.tgz",  
+      mode => '0755',
+    }
+
+    exec { 'untar_modules':
+      command => "rm -rf /etc/puppet/agent/modules ; tar zxf /etc/puppet/agent/modules.tgz -C /etc/puppet/agent/ --strip-components 3",
+      path    => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    } 
+
+    exec { 'puppet_apply':
+      command   => "sh /etc/puppet/agent/modules/puppetApply.sh",
+      timeout   => 1800,
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      logoutput => "true"
+    }
+
+    File['/etc/puppet/agent/modules.tgz'] -> Exec['untar_modules'] -> Exec['puppet_apply']
+}
+
+node default {
+  stage { 1: }
+  class { 'manifestloader': stage => 1 }
+}
+
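
This site manifest drives the agent's masterless puppet run: stage 1 fetches the pushed modules.tgz, unpacks it, and executes puppetApply.sh. A manual invocation might look like the sketch below (the modulepath is an assumption; it must contain the "catalog" module that serves modules.tgz):

    puppet apply --modulepath=/etc/puppet/modules \
        ambari-agent/src/main/puppet/manifestloader/site.pp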

+ 68 - 0
ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp

@@ -0,0 +1,68 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+#
+# Generates XML configs from the given key-value hash maps.
+#
+# Config file format:
+#
+# <configuration>
+#   <property>
+#     <name>name1</name><value>value1</value>
+#   </property>
+#     ..
+#   <property>
+#     <name>nameN</name><value>valueN</value>
+#   </property>
+# </configuration>
+#
+# Params:
+# - filename - name of the config file to generate
+# - modulespath - modules path ('/etc/puppet/modules' by default)
+# - module - module name
+# - configuration - puppet hash whose key-value pairs become the property name/value pairs of the config file
+# - owner, group, mode - ownership and permissions of the generated file
+#
+# Note: Set the correct $modulespath in configgenerator (or pass it as a parameter)
+#
+
+define configgenerator::configfile ($modulespath='/etc/puppet/modules', $filename, $module, $configuration, $owner = "root", $group = "root", $mode = undef) {
+  $configcontent = inline_template('<!--<%=Time.now.asctime %>-->
+  <configuration>
+  <% configuration.each do |key,value| -%>
+  <property>
+    <name><%=key %></name>
+    <value><%=value %></value>
+  </property>
+  <% end -%>
+</configuration>')
+
+  debug("Generating config: ${modulespath}/${filename}")
+
+  file { "${modulespath}/${filename}":
+    ensure  => present,
+    content => $configcontent,
+    owner   => $owner,
+    group   => $group,
+    mode    => $mode,
+  }
+}
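
A minimal usage sketch of the define (the resource title, module name, and property values below are invented for illustration; the configgenerator module must be on the modulepath):

    puppet apply --modulepath=/etc/puppet/modules -e '
      configgenerator::configfile { "demo-site.xml":
        filename      => "demo-site.xml",
        module        => "demo",
        configuration => { "fs.default.name" => "hdfs://nn.example.com:8020" },
      }'

The generated file lands at ${modulespath}/${filename}, with one <property> element per hash entry and a generation-timestamp comment at the top.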

+ 23 - 0
ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp

@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+class configgenerator() {
+}

+ 21 - 0
ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp

@@ -0,0 +1,21 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-dashboard::dashboard::service_check(){}

+ 76 - 0
ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp

@@ -0,0 +1,76 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-dashboard(
+  $service_state = $hdp::params::cluster_client_state,
+  $opts = {}
+) inherits hdp-dashboard::params
+{
+   if ($service_state == 'no_op') {
+   } elsif ($service_state == 'uninstalled') {
+    hdp::package { 'dashboard' :
+      ensure => 'uninstalled',
+      java_needed => 'false',
+      size   => 64
+    }
+    hdp::directory_recursive_create { $conf_dir :
+      service_state => $service_state,
+      force => true
+    }
+
+    Hdp::Package['dashboard'] -> Hdp::Directory_recursive_create[$conf_dir]
+
+   } elsif ($service_state in ['running','installed_and_configured','stopped']) {
+      hdp::package { 'dashboard' :
+        java_needed => 'false',
+        size => 64
+       }
+     $conf_dir =  $hdp-dashboard::params::conf_dir
+  
+     hdp::directory_recursive_create { $conf_dir :
+       service_state => $service_state,
+       force => true
+     }
+ 
+     hdp-dashboard::configfile { 'cluster_configuration.json' : }
+     Hdp-Dashboard::Configfile<||>{dashboard_host => $hdp::params::host_address}
+  
+     #top level does not need anchors
+     Hdp::Package['dashboard'] -> Hdp::Directory_recursive_create[$conf_dir] -> Hdp-Dashboard::Configfile<||> 
+    } else {
+     hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+   }
+}
+
+###config file helper
+define hdp-dashboard::configfile(
+  $dashboard_host = undef
+)
+{
+  
+  hdp::configfile { "${hdp-dashboard::params::conf_dir}/${name}":
+    component      => 'dashboard',
+    owner          => root,
+    group          => root,
+    dashboard_host => $dashboard_host
+  }
+}
+
+
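
The class is driven entirely by $service_state; converging the dashboard into a configured state on a node might look like this (assuming the hdp and hdp-dashboard modules are on the modulepath):

    puppet apply --modulepath=/etc/puppet/modules \
        -e "class { 'hdp-dashboard': service_state => 'installed_and_configured' }"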

+ 28 - 0
ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp

@@ -0,0 +1,28 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-dashboard::params()
+{
+  
+  $conf_dir = "/usr/share/hdp/dashboard/dataServices/conf/" #cannot change since hard coded in rpm
+
+  $hdp_cluster_name = hdp_default("hadoop/cluster_configuration/hdp_cluster_name")
+  $scheduler_name = hdp_default("hadoop/cluster_configuration/scheduler_name")
+}

+ 97 - 0
ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb

@@ -0,0 +1,97 @@
+{
+  "config_version": 1,
+  "stack_version": "1.0.2",
+  "overall": {
+    "cluster_name": "<%=scope.function_hdp_template_var("hdp_cluster_name")%>",
+    "dashboard_host": "<%=scope.function_hdp_host("public_dashboard_host")%>",
+    "dashboard_port": 80,
+    "dataservices_host": "<%=scope.function_hdp_host("public_dashboard_host")%>",
+    "dataservices_port": 80,
+    "ganglia" : {
+      "web_host": "<%=scope.function_hdp_host("public_ganglia_server_host")%>",
+      "web_port": 80,
+      "web_root": "/ganglia/?t=yes",
+      "grid_name": "HDP_GRID"
+    },
+    "nagios": {
+      "nagiosserver_host": "<%=scope.function_hdp_host("public_nagios_server_host")%>",
+      "nagiosserver_port": 80,
+      "web_root": "/nagios"
+    },
+    "jmx": {
+      "timeout": 3
+    },
+    "services": {
+	  "HDFS" : [
+        {
+          "installed": true,
+          "name": "HDFS",
+          "namenode_host": "<%=scope.function_hdp_host("public_namenode_host")%>",
+          "namenode_port": 50070,
+          "snamenode_host": "<%=scope.function_hdp_host("public_snamenode_host")%>",
+          "snamenode_port": 50090,
+          "total_datanodes": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
+          "ganglia_clusters": {
+            "slaves": "HDPSlaves",
+            "namenode": "HDPNameNode"
+          }
+        }
+      ],
+      "MAPREDUCE" : [
+        {
+          "installed": true,
+          "name": "MAPREDUCE",
+          "jobtracker_host": "<%=scope.function_hdp_host("public_jtnode_host")%>",
+          "jobtracker_port": 50030,
+          "total_tasktrackers": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
+          "jobhistory_host": "<%=scope.function_hdp_host("public_jtnode_host")%>",
+          "jobhistory_port": 51111,
+          "ganglia_clusters": {
+            "slaves": "HDPSlaves",
+            "jobtracker": "HDPJobTracker"
+          },
+          "scheduler_type": "<%=scope.function_hdp_template_var("scheduler_name")%>"
+        }
+      ],
+      "HBASE" : [
+        {
+          "installed": <%=not scope.function_hdp_no_hosts("public_hbase_master_hosts")%>,
+          "name": "HBASE",
+          "hbasemasters_hosts": "<%=scope.function_hdp_host("public_hbase_master_hosts")%>",
+          "hbasemasters_port": 60010,
+          "total_regionservers": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
+          "ganglia_clusters": {
+            "slaves": "HDPSlaves",
+            "hbasemasters": "HDPHBaseMaster"
+          }
+        }
+      ],
+      "ZOOKEEPER" : [
+        {
+          "installed": <%=not scope.function_hdp_no_hosts("public_zookeeper_hosts")%>,
+          "name": "ZOOKEEPER"
+        }
+      ],
+      "HIVE" : [
+        {
+          "installed": <%=not scope.function_hdp_no_hosts("public_hive_server_host")%>,
+          "name": "HIVE"
+        }
+      ],
+      "TEMPLETON" : [
+        {
+          "installed": <%=not scope.function_hdp_no_hosts("public_webhcat_server_host")%>,
+          "name": "TEMPLETON"
+        }
+      ],
+      "OOZIE" : [
+        {
+          "installed": <%=not scope.function_hdp_no_hosts("public_oozie_server")%>,
+          "name": "OOZIE",
+          "oozie_host": "<%=scope.function_hdp_host("public_oozie_server")%>",
+          "oozie_port": 11000
+        }
+      ]
+    }
+  }
+}

+ 37 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh

@@ -0,0 +1,37 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# Before checking gmetad, check rrdcached.
+./checkRrdcached.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+if [ -n "${gmetadRunningPid}" ]
+then
+  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
+else
+  echo "Failed to find running ${GMETAD_BIN}";
+  exit 1;
+fi
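
Like the other check scripts in this module, this one reports health purely through its exit code, so it composes cleanly with shell conditionals and the init scripts' status targets:

    cd /usr/libexec/hdp/ganglia   # runtime dir used by the init scripts below
    if ./checkGmetad.sh; then echo "gmetad healthy"; else echo "gmetad down"; fi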

+ 62 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh

@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function checkGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+    # Skip over (purported) Clusters that don't have their core conf file present.
+    if [ -e "${gmondCoreConfFileName}" ]
+    then 
+      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+      if [ -n "${gmondRunningPid}" ]
+      then
+        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
+      else
+        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
+        exit 1;
+      fi
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so check
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        checkGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just check the one ${gmondClusterName} that was asked for.
+    checkGmondForCluster ${gmondClusterName};
+fi
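
Because the cluster-name argument is optional, the same script covers both the targeted and the exhaustive case:

    ./checkGmond.sh HDPSlaves   # check the gmond serving one named cluster
    ./checkGmond.sh             # no argument: check every configured cluster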

+ 34 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh

@@ -0,0 +1,34 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+if [ -n "${rrdcachedRunningPid}" ]
+then
+  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
+else
+  echo "Failed to find running ${RRDCACHED_BIN}";
+  exit 1;
+fi

+ 71 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init

@@ -0,0 +1,71 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in-sync with the definition of 
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANGLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
+HDP_GANGLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
+HDP_GANGLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmetad..."
+      echo "============================="
+      [ -f ${HDP_GANGLIA_GMETAD_STARTER} ] || exit 1
+      eval "${HDP_GANGLIA_GMETAD_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmetad..."
+      echo "=================================="
+      [ -f ${HDP_GANGLIA_GMETAD_STOPPER} ] || exit 1
+      eval "${HDP_GANGLIA_GMETAD_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmetad..."
+      echo "======================================="
+      [ -f ${HDP_GANGLIA_GMETAD_CHECKER} ] || exit 1
+      eval "${HDP_GANGLIA_GMETAD_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL
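
Once this file is installed as /etc/init.d/hdp-gmetad (an assumption here; the packaging handles placement), it behaves as a conventional SysV wrapper:

    service hdp-gmetad start
    service hdp-gmetad status && echo "hdp-gmetad OK"
    service hdp-gmetad stop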

+ 196 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh

@@ -0,0 +1,196 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMETAD_BIN=/usr/sbin/gmetad;
+GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
+GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
+
+function getGmetadLoggedPid()
+{
+    if [ -e "${GMETAD_PID_FILE}" ]
+    then
+        echo `cat ${GMETAD_PID_FILE}`;
+    fi
+}
+
+function getGmetadRunningPid()
+{
+    gmetadLoggedPid=`getGmetadLoggedPid`;
+
+    if [ -n "${gmetadLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmetadConf()
+{
+    now=`date`;
+
+    cat <<END_OF_GMETAD_CONF_1
+#################### Generated by ${0} on ${now} ####################
+#
+#-------------------------------------------------------------------------------
+# Setting the debug_level to 1 will keep the daemon in the foreground and
+# show only error messages. Setting this value higher than 1 will make
+# gmetad output debugging information and stay in the foreground.
+# default: 0
+# debug_level 10
+#
+#-------------------------------------------------------------------------------
+# What to monitor. The most important section of this file. 
+#
+# The data_source tag specifies either a cluster or a grid to
+# monitor. If we detect the source is a cluster, we will maintain a complete
+# set of RRD databases for it, which can be used to create historical 
+# graphs of the metrics. If the source is a grid (it comes from another gmetad),
+# we will only maintain summary RRDs for it.
+#
+# Format: 
+# data_source "my cluster" [polling interval] address1:port address2:port ...
+# 
+# The keyword 'data_source' must immediately be followed by a unique
+# string which identifies the source, then an optional polling interval in 
+# seconds. The source will be polled at this interval on average. 
+# If the polling interval is omitted, 15sec is assumed.
+#
+# If you choose to set the polling interval to something other than the default,
+# note that the web frontend determines a host as down if its TN value is less
+# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
+# to something around or greater than 80sec, this will cause the frontend to
+# incorrectly display hosts as down even though they are not.
+#
+# A list of machines which service the data source follows, in the 
+# format ip:port, or name:port. If a port is not specified then 8649
+# (the default gmond port) is assumed.
+# default: There is no default value
+#
+# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
+# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
+# data_source "another source" 1.3.4.7:8655  1.3.4.8
+END_OF_GMETAD_CONF_1
+
+    # Get info about all the configured Ganglia clusters.
+    getGangliaClusterInfo | while read gangliaClusterInfoLine
+    do
+        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
+        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
+        # ...and generate a corresponding data_source line for gmetad.conf. 
+        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
+    done
+
+    cat <<END_OF_GMETAD_CONF_2
+#
+# Round-Robin Archives
+# You can specify custom Round-Robin archives here (defaults are listed below)
+#
+# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+#      "RRA:AVERAGE:0.5:5760:374"
+#
+#-------------------------------------------------------------------------------
+# Scalability mode. If on, we summarize over downstream grids, and respect
+# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
+# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
+# we are the "authority" on data source feeds. This approach does not scale to
+# large groups of clusters, but is provided for backwards compatibility.
+# default: on
+# scalable off
+#
+#-------------------------------------------------------------------------------
+# The name of this Grid. All the data sources above will be wrapped in a GRID
+# tag with this name.
+# default: unspecified
+gridname "HDP_GRID"
+#
+#-------------------------------------------------------------------------------
+# The authority URL for this grid. Used by other gmetads to locate graphs
+# for our data sources. Generally points to a ganglia/
+# website on this machine.
+# default: "http://hostname/ganglia/",
+#   where hostname is the name of this machine, as defined by gethostname().
+# authority "http://mycluster.org/newprefix/"
+#
+#-------------------------------------------------------------------------------
+# List of machines this gmetad will share XML with. Localhost
+# is always trusted. 
+# default: There is no default value
+# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
+#
+#-------------------------------------------------------------------------------
+# If you want any host which connects to the gmetad XML to receive
+# data, then set this value to "on"
+# default: off
+# all_trusted on
+#
+#-------------------------------------------------------------------------------
+# If you don't want gmetad to setuid then set this to off
+# default: on
+# setuid off
+#
+#-------------------------------------------------------------------------------
+# User gmetad will setuid to (defaults to "nobody")
+# default: "nobody"
+setuid_username "${GMETAD_USER}"
+#
+#-------------------------------------------------------------------------------
+# Umask to apply to created rrd files and grid directory structure
+# default: 0 (files are public)
+# umask 022
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer requests for XML
+# default: 8651
+# xml_port 8651
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer queries for XML. This facility allows
+# simple subtree and summation views of the XML tree.
+# default: 8652
+# interactive_port 8652
+#
+#-------------------------------------------------------------------------------
+# The number of threads answering XML requests
+# default: 4
+# server_threads 10
+#
+#-------------------------------------------------------------------------------
+# Where gmetad stores its round-robin databases
+# default: "/var/lib/ganglia/rrds"
+# rrd_rootdir "/some/other/place"
+#
+#-------------------------------------------------------------------------------
+# In earlier versions of gmetad, hostnames were handled in a case
+# sensitive manner
+# If your hostname directories have been renamed to lower case,
+# set this option to 0 to disable backward compatibility.
+# From version 3.2, backwards compatibility will be disabled by default.
+# default: 1   (for gmetad < 3.2)
+# default: 0   (for gmetad >= 3.2)
+case_sensitive_hostnames 1
+END_OF_GMETAD_CONF_2
+}

+ 71 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init

@@ -0,0 +1,71 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in-sync with the definition of 
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANGLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
+HDP_GANGLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
+HDP_GANGLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmond..."
+      echo "============================="
+      [ -f ${HDP_GANGLIA_GMOND_STARTER} ] || exit 1
+      eval "${HDP_GANGLIA_GMOND_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmond..."
+      echo "=================================="
+      [ -f ${HDP_GANGLIA_GMOND_STOPPER} ] || exit 1
+      eval "${HDP_GANGLIA_GMOND_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmond..."
+      echo "======================================="
+      [ -f ${HDP_GANGLIA_GMOND_CHECKER} ] || exit 1
+      eval "${HDP_GANGLIA_GMOND_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL

+ 540 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh

@@ -0,0 +1,540 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMOND_BIN=/usr/sbin/gmond;
+GMOND_CORE_CONF_FILE=gmond.core.conf;
+GMOND_MASTER_CONF_FILE=gmond.master.conf;
+GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
+GMOND_PID_FILE=gmond.pid;
+
+# Functions.
+function getGmondCoreConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
+    fi
+}
+
+function getGmondMasterConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    fi
+}
+
+function getGmondSlaveConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    fi
+}
+
+function getGmondPidFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
+    else
+        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
+    fi
+}
+
+function getGmondLoggedPid()
+{
+    gmondPidFile=`getGmondPidFileName ${1}`;
+
+    if [ -e "${gmondPidFile}" ]
+    then
+        echo `cat ${gmondPidFile}`;
+    fi
+}
+
+function getGmondRunningPid()
+{
+    gmondLoggedPid=`getGmondLoggedPid ${1}`;
+
+    if [ -n "${gmondLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmondCoreConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_CORE_CONF
+#################### Generated by ${0} on ${now} ####################
+#
+/* This configuration is as close to 2.5.x default behavior as possible
+   The values closely match ./gmond/metric.h definitions in 2.5.x */
+globals {
+  daemonize = yes
+  setuid = yes
+  user = ${GMOND_USER}
+  debug_level = 0
+  max_udp_msg_len = 1472
+  mute = no
+  deaf = no 
+  allow_extra_data = yes
+  host_dmax = 0 /*secs */
+  host_tmax = 20 /*secs */
+  cleanup_threshold = 300 /*secs */
+  gexec = no
+  send_metadata_interval = 30 /*secs */
+}
+
+/*
+ * The cluster attributes specified will be used as part of the <CLUSTER>
+ * tag that will wrap all hosts collected by this instance.
+ */
+cluster {
+  name = "${gmondClusterName}"
+  owner = "unspecified"
+  latlong = "unspecified"
+  url = "unspecified"
+}
+
+/* The host section describes attributes of the host, like the location */
+host {
+  location = "unspecified"
+}
+
+/* You can specify as many tcp_accept_channels as you like to share
+ * an XML description of the state of the cluster.
+ *
+ * At the very least, every gmond must expose its XML state to 
+ * queriers from localhost.
+ */
+tcp_accept_channel {
+  bind = localhost
+  port = ${gmondPort}
+}
+
+/* Each metrics module that is referenced by gmond must be specified and
+   loaded. If the module has been statically linked with gmond, it does
+   not require a load path. However all dynamically loadable modules must
+   include a load path. */
+modules {
+  module {
+    name = "core_metrics"
+  }
+  module {
+    name = "cpu_module"
+    path = "modcpu.so"
+  }
+  module {
+    name = "disk_module"
+    path = "moddisk.so"
+  }
+  module {
+    name = "load_module"
+    path = "modload.so"
+  }
+  module {
+    name = "mem_module"
+    path = "modmem.so"
+  }
+  module {
+    name = "net_module"
+    path = "modnet.so"
+  }
+  module {
+    name = "proc_module"
+    path = "modproc.so"
+  }
+  module {
+    name = "sys_module"
+    path = "modsys.so"
+  }
+}
+
+/* The old internal 2.5.x metric array has been replaced by the following
+   collection_group directives.  What follows is the default behavior for
+   collecting and sending metrics that is as close to 2.5.x behavior as
+   possible. */
+
+/* This collection group will cause a heartbeat (or beacon) to be sent every
+   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
+   the age of the running gmond. */
+collection_group {
+  collect_once = yes
+  time_threshold = 20
+  metric {
+    name = "heartbeat"
+  }
+}
+
+/* This collection group will send general info about this host's total memory
+   every 180 secs.
+   This information doesn't change between reboots and is only collected
+   once; it is needed for the heatmap display. */
+ collection_group {
+   collect_once = yes
+   time_threshold = 180
+   metric {
+    name = "mem_total"
+    title = "Memory Total"
+   }
+ }
+
+/* This collection group will send general info about this host every
+   1200 secs.
+   This information doesn't change between reboots and is only collected
+   once. */
+collection_group {
+  collect_once = yes
+  time_threshold = 1200
+  metric {
+    name = "cpu_num"
+    title = "CPU Count"
+  }
+  metric {
+    name = "cpu_speed"
+    title = "CPU Speed"
+  }
+  /* Should this be here? Swap can be added/removed between reboots. */
+  metric {
+    name = "swap_total"
+    title = "Swap Space Total"
+  }
+  metric {
+    name = "boottime"
+    title = "Last Boot Time"
+  }
+  metric {
+    name = "machine_type"
+    title = "Machine Type"
+  }
+  metric {
+    name = "os_name"
+    title = "Operating System"
+  }
+  metric {
+    name = "os_release"
+    title = "Operating System Release"
+  }
+  metric {
+    name = "location"
+    title = "Location"
+  }
+}
+
+/* This collection group will send the status of gexecd for this host
+   every 300 secs.*/
+/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
+collection_group {
+  collect_once = yes
+  time_threshold = 300
+  metric {
+    name = "gexec"
+    title = "Gexec Status"
+  }
+}
+
+/* This collection group will collect the CPU status info every 20 secs.
+   The time threshold is set to 90 seconds.  In truth, this
+   time_threshold could be set significantly higher to reduce
+   unnecessary network chatter. */
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* CPU status */
+  metric {
+    name = "cpu_user"
+    value_threshold = "1.0"
+    title = "CPU User"
+  }
+  metric {
+    name = "cpu_system"
+    value_threshold = "1.0"
+    title = "CPU System"
+  }
+  metric {
+    name = "cpu_idle"
+    value_threshold = "5.0"
+    title = "CPU Idle"
+  }
+  metric {
+    name = "cpu_nice"
+    value_threshold = "1.0"
+    title = "CPU Nice"
+  }
+  metric {
+    name = "cpu_aidle"
+    value_threshold = "5.0"
+    title = "CPU aidle"
+  }
+  metric {
+    name = "cpu_wio"
+    value_threshold = "1.0"
+    title = "CPU wio"
+  }
+  /* The next two metrics are optional if you want more detail...
+     ... since they are accounted for in cpu_system.
+  metric {
+    name = "cpu_intr"
+    value_threshold = "1.0"
+    title = "CPU intr"
+  }
+  metric {
+    name = "cpu_sintr"
+    value_threshold = "1.0"
+    title = "CPU sintr"
+  }
+  */
+}
+
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* Load Averages */
+  metric {
+    name = "load_one"
+    value_threshold = "1.0"
+    title = "One Minute Load Average"
+  }
+  metric {
+    name = "load_five"
+    value_threshold = "1.0"
+    title = "Five Minute Load Average"
+  }
+  metric {
+    name = "load_fifteen"
+    value_threshold = "1.0"
+    title = "Fifteen Minute Load Average"
+  }
+}
+
+/* This group collects the number of running and total processes */
+collection_group {
+  collect_every = 80
+  time_threshold = 950
+  metric {
+    name = "proc_run"
+    value_threshold = "1.0"
+    title = "Total Running Processes"
+  }
+  metric {
+    name = "proc_total"
+    value_threshold = "1.0"
+    title = "Total Processes"
+  }
+}
+
+/* This collection group grabs the volatile memory metrics every 40 secs and
+   sends them at least every 180 secs.  This time_threshold can be increased
+   significantly to reduce unneeded network traffic. */
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "mem_free"
+    value_threshold = "1024.0"
+    title = "Free Memory"
+  }
+  metric {
+    name = "mem_shared"
+    value_threshold = "1024.0"
+    title = "Shared Memory"
+  }
+  metric {
+    name = "mem_buffers"
+    value_threshold = "1024.0"
+    title = "Memory Buffers"
+  }
+  metric {
+    name = "mem_cached"
+    value_threshold = "1024.0"
+    title = "Cached Memory"
+  }
+  metric {
+    name = "swap_free"
+    value_threshold = "1024.0"
+    title = "Free Swap Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 300
+  metric {
+    name = "bytes_out"
+    value_threshold = 4096
+    title = "Bytes Sent"
+  }
+  metric {
+    name = "bytes_in"
+    value_threshold = 4096
+    title = "Bytes Received"
+  }
+  metric {
+    name = "pkts_in"
+    value_threshold = 256
+    title = "Packets Received"
+  }
+  metric {
+    name = "pkts_out"
+    value_threshold = 256
+    title = "Packets Sent"
+  }
+}
+
+
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "disk_free"
+    value_threshold = 1.0
+    title = "Disk Space Available"
+  }
+  metric {
+    name = "part_max_used"
+    value_threshold = 1.0
+    title = "Maximum Disk Space Used"
+  }
+  metric {
+    name = "disk_total"
+    value_threshold = 1.0
+    title = "Total Disk Space"
+  }
+}
+
+include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
+END_OF_GMOND_CORE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondMasterConf
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_MASTER_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Masters only receive; they never send. */
+udp_recv_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+
+/* The gmond cluster master must additionally provide an XML 
+ * description of the cluster to the gmetad that will query it.
+ */
+tcp_accept_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+END_OF_GMOND_MASTER_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondSlaveConf
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_SLAVE_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Slaves only send; they never receive. */
+udp_send_channel {
+  #bind_hostname = yes # Highly recommended, soon to be default.
+                       # This option tells gmond to use a source address
+                       # that resolves to the machine's hostname.  Without
+                       # this, the metrics may appear to come from any
+                       # interface and the DNS names associated with
+                       # those IPs will be used to create the RRDs.
+  host = ${gmondMasterIP}
+  port = ${gmondPort}
+  ttl = 1
+}
+END_OF_GMOND_SLAVE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}

+ 170 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py

@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import cgi
+import os
+import rrdtool
+import sys
+import time
+
+# place this script in /var/www/cgi-bin of the Ganglia collector
+# requires 'yum install rrdtool-python' on the Ganglia collector
+
+
+def printMetric(clusterName, hostName, metricName, file, cf, start, end, resolution, pointInTime):
+  if clusterName.endswith("rrds"):
+    clusterName = ""
+
+  args = [file, cf]
+
+  if start is not None:
+    args.extend(["-s", start])
+
+  if end is not None:
+    args.extend(["-e", end])
+
+  if resolution is not None:
+    args.extend(["-r", resolution])
+
+  rrdMetric = rrdtool.fetch(args)
+  # ds_name
+  sys.stdout.write(rrdMetric[1][0])
+  sys.stdout.write("\n")
+
+  sys.stdout.write(clusterName)
+  sys.stdout.write("\n")
+  sys.stdout.write(hostName)
+  sys.stdout.write("\n")
+  sys.stdout.write(metricName)
+  sys.stdout.write("\n")
+
+  # write time
+  sys.stdout.write(str(rrdMetric[0][0]))
+  sys.stdout.write("\n")
+  # write step
+  sys.stdout.write(str(rrdMetric[0][2]))
+  sys.stdout.write("\n")
+
+  if not pointInTime:
+    for row in rrdMetric[2]:
+      if row[0] is not None:
+        sys.stdout.write(str(row[0]))
+        sys.stdout.write("\n")
+  else:
+    value = None
+    idx   = -1
+    rows  = rrdMetric[2]
+    rowsLastIdx = len(rows) * -1
+
+    while value is None and idx >= rowsLastIdx:
+      value = rows[idx][0]
+      idx -= 1
+
+    if value is not None:
+      sys.stdout.write(str(value))
+      sys.stdout.write("\n")
+
+  sys.stdout.write("[AMBARI_DP_END]\n")
+  return
+
+def stripList(l):
+  return([x.strip() for x in l])
+
+sys.stdout.write("Content-type: text/plain\n\n")
+
+# write start time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']));
+
+if "m" in queryString:
+  metricParts = queryString["m"].split(",")
+else:
+  metricParts = [""]
+metricParts = stripList(metricParts)
+
+hostParts = []
+if "h" in queryString:
+  hostParts = queryString["h"].split(",")
+hostParts = stripList(hostParts)
+
+if "c" in queryString:
+  clusterParts = queryString["c"].split(",")
+else:
+  clusterParts = [""]
+clusterParts = stripList(clusterParts)
+
+if "p" in queryString:
+  rrdPath = queryString["p"]
+else:
+  rrdPath = "/var/lib/ganglia/rrds/"
+
+start = None
+if "s" in queryString:
+  start = queryString["s"]
+
+end = None
+if "e" in queryString:
+  end = queryString["e"]
+
+resolution = None
+if "r" in queryString:
+  resolution = queryString["r"]
+
+if "cf" in queryString:
+  cf = queryString["cf"]
+else:
+  cf = "AVERAGE"
+
+if "pt" in queryString:
+  pointInTime = True
+else:
+  pointInTime = False
+
+
+host_metrics = ["boottime", "bytes_in", "bytes_out", "cpu_aidle", "cpu_idle",
+                "cpu_nice", "cpu_num", "cpu_speed", "cpu_system", "cpu_user",
+                "cpu_wio", "disk_free", "disk_total", "load_fifteen", "load_five",
+                "load_one", "mem_buffers", "mem_cached", "mem_free", "mem_shared",
+                "mem_total", "part_max_used", "pkts_in", "pkts_out", "proc_run",
+                "proc_total", "swap_free", "swap_total"]
+
+for cluster in clusterParts:
+  for path, dirs, files in os.walk(rrdPath + cluster):
+    pathParts = path.split("/")
+    if len(hostParts) == 0 or pathParts[-1] in hostParts:
+      for file in files:
+        for metric in metricParts:
+          if file.endswith(metric + ".rrd"):
+            if not (metric in host_metrics):
+              printMetric(pathParts[-2], pathParts[-1], file[:-4],
+                    os.path.join(path, file), cf, start, end, resolution, pointInTime)
+            else:
+              if (cluster == "HDPSlaves"):
+                 printMetric(pathParts[-2], pathParts[-1], file[:-4],
+                    os.path.join(path, file), cf, start, end, resolution, pointInTime)
+                
+sys.stdout.write("[AMBARI_END]\n")
+# write end time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+sys.stdout.flush()

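A quick way to exercise rrd.py once it is deployed under /var/www/cgi-bin: query it over HTTP with the parameters the script reads above (c, h, m, s, e, r, cf, pt). A minimal smoke test, assuming a collector reachable as ganglia.example.com; the host and metric names are illustrative only:

    # Last recorded cpu_user value for one host in the HDPSlaves cluster
    curl 'http://ganglia.example.com/cgi-bin/rrd.py?c=HDPSlaves&h=host1.example.com&m=cpu_user&pt=true'

    # A time-bounded series instead of a single point (s/e are epoch seconds, r in seconds)
    curl 'http://ganglia.example.com/cgi-bin/rrd.py?c=HDPSlaves&m=load_one&s=1367000000&e=1367003600&r=15'

Each datapoint block in the response ends with the [AMBARI_DP_END] marker and the whole payload with [AMBARI_END], matching the writes in the script.
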
+ 47 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh

@@ -0,0 +1,47 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+RRDCACHED_BIN=/usr/bin/rrdcached;
+RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
+RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
+RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
+
+function getRrdcachedLoggedPid()
+{
+    if [ -e "${RRDCACHED_PID_FILE}" ]
+    then
+        echo `cat ${RRDCACHED_PID_FILE}`;
+    fi
+}
+
+function getRrdcachedRunningPid()
+{
+    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
+
+    if [ -n "${rrdcachedLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}

+ 141 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh

@@ -0,0 +1,141 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh
+
+function usage()
+{
+  cat << END_USAGE
+Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
+
+Options:
+  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
+
+  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
+                          Cluster. Without this, we generate slave gmond configuration.
+
+  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
+                          gmond configuration that is generated without this).
+  -o <owner>              Owner
+  -g <group>              Group
+END_USAGE
+}
+
+function instantiateGmetadConf()
+{
+  # gmetad utility library.
+  source ./gmetadLib.sh;
+
+  generateGmetadConf > ${GMETAD_CONF_FILE};
+}
+
+function instantiateGmondConf()
+{
+  # gmond utility library.
+  source ./gmondLib.sh;
+ 
+  gmondClusterName=${1};
+
+  if [ "x" != "x${gmondClusterName}" ]
+  then
+
+    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
+    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
+    
+    # Always blindly generate the core gmond config - that goes on every box running gmond. 
+    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
+
+    isMasterGmond=${2};
+
+    # Decide whether we want to add on the master or slave gmond config.
+    if [ "0" -eq "${isMasterGmond}" ]
+    then
+      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
+    else
+      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
+    fi
+
+    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
+
+  else
+    echo "No gmondClusterName passed in, nothing to instantiate";
+  fi
+}
+
+# main()
+
+gmondClusterName=;
+isMasterGmond=0;
+configureGmetad=0;
+owner='root';
+group='root';
+
+while getopts ":c:mto:g:" OPTION
+do
+  case ${OPTION} in
+    c) 
+      gmondClusterName=${OPTARG};
+      ;;
+    m)
+      isMasterGmond=1;
+      ;;
+    t)
+      configureGmetad=1;
+      ;;
+    o)
+      owner=${OPTARG};
+      ;;
+    g)
+      group=${OPTARG};
+      ;;
+    ?)
+      usage;
+      exit 1;
+  esac
+done
+
+# Initialization.
+createDirectory ${GANGLIA_CONF_DIR};
+createDirectory ${GANGLIA_RUNTIME_DIR};
+# So rrdcached can drop its PID files in here.
+chmod a+w ${GANGLIA_RUNTIME_DIR};
+chown ${owner}:${group} ${GANGLIA_CONF_DIR};
+
+if [ -n "${gmondClusterName}" ]
+then
+
+  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
+  if [ "1" -eq "${configureGmetad}" ]
+  then
+    instantiateGmetadConf;
+  else
+    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
+  fi
+
+elif [ "1" -eq "${configureGmetad}" ]
+then
+  instantiateGmetadConf;
+else
+  usage;
+  exit 2;
+fi

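For reference, a few illustrative invocations of setupGanglia.sh; the cluster name and group are examples, not values fixed by this commit:

    # Slave gmond configuration for the HDPSlaves cluster
    ./setupGanglia.sh -c HDPSlaves -o root -g hadoop

    # Master gmond configuration for the same cluster (note -m)
    ./setupGanglia.sh -c HDPSlaves -m -o root -g hadoop

    # gmetad configuration (-t takes precedence over any -c given)
    ./setupGanglia.sh -t -o root -g hadoop
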
+ 57 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh

@@ -0,0 +1,57 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
+source ./rrdcachedLib.sh;
+
+# Before starting gmetad, start rrdcached.
+./startRrdcached.sh;
+
+if [ $? -eq 0 ] 
+then
+    gmetadRunningPid=`getGmetadRunningPid`;
+
+    # Only attempt to start gmetad if there's not already one running.
+    if [ -z "${gmetadRunningPid}" ]
+    then
+        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
+
+        gmetadRunningPid=`getGmetadRunningPid`;
+
+        if [ -n "${gmetadRunningPid}" ]
+        then
+            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
+        else
+            echo "Failed to start ${GMETAD_BIN}";
+            exit 1;
+        fi
+    else
+        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
+    fi
+else
+    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
+    exit 2;
+fi

+ 73 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh

@@ -0,0 +1,73 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function startGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only attempt to start gmond if there's not already one running.
+    if [ -z "${gmondRunningPid}" ]
+    then
+      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+      if [ -e "${gmondCoreConfFileName}" ]
+      then 
+        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
+
+        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
+  
+        gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+  
+        if [ -n "${gmondRunningPid}" ]
+        then
+            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
+        else
+            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
+            exit 1;
+        fi
+      fi 
+    else
+      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so start 
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        startGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just start the one ${gmondClusterName} that was asked for.
+    startGmondForCluster ${gmondClusterName};
+fi

+ 62 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh

@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only attempt to start rrdcached if there's not already one running.
+if [ -z "${rrdcachedRunningPid}" ]
+then
+    # Changed because of a problem Puppet had with the 'nobody' user:
+    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+    #         -b /var/lib/ganglia/rrds -B
+    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+             -b ${RRDCACHED_BASE_DIR} -B"
+
+    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for
+    # this, but it sometimes fails to take effect due to a lack of permissions,
+    # so perform the operation explicitly to be sure.
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
+
+    # Check to make sure rrdcached actually started up.
+    rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+    if [ -n "${rrdcachedRunningPid}" ]
+    then
+        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
+    else
+        echo "Failed to start ${RRDCACHED_BIN}";
+        exit 1;
+    fi
+else
+    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
+fi

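Since the explicit chgrp above exists precisely because the -s option can fail silently, verifying the socket group after startup is cheap insurance. A minimal check, assuming the default runtime directory /var/run/ganglia/hdp and an "apache" webserver group:

    # Both sockets should report the webserver group in the first column
    stat -c '%G %n' /var/run/ganglia/hdp/rrdcached.sock /var/run/ganglia/hdp/rrdcached.limited.sock
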
+ 43 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh

@@ -0,0 +1,43 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${gmetadRunningPid}" ]
+then
+    kill -KILL ${gmetadRunningPid};
+    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
+fi
+
+# Poll again.
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Once we've killed gmetad, there should no longer be a running PID.
+if [ -z "${gmetadRunningPid}" ]
+then
+    # It's safe to stop rrdcached now.
+    ./stopRrdcached.sh;
+fi

+ 54 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh

@@ -0,0 +1,54 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function stopGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only go ahead with the termination if we could find a running PID.
+    if [ -n "${gmondRunningPid}" ]
+    then
+      kill ${gmondRunningPid};
+      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so stop
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        stopGmondForCluster ${gmondClusterName};
+    done
+else
+    stopGmondForCluster ${gmondClusterName};
+fi

+ 41 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh

@@ -0,0 +1,41 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${rrdcachedRunningPid}" ]
+then
+    kill -TERM ${rrdcachedRunningPid};
+    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
+    # until we're sure it's well and truly dead. 
+    #
+    # Without this, an immediately following startRrdcached.sh won't do
+    # anything, because it still sees this soon-to-die instance alive,
+    # and the net result is that after a few seconds, there's no
+    # ${RRDCACHED_BIN} running on the box anymore.
+    sleep 5;
+    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
+fi 

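The fixed five-second sleep above is a pragmatic guard against that race. A hedged alternative sketch (not part of this commit) would poll getRrdcachedRunningPid from rrdcachedLib.sh until the process is actually gone, with a bounded number of attempts:

    attempt=0;
    while [ -n "`getRrdcachedRunningPid`" ] && [ ${attempt} -lt 10 ]
    do
        sleep 1;
        attempt=`expr ${attempt} + 1`;
    done
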
+ 28 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh

@@ -0,0 +1,28 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh;
+
+# Undo what we did while setting up Ganglia on this box.
+rm -rf ${GANGLIA_CONF_DIR};
+rm -rf ${GANGLIA_RUNTIME_DIR};

+ 79 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp

@@ -0,0 +1,79 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::config(
+  $ganglia_server_host = undef,
+  $service_state = $hdp::params::cluster_service_state
+)
+{
+ if ($service_state in ['running','installed_and_configured','stopped']) {
+    #TODO: divide into what is needed on server vs what is needed on monitored nodes
+    $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
+    $shell_files = ['checkGmond.sh','checkRrdcached.sh','gmetadLib.sh','gmondLib.sh','rrdcachedLib.sh' ,'setupGanglia.sh','startGmetad.sh','startGmond.sh','startRrdcached.sh','stopGmetad.sh','stopGmond.sh','stopRrdcached.sh','teardownGanglia.sh']
+
+    hdp::directory_recursive_create { $shell_cmds_dir :
+      owner => root,
+      group => root
+    } 
+
+     hdp-ganglia::config::init_file { ['gmetad','gmond']: }
+
+     hdp-ganglia::config::shell_file { $shell_files: }                       
+
+     hdp-ganglia::config::file { ['gangliaClusters.conf','gangliaEnv.sh','gangliaLib.sh']: 
+       ganglia_server_host => $ganglia_server_host
+     }
+ 
+     anchor{'hdp-ganglia::config::begin':} -> Hdp::Directory_recursive_create[$shell_cmds_dir] -> Hdp-ganglia::Config::Shell_file<||> -> anchor{'hdp-ganglia::config::end':}
+     Anchor['hdp-ganglia::config::begin'] -> Hdp-ganglia::Config::Init_file<||> -> Anchor['hdp-ganglia::config::end']
+     Anchor['hdp-ganglia::config::begin'] -> Hdp-ganglia::Config::File<||> -> Anchor['hdp-ganglia::config::end']
+  }
+}
+
+define hdp-ganglia::config::shell_file()
+{
+  file { "${hdp-ganglia::params::ganglia_shell_cmds_dir}/${name}":
+    source => "puppet:///modules/hdp-ganglia/${name}", 
+    mode => '0755'
+  }
+}
+
+define hdp-ganglia::config::init_file()
+{
+  file { "/etc/init.d/hdp-${name}":
+    source => "puppet:///modules/hdp-ganglia/${name}.init", 
+    mode => '0755'
+  }
+}
+
+### config files
+define hdp-ganglia::config::file(
+  $ganglia_server_host = undef
+)
+{
+  hdp::configfile { "${hdp-ganglia::params::ganglia_shell_cmds_dir}/${name}":
+    component           => 'ganglia',
+    owner               => root,
+    group               => root
+  }
+  if ($ganglia_server_host != undef) {
+    Hdp::Configfile<||>{ganglia_server_host => $ganglia_server_host}
+  }
+}

+ 43 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_monitor.pp

@@ -0,0 +1,43 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#TODO: the scripts called here should be converted to native Puppet
+define hdp-ganglia::config::generate_monitor(
+  $ganglia_service,
+  $role,
+  $owner = 'root',
+  $group = $hdp::params::user_group
+)
+{
+  $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
+  $cmd = $ganglia_service ? {
+    'gmond'  => $role ? {
+      'server' => "${shell_cmds_dir}/setupGanglia.sh -c ${name} -m -o ${owner} -g ${group}",
+       default =>  "${shell_cmds_dir}/setupGanglia.sh -c ${name} -o ${owner} -g ${group}"
+    },
+    'gmetad' => "${shell_cmds_dir}/setupGanglia.sh -t -o ${owner} -g ${group}",
+     default => hdp_fail("Unexpected ganglia service: ${ganglia_service}")
+  }
+
+  #TODO: put in test condition
+  hdp::exec { $cmd:
+    command => $cmd
+ }
+}

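To make the selector above concrete: a declaration like hdp-ganglia::config::generate_monitor { 'HDPSlaves': ganglia_service => 'gmond', role => 'monitor' } resolves, with the default shell-commands directory and an assumed hadoop group, to roughly:

    /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -o root -g hadoop
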
+ 44 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_server.pp

@@ -0,0 +1,44 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#TODO: the scripts called here should be converted to native Puppet
+define hdp-ganglia::config::generate_server(
+  $ganglia_service,
+  $role,
+  $owner = 'root',
+  $group = $hdp::params::user_group
+)
+{
+  $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
+
+  $cmd = $ganglia_service ? {
+    'gmond'  => $role ? {
+      'server' => "${shell_cmds_dir}/setupGanglia.sh -c ${name} -m -o ${owner} -g ${group}",
+       default =>  "${shell_cmds_dir}/setupGanglia.sh -c ${name} -o ${owner} -g ${group}"
+    },
+    'gmetad' => "${shell_cmds_dir}/setupGanglia.sh -t -o ${owner} -g ${group}",
+     default => hdp_fail("Unexpected ganglia service: ${ganglia_service}")
+  }
+
+  #TODO: put in test condition
+  hdp::exec { $cmd:
+    command => $cmd
+ }
+}

+ 36 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp

@@ -0,0 +1,36 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::hdp-gmetad::service_check() 
+{
+  
+  anchor { 'hdp-ganglia::hdp-gmetad::service_check::begin':}
+
+  exec { 'hdp-gmetad':
+    command   => "/etc/init.d/hdp-gmetad status | grep -v failed",
+    tries     => 3,
+    try_sleep => 5,
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    before      => Anchor['hdp-ganglia::hdp-gmetad::service_check::end'],
+    logoutput => "true"
+  }
+
+  anchor{ 'hdp-ganglia::hdp-gmetad::service_check::end':}
+}

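The exec above passes as long as "/etc/init.d/hdp-gmetad status" emits at least one line that does not contain "failed" (grep -v exits non-zero only when nothing survives the filter). The equivalent manual check:

    /etc/init.d/hdp-gmetad status | grep -v failed
    echo $?    # 0 means the check passed
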
+ 36 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp

@@ -0,0 +1,36 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::hdp-gmond::service_check() 
+{
+  
+  anchor { 'hdp-ganglia::hdp-gmond::service_check::begin':}
+
+  exec { 'hdp-gmond':
+    command   => "/etc/init.d/hdp-gmond status | grep -v failed",
+    tries     => 3,
+    try_sleep => 5,
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    before      => Anchor['hdp-ganglia::hdp-gmond::service_check::end'],
+    logoutput => "true"
+  }
+
+  anchor{ 'hdp-ganglia::hdp-gmond::service_check::end':}
+}

+ 55 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp

@@ -0,0 +1,55 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia(
+  $service_state
+)
+{
+  if (($service_state != 'no_op') and ($service_state != 'uninstalled')) {
+    include hdp-ganglia::params
+    $gmetad_user = $hdp-ganglia::params::gmetad_user
+    $gmond_user = $hdp-ganglia::params::gmond_user
+
+    group { $gmetad_user :
+      ensure => present
+    }
+
+    if ($gmetad_user != $gmond_user) {
+      group { $gmond_user :
+        ensure => present
+      }
+    }
+
+    hdp::user { $gmond_user: 
+      gid    => $gmond_user,
+      groups => ["$gmond_user"]
+    }
+  
+    if ( $gmetad_user != $gmond_user) {
+      hdp::user { $gmetad_user: 
+        gid    => $gmetad_user,
+        groups => ["$gmetad_user"]
+      }
+    }
+
+    anchor{'hdp-ganglia::begin':} -> Group<|title == $gmond_user or title == $gmetad_user|> -> User<|title == $gmond_user or title == $gmetad_user|> ->  anchor{'hdp-ganglia::end':}
+  }
+}
+

+ 153 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp

@@ -0,0 +1,153 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::monitor(
+  $service_state = $hdp::params::cluster_service_state,
+  $ganglia_server_host = undef,
+  $opts = {}
+) inherits hdp-ganglia::params
+{
+  if  ($service_state == 'no_op') {
+  } elsif ($service_state == 'uninstalled') {     
+
+   hdp::package { 'ganglia-monitor':         
+       ensure      => 'uninstalled', 
+      java_needed => false      
+   }
+
+  } else {
+    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
+      class { 'hdp-ganglia':
+       service_state => $service_state
+      }
+    }
+
+    hdp::package { 'ganglia-monitor': }
+
+    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
+      class { 'hdp-ganglia::config': ganglia_server_host => $ganglia_server_host}
+    }
+
+    if (($hdp::params::service_exists['hdp-hadoop::datanode'] == true) or ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::tasktracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::client'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)) {
+      class { 'hdp-hadoop::enable-ganglia': }
+    }
+
+    if ($hdp::params::service_exists['hdp-hbase::master'] == true) {
+      class { 'hdp-hbase::master::enable-ganglia': }
+    }
+
+    if ($hdp::params::service_exists['hdp-hbase::regionserver'] == true) {
+      class { 'hdp-hbase::regionserver::enable-ganglia': }
+    }
+
+    class { 'hdp-ganglia::monitor::config-gen': }
+  
+    class { 'hdp-ganglia::monitor::gmond': ensure => $service_state}
+
+    class { 'hdp-ganglia::monitor::ownership': }
+
+    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
+      Class['hdp-ganglia'] -> Hdp::Package['ganglia-monitor'] -> Class['hdp-ganglia::config'] -> 
+      Class['hdp-ganglia::monitor::config-gen'] -> Class['hdp-ganglia::monitor::gmond'] -> Class['hdp-ganglia::monitor::ownership']
+    } else {
+      Hdp::Package['ganglia-monitor'] ->  Class['hdp-ganglia::monitor::config-gen'] -> Class['hdp-ganglia::monitor::gmond'] -> Class['hdp-ganglia::monitor::ownership']
+    }
+  }
+}
+
+
+class hdp-ganglia::monitor::config-gen()
+{
+
+  $service_exists = $hdp::params::service_exists
+
+   #FIXME currently hacking this to make it work
+
+#  if ($service_exists['hdp-hadoop::namenode'] == true) {
+#    hdp-ganglia::config::generate_monitor { 'HDPNameNode':}
+#  }
+#  if ($service_exists['hdp-hadoop::jobtracker'] == true){
+#    hdp-ganglia::config::generate_monitor { 'HDPJobTracker':}
+#  }
+#  if ($service_exists['hdp-hbase::master'] == true) {
+#    hdp-ganglia::config::generate_monitor { 'HDPHBaseMaster':}
+#  }
+#  if ($service_exists['hdp-hadoop::datanode'] == true) {
+#    hdp-ganglia::config::generate_monitor { 'HDPSlaves':}
+#  }
+
+  if ($hdp::params::is_namenode_master) {
+    hdp-ganglia::config::generate_monitor { 'HDPNameNode':}
+  }
+  if ($hdp::params::is_jtnode_master) {
+    hdp-ganglia::config::generate_monitor { 'HDPJobTracker':}
+  }
+  if ($hdp::params::is_hbase_master) {
+    hdp-ganglia::config::generate_monitor { 'HDPHBaseMaster':}
+  }
+  hdp-ganglia::config::generate_monitor { 'HDPSlaves':}
+
+  Hdp-ganglia::Config::Generate_monitor<||>{
+    ganglia_service => 'gmond',
+    role => 'monitor'
+  }
+   # 
+  anchor{'hdp-ganglia::monitor::config-gen::begin':} -> Hdp-ganglia::Config::Generate_monitor<||> -> anchor{'hdp-ganglia::monitor::config-gen::end':}
+}
+
+class hdp-ganglia::monitor::gmond(
+  $ensure
+  )
+{
+  if ($ensure == 'running') {
+    $command = "service hdp-gmond start >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"
+   } elsif  ($ensure == 'stopped') {
+    $command = "service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"
+  }
+  if ($ensure == 'running' or $ensure == 'stopped') {
+    hdp::exec { "hdp-gmond service" :
+      command => $command,
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    }
+  }
+}
+
+class hdp-ganglia::monitor::ownership() {
+
+  file { "${hdp-ganglia::params::ganglia_dir}/conf.d":
+    owner  => 'root',
+    group  => $hdp::params::user_group
+  }
+
+  file { "${hdp-ganglia::params::ganglia_dir}/conf.d/modgstatus.conf":
+    owner => 'root',
+    group => $hdp::params::user_group
+  }
+
+  file { "${hdp-ganglia::params::ganglia_dir}/conf.d/multicpu.conf":
+    owner => 'root',
+    group => $hdp::params::user_group
+  }
+
+  file { "${hdp-ganglia::params::ganglia_dir}/gmond.conf":
+    owner => 'root',
+    group => $hdp::params::user_group
+  }
+}

+ 90 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp

@@ -0,0 +1,90 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::monitor_and_server(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-ganglia::params
+{
+  $ganglia_shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
+  $ganglia_conf_dir = $hdp-ganglia::params::ganglia_conf_dir
+  $ganglia_runtime_dir = $hdp-ganglia::params::ganglia_runtime_dir
+
+  #note: includes the common package ganglia-monitor
+  class { 'hdp-ganglia':
+    service_state => $service_state
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['uninstalled']) {
+    class { 'hdp-ganglia::server::packages':
+      ensure => 'uninstalled'
+      }
+
+    hdp::directory { [$ganglia_conf_dir,$ganglia_runtime_dir]:
+      service_state => $service_state,
+      force => true
+    }
+    
+    class { 'hdp-ganglia::config':
+      service_state => $service_state
+    }
+
+    Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> 
+      Hdp::Directory[$ganglia_conf_dir] -> Hdp::Directory[$ganglia_runtime_dir] ->
+      Class['hdp-ganglia::config']
+  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
+    class { 'hdp-ganglia::server::packages': }
+
+    class { 'hdp-ganglia::config': 
+     ganglia_server_host => $hdp::params::host_address,
+     service_state       => $service_state
+     }
+
+    class {'hdp-ganglia::monitor::config-gen': }      
+
+    class {'hdp-ganglia::server::config-gen': }      
+    
+    hdp-ganglia::config::generate_server { 'gmetad':
+      ganglia_service => 'gmetad'
+    }
+
+    class { 'hdp-ganglia::service::gmond': 
+      ensure => $service_state
+    }
+
+    class { 'hdp-ganglia::server::services' : 
+      service_state => $service_state,
+      monitor_and_server_single_node => true
+    }
+
+    class { 'hdp-ganglia::service::change_permission':
+      ensure => $service_state
+    }
+
+    #top level no anchors needed
+    Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> Class['hdp-ganglia::config'] -> 
+      Class['hdp-ganglia::monitor::config-gen'] -> Class['hdp-ganglia::server::config-gen'] -> Hdp-ganglia::Config::Generate_server['gmetad'] ->
+      Class['hdp-ganglia::service::gmond'] -> Class['hdp-ganglia::server::services'] ->
+      Class['hdp-ganglia::service::change_permission']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}

+ 35 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp

@@ -0,0 +1,35 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::params() inherits hdp::params
+{
+  $ganglia_conf_dir = "/etc/ganglia/hdp"
+  $ganglia_dir = "/etc/ganglia"
+  $ganglia_runtime_dir = "/var/run/ganglia/hdp"
+
+  $ganglia_shell_cmds_dir = hdp_default("ganglia_shell_cmd_dir","/usr/libexec/hdp/ganglia")
+  
+  $gmetad_user = $hdp::params::gmetad_user
+  $gmond_user = $hdp::params::gmond_user
+
+  $webserver_group = hdp_default("webserver_group","apache")
+  $rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
+  $rrdcached_base_dir = hdp_default("rrdcached_base_dir", "/var/lib/ganglia/rrds")
+}

+ 230 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp

@@ -0,0 +1,230 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::server(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits  hdp-ganglia::params
+{
+  $hdp::params::service_exists['hdp-ganglia::server'] = true
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state == 'uninstalled') {
+
+   class { 'hdp-ganglia::server::packages':
+      ensure => 'uninstalled',
+      service_state => $service_state
+   }
+
+   class { 'hdp-ganglia::server::files':
+      ensure => 'absent'
+   }
+
+  } else {
+  class { 'hdp-ganglia':
+    service_state => $service_state
+  }
+
+  class { 'hdp-ganglia::server::packages':
+    ensure => 'present',
+    service_state => $service_state
+  }
+
+  class { 'hdp-ganglia::config': 
+    ganglia_server_host => $hdp::params::host_address,
+    service_state       => $service_state 
+  }
+
+  if ($hdp::params::hbase_master_hosts) {
+    hdp-ganglia::config::generate_server { 'HDPHBaseMaster':
+      ganglia_service => 'gmond',
+      role => 'server'
+    }
+  }
+  
+  hdp-ganglia::config::generate_server { ['HDPJobTracker','HDPNameNode','HDPSlaves']:
+    ganglia_service => 'gmond',
+    role => 'server'
+  }
+
+  hdp-ganglia::config::generate_server { 'gmetad':
+    ganglia_service => 'gmetad',
+    role => 'server'
+  }
+
+  class { 'hdp-ganglia::server::gmetad': ensure => $service_state}
+
+  class { 'hdp-ganglia::service::change_permission': ensure => $service_state }
+
+  if ($service_state == 'installed_and_configured') {
+    $webserver_state = 'restart'
+  } elsif ($service_state == 'running') {
+    class { 'hdp-ganglia::server::delete_default_gmond_process': }
+    $webserver_state = 'running'
+  } else {
+    # We are never stopping httpd
+    #$webserver_state = $service_state
+  }
+
+  class { 'hdp-monitor-webserver': service_state => $webserver_state}
+
+  class { 'hdp-ganglia::server::files':
+     ensure => 'present'
+  }
+
+  file { "${hdp-ganglia::params::ganglia_dir}/gmetad.conf":
+    owner => 'root',
+    group => $hdp::params::user_group
+  }
+
+  #top level does not need anchors
+  Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> Class['hdp-ganglia::config'] ->
+ Hdp-ganglia::Config::Generate_server<||> ->
+ Class['hdp-ganglia::server::gmetad'] -> File["${hdp-ganglia::params::ganglia_dir}/gmetad.conf"] -> Class['hdp-ganglia::service::change_permission'] -> Class['hdp-ganglia::server::files'] -> Class['hdp-monitor-webserver']
+ }
+}
+
+class hdp-ganglia::server::packages(
+  $ensure = present,
+  $service_state = 'installed_and_configured'
+)
+{
+  hdp::package { ['ganglia-server','ganglia-gweb','ganglia-hdp-gweb-addons']: 
+    ensure      => $ensure,
+    java_needed => false,
+    require => Hdp::Package ['rrdtool-python']
+  }
+
+  # Removing conflicting packages only once, to work around the "/bin/rpm -e absent-absent-absent.absent" bug (BUG-2881)
+  if ($service_state == 'installed_and_configured' and $hdp::params::hdp_os_type == 'centos5') {
+    # Remove conflicting 32bit package
+    hdp::package { ['rrdtool-devel']:
+      ensure      => 'absent',
+      java_needed => false,
+      before => Hdp::Package ['rrdtool']
+    }
+
+    # Remove conflicting 32bit package
+    hdp::package { ['rrdtool']:
+      ensure      => 'absent',
+      java_needed => false,
+      before => Hdp::Package ['rrdtool-python']
+    }
+  }
+
+  hdp::package { ['rrdtool-python']:
+    ensure      => $ensure,
+    java_needed => false
+  }
+
+}
+
+class hdp-ganglia::server::files(
+  $ensure = present 
+)
+{
+  $rrd_py_path = $hdp::params::rrd_py_path [$hdp::params::hdp_os_type]
+  hdp::directory_recursive_create{$rrd_py_path:
+    ensure => "directory", 
+    override_owner => false 
+  }
+
+  $rrd_py_file_path = "${rrd_py_path}/rrd.py"
+
+  file{$rrd_py_file_path :
+    ensure => $ensure,
+    source => "puppet:///modules/hdp-ganglia/rrd.py",
+    mode   => '0755'
+  }
+
+  anchor{ 'hdp-ganglia::server::files::begin' : } -> Hdp::Directory_recursive_create[$rrd_py_path] -> File[$rrd_py_file_path] -> anchor{ 'hdp-ganglia::server::files::end' : }
+
+  $rrd_files_dir = $hdp-ganglia::params::rrdcached_base_dir
+  $rrd_file_owner = $hdp-ganglia::params::gmetad_user
+  $rrdcached_default_file_dir = $hdp-ganglia::params::rrdcached_default_base_dir
+
+  ## If the directory differs from the default, make sure it exists
+  if ($rrdcached_default_file_dir != $rrd_files_dir) {
+    hdp::directory_recursive_create{ $rrd_files_dir :
+      ensure => "directory",
+      owner => $rrd_file_owner,
+      group => $rrd_file_owner,
+      mode => '0755'
+    }
+
+    file { $rrdcached_default_file_dir :
+      ensure => link,
+      target => $rrd_files_dir,
+      force => true
+    }
+
+    File[$rrd_py_file_path] -> Hdp::Directory_recursive_create[$rrd_files_dir] -> File[$rrdcached_default_file_dir] -> Anchor['hdp-ganglia::server::files::end']
+  }
+  elsif ($rrd_file_owner != $hdp::params::NOBODY_USER) {
+    #owner of rrdcached_default_file_dir is 'nobody' by default 
+    #need to change owner to gmetad_user for proper gmetad service start
+    
+    hdp::directory { $rrdcached_default_file_dir:
+      owner => $rrd_file_owner,
+      group => $rrd_file_owner,
+      override_owner => true
+    }
+    
+    File[$rrd_py_file_path] -> Hdp::Directory[$rrdcached_default_file_dir] -> Anchor['hdp-ganglia::server::files::end']
+  }
+}
+
+
+class hdp-ganglia::service::change_permission(
+  $ensure
+)
+{
+  if ($ensure == 'running' or $ensure == 'installed_and_configured') {
+    hdp::directory_recursive_create { '/var/lib/ganglia/dwoo' :
+      mode => '0777',
+      owner => $hdp-ganglia::params::gmetad_user
+    }
+  }
+}
+
+class hdp-ganglia::server::gmetad(
+  $ensure
+)
+{
+  if ($ensure == 'running') {
+    $command = "service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
+   } elsif  ($ensure == 'stopped') {
+    $command = "service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
+  }
+  if ($ensure == 'running' or $ensure == 'stopped') {
+    hdp::exec { "hdp-gmetad service" :
+      command => "$command",
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    }
+  }
+}
+
+class hdp-ganglia::server::delete_default_gmond_process() {
+  hdp::exec { "delete_default_gmond_process" :
+    command => "chkconfig --del gmond",
+    path => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    require => Class['hdp-ganglia::server::packages']
+  }
+}

+ 25 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb

@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#########################################################
+### ClusterName           GmondMasterHost   GmondPort ###
+#########################################################
+    HDPSlaves           <%=scope.function_hdp_host("ganglia_server_host")%>  8660
+    HDPNameNode         <%=scope.function_hdp_host("ganglia_server_host")%>  8661
+    HDPJobTracker       <%=scope.function_hdp_host("ganglia_server_host")%>  8662
+    HDPHBaseMaster      <%=scope.function_hdp_host("ganglia_server_host")%>  8663

+ 24 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb

@@ -0,0 +1,24 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Unix users and groups for the binaries we start up.
+GMETAD_USER=<%=scope.function_hdp_template_var("gmetad_user")%>;
+GMOND_USER=<%=scope.function_hdp_template_var("gmond_user")%>;
+WEBSERVER_GROUP=<%=scope.function_hdp_template_var("webserver_group")%>;

+ 62 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb

@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+GANGLIA_CONF_DIR=<%=scope.function_hdp_template_var("ganglia_conf_dir")%>;
+GANGLIA_RUNTIME_DIR=<%=scope.function_hdp_template_var("ganglia_runtime_dir")%>;
+RRDCACHED_BASE_DIR=<%=scope.function_hdp_template_var("rrdcached_base_dir")%>;
+
+# This file contains all the info about each Ganglia Cluster in our Grid.
+GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
+
+function createDirectory()
+{
+    directoryPath=${1};
+
+    if [ "x" != "x${directoryPath}" ]
+    then
+        mkdir -p ${directoryPath};
+    fi
+}
+
+function getGangliaClusterInfo()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    else
+        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    fi
+}
+
+function getConfiguredGangliaClusterNames()
+{
+  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
+  # the subdirectory name from each.
+  if [ -e ${GANGLIA_CONF_DIR} ]
+  then  
+    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
+  fi
+}

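Given the gangliaClusters.conf format shown earlier (ClusterName, GmondMasterHost, GmondPort), the awk lookup above behaves like this; the host below is illustrative:

    # Look up a single cluster's entry
    getGangliaClusterInfo HDPNameNode
    # -> HDPNameNode    ganglia.example.com    8661

    # With no argument, print every non-comment, non-empty entry
    getGangliaClusterInfo
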
+ 62 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh

@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
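
The emptiness test above ("ls $dir | wc -l | grep -q ^0$") succeeds only when a name directory holds no files, so a single non-empty directory blocks the format. A minimal sketch of that check on its own, with illustrative paths:

    # Exit 0 if every listed name dir is empty, non-zero otherwise.
    for dir in /hadoop/hdfs/namenode /hadoop/hdfs/namenode2; do
      if [ "$(ls "$dir" | wc -l)" -ne 0 ]; then
        echo "non-empty: $dir"
        exit 1
      fi
    done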

+ 132 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/files/task-log4j.properties

@@ -0,0 +1,132 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Roll over at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+

+ 65 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb

@@ -0,0 +1,65 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# to handle differences in how args are passed in
+module Puppet::Parser::Functions
+  newfunction(:hdp_hadoop_get_mode, :type => :rvalue) do |args|
+  
+    dir = args[0]
+
+    oozie_dir = lookupvar("::hdp::params::oozie_hdfs_user_dir")
+    oozie_dir_mode = lookupvar("::hdp::params::oozie_hdfs_user_mode") 
+    
+    hcat_dir = lookupvar("::hdp::params::hcat_hdfs_user_dir")
+    hcat_dir_mode = lookupvar("::hdp::params::hcat_hdfs_user_mode") 
+    
+    webhcat_dir = lookupvar("::hdp::params::webhcat_hdfs_user_dir")
+    webhcat_dir_mode = lookupvar("::hdp::params::webhcat_hdfs_user_mode") 
+    
+    hive_dir = lookupvar("::hdp::params::hive_hdfs_user_dir")
+    hive_dir_mode = lookupvar("::hdp::params::hive_hdfs_user_mode") 
+    
+    smoke_dir = lookupvar("::hdp::params::smoke_hdfs_user_dir")
+    smoke_dir_mode = lookupvar("::hdp::params::smoke_hdfs_user_mode") 
+    
+    modes = []
+    modes.push({:dir => oozie_dir, :mode => oozie_dir_mode})
+    modes.push({:dir => hcat_dir, :mode => hcat_dir_mode})
+    modes.push({:dir => webhcat_dir, :mode => webhcat_dir_mode})
+    modes.push({:dir => hive_dir, :mode => hive_dir_mode})
+    modes.push({:dir => smoke_dir, :mode => smoke_dir_mode})
+
+    modes_grouped = {}
+    modes.each do |item|
+      if modes_grouped[item[:dir]].nil?
+        modes_grouped[item[:dir]]=[]
+      end
+      modes_grouped[item[:dir]]=modes_grouped[item[:dir]] + [(item[:mode])]
+    end
+
+    modes_max = {}
+    
+    modes_grouped.each_key do |key|
+      modes_max[key] = modes_grouped[key].max
+    end
+
+    modes_max[dir]
+  end
+end
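
The function collects every candidate (dir, mode) pair, groups the modes per directory, and returns the maximum mode for the directory asked about, since several services may share one HDFS user directory. The same group-and-take-max step, sketched with POSIX tools over hypothetical pairs:

    # Sort by dir, then mode descending; keep the first (max) line per dir.
    printf '%s\n' '/user/oozie 775' '/user/oozie 700' '/user/hive 700' \
      | sort -k1,1 -k2,2nr \
      | awk '!seen[$1]++'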

+ 51 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb

@@ -0,0 +1,51 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# to handle differences in how args are passed in
+module Puppet::Parser::Functions
+  newfunction(:hdp_hadoop_get_owner, :type => :rvalue) do |args|
+  
+    dir = args[0]
+    
+    oozie_dir = lookupvar("::hdp::params::oozie_hdfs_user_dir")
+    oozie_user = lookupvar("::hdp::params::oozie_user") 
+
+    hcat_dir = lookupvar("::hdp::params::hcat_hdfs_user_dir")
+    hcat_user = lookupvar("::hdp::params::hcat_user") 
+
+    webhcat_dir = lookupvar("::hdp::params::webhcat_hdfs_user_dir")
+    webhcat_user = lookupvar("::hdp::params::webhcat_user") 
+
+    hive_dir = lookupvar("::hdp::params::hive_hdfs_user_dir")
+    hive_user = lookupvar("::hdp::params::hive_user") 
+
+    smoke_dir = lookupvar("::hdp::params::smoke_hdfs_user_dir")
+    smoke_user = lookupvar("::hdp::params::smokeuser") 
+
+    dirs_to_owners = {}
+    dirs_to_owners[oozie_dir] = oozie_user
+    dirs_to_owners[hcat_dir] = hcat_user
+    dirs_to_owners[webhcat_dir] = webhcat_user
+    dirs_to_owners[hive_dir] = hive_user
+    dirs_to_owners[smoke_dir] = smoke_user
+
+    dirs_to_owners[dir]
+  end
+end

+ 56 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp

@@ -0,0 +1,56 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::client(
+  $service_state = $hdp::params::cluster_client_state
+) inherits hdp-hadoop::params
+{
+  $hdp::params::service_exists['hdp-hadoop::client'] = true
+
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+
+  if ($hdp::params::use_32_bits_on_slaves == true) {
+    Hdp-hadoop::Package<||>{include_32_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 32}
+  } else {
+    Hdp-hadoop::Package<||>{include_64_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 64}
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'hadoop_client_ambari_qa_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/${smokeuser}.headless.keytab",
+        keytabfile => "${smokeuser}.headless.keytab",
+        owner => $smokeuser,
+        hostnameInPrincipals => 'no'
+      }
+    }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}

+ 101 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp

@@ -0,0 +1,101 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::datanode(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hadoop::params 
+{
+
+  $hdp::params::service_exists['hdp-hadoop::datanode'] = true
+
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+
+  if ($hdp::params::use_32_bits_on_slaves == true) {
+    Hdp-hadoop::Package<||>{include_32_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 32}
+  } else {
+    Hdp-hadoop::Package<||>{include_64_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 64}
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+    $dfs_data_dir = $hdp-hadoop::params::dfs_data_dir
+  
+    if (($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)){
+      $a_namenode_on_node = true
+    } else {
+      $a_namenode_on_node = false
+    }
+
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'datanode_service_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/dn.service.keytab",
+        keytabfile => 'dn.service.keytab',
+        owner => $hdp-hadoop::params::hdfs_user
+      }
+    }
+
+  
+    hdp-hadoop::datanode::create_data_dirs { $dfs_data_dir: 
+      service_state => $service_state
+    }
+
+    if ($a_namenode_on_node == true){
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+    
+    hdp-hadoop::service{ 'datanode':
+      ensure         => $service_state,
+      user           => $hdp-hadoop::params::hdfs_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+    
+    #top level does not need anchors
+    Class['hdp-hadoop'] -> Hdp-hadoop::Service['datanode']
+    Hdp-hadoop::Datanode::Create_data_dirs<||> -> Hdp-hadoop::Service['datanode']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp-hadoop::datanode::create_data_dirs($service_state)
+{
+  $dirs = hdp_array_from_comma_list($name)
+  hdp::directory_recursive_create_ignore_failure { $dirs :
+    owner => $hdp-hadoop::params::hdfs_user,
+    mode => '0750',
+    service_state => $service_state,
+    force => true
+  }
+
+}
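
create_data_dirs receives the comma-separated dfs.data.dir value as its resource title, and hdp_array_from_comma_list expands it into one directory resource per path, each created with mode 0750 (failures ignored, so one bad disk does not stop the run). The expansion amounts to the following shell loop (paths and the hdfs user are illustrative):

    dfs_data_dir='/grid/0/hadoop/hdfs/data,/grid/1/hadoop/hdfs/data'
    for dir in $(echo "$dfs_data_dir" | tr ',' ' '); do
      mkdir -p "$dir" && chown hdfs "$dir" && chmod 0750 "$dir"
    done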

+ 79 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp

@@ -0,0 +1,79 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp-hadoop::hdfs::copyfromlocal(
+  $service_state,
+  $owner = unset,
+  $group = unset,
+  $recursive_chown = false,
+  $mode = undef,
+  $recursive_chmod = false,
+  $dest_dir = undef,
+  $kinit_if_needed = undef
+) 
+{
+ 
+  if ($service_state == 'running') {
+    $copy_cmd = "fs -copyFromLocal ${name} ${dest_dir}"
+    ## exec-hadoop kinits as the given user before running the command, but the 'unless' probe does not, so it carries its own ${kinit_if_needed} prefix
+    hdp-hadoop::exec-hadoop { $copy_cmd:
+      command => $copy_cmd,
+      unless => "${kinit_if_needed} hadoop fs -ls ${dest_dir} >/dev/null 2>&1",
+      user => $owner
+    }
+    if ($owner == unset) {
+      $chown = ""
+    } else {
+      if ($group == unset) {
+        $chown = $owner
+      } else {
+        $chown = "${owner}:${group}"
+     } 
+    }  
+ 
+    if ($chown != "") {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chown == true) {
+        $chown_cmd = "fs -chown -R ${chown} ${dest_dir}"
+      } else {
+        $chown_cmd = "fs -chown ${chown} ${dest_dir}"
+      }
+      hdp-hadoop::exec-hadoop {$chown_cmd :
+        command => $chown_cmd,
+        user => $owner
+      }
+      Hdp-hadoop::Exec-hadoop[$copy_cmd] -> Hdp-hadoop::Exec-hadoop[$chown_cmd]
+    }
+  
+    if ($mode != undef) {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chmod == true) {
+        $chmod_cmd = "fs -chmod -R ${mode} ${dest_dir}"
+      } else {
+        $chmod_cmd = "fs -chmod ${mode} ${dest_dir}"
+      }
+      hdp-hadoop::exec-hadoop {$chmod_cmd :
+        command => $chmod_cmd,
+        user => $owner
+      }
+      Hdp-hadoop::Exec-hadoop[$copy_cmd] -> Hdp-hadoop::Exec-hadoop[$chmod_cmd]
+    }
+  }       
+}
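
At run time the define reduces to three guarded hadoop CLI calls: copy unless the destination already lists, then chown and chmod ordered after the copy. A sketch of the generated sequence, with assumed user, group, and paths:

    hadoop fs -ls /apps/demo >/dev/null 2>&1 || \
      hadoop fs -copyFromLocal /tmp/payload /apps/demo
    hadoop fs -chown -R demo:hadoop /apps/demo
    hadoop fs -chmod -R 755 /apps/demo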

+ 36 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp

@@ -0,0 +1,36 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::hdfs::decommission(
+) inherits hdp-hadoop::params
+{
+  if hdp_is_empty($configuration['hdfs-site']['dfs.hosts.exclude']) {
+    hdp_fail("There is no path to the exclude file in the configuration!")
+  }
+
+  hdp-hadoop::hdfs::generate_exclude_file{'exclude_file':}
+
+  hdp::exec{"hadoop dfsadmin -refreshNodes":
+      command => "hadoop dfsadmin -refreshNodes",
+      user => $hdp::params::hdfs_user,
+      require => Hdp-Hadoop::Hdfs::Generate_Exclude_File['exclude_file']
+    }
+  
+}
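
Decommissioning is two steps: regenerate the file named by dfs.hosts.exclude, then have the NameNode re-read it. Done by hand, that is roughly (exclude-file path and the hdfs user are assumptions):

    echo 'dn3.example.com' >> /etc/hadoop/conf/dfs.exclude
    su - hdfs -c 'hadoop dfsadmin -refreshNodes'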

+ 80 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp

@@ -0,0 +1,80 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#TODO: unset should be changed to undef, just to be consistent
+define hdp-hadoop::hdfs::directory(
+  $service_state = 'running',
+  $owner = unset,
+  $group = unset,
+  $recursive_chown = false,
+  $mode = undef,
+  $recursive_chmod = false
+) 
+{
+ 
+  if ($service_state == 'running') {
+  
+  
+    if $stack_version in ("2.0.1") {
+      $mkdir_cmd = "fs -mkdir -p ${name}"
+    } else {
+      $mkdir_cmd = "fs -mkdir ${name}"
+    }
+    hdp-hadoop::exec-hadoop { $mkdir_cmd:
+      command => $mkdir_cmd,
+      unless => "hadoop fs -ls ${name} >/dev/null 2>&1"
+    }
+    if ($owner == unset) {
+      $chown = ""
+    } else {
+      if ($group == unset) {
+        $chown = $owner
+      } else {
+        $chown = "${owner}:${group}"
+     } 
+    }  
+ 
+    if ($chown != "") {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chown == true) {
+        $chown_cmd = "fs -chown -R ${chown} ${name}"
+      } else {
+        $chown_cmd = "fs -chown ${chown} ${name}"
+      }
+      hdp-hadoop::exec-hadoop {$chown_cmd :
+        command => $chown_cmd
+      }
+      Hdp-hadoop::Exec-hadoop[$mkdir_cmd] -> Hdp-hadoop::Exec-hadoop[$chown_cmd]
+    }
+  
+    if ($mode != undef) {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chmod == true) {
+        $chmod_cmd = "fs -chmod -R ${mode} ${name}"
+      } else {
+        $chmod_cmd = "fs -chmod ${mode} ${name}"
+      }
+      hdp-hadoop::exec-hadoop {$chmod_cmd :
+        command => $chmod_cmd
+      }
+      Hdp-hadoop::Exec-hadoop[$mkdir_cmd] -> Hdp-hadoop::Exec-hadoop[$chmod_cmd]
+    }
+  }       
+}
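
The 'unless' guard is what makes the mkdir idempotent across Puppet runs: the directory is created only when the ls probe fails. In shell form:

    # Create the HDFS directory only if it does not already exist.
    hadoop fs -ls /mapred/system >/dev/null 2>&1 || hadoop fs -mkdir /mapred/system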

+ 42 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp

@@ -0,0 +1,42 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+define hdp-hadoop::hdfs::generate_exclude_file()
+{
+  $exlude_file_path = $configuration['hdfs-site']['dfs.hosts.exclude']
+  ## Generate the exclude file if $configuration['hdfs-exclude-file']['datanodes']
+  ## has a value, or if the 'datanodes' key is present even with an empty value
+  if (hdp_is_empty($configuration) == false and
+    hdp_is_empty($configuration['hdfs-exclude-file']) == false) and
+    (hdp_is_empty($configuration['hdfs-exclude-file']['datanodes']) == false)
+    or has_key($configuration['hdfs-exclude-file'], 'datanodes') {
+    ## Create the file listing the hosts to exclude
+    $exlude_hosts_list = hdp_array_from_comma_list($configuration['hdfs-exclude-file']['datanodes'])
+    file { $exlude_file_path :
+      ensure => file,
+      content => template('hdp-hadoop/exclude_hosts_list.erb')
+    }
+  }
+}
+
+
+
+

+ 83 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp

@@ -0,0 +1,83 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::hdfs::service_check()
+{
+  $unique = hdp_unique_id_and_date()
+  $dir = '/tmp'
+  $tmp_file = "${dir}/${unique}"
+
+  $safemode_command = "dfsadmin -safemode get | grep OFF"
+
+  $create_dir_cmd = "fs -mkdir ${dir} ; hadoop fs -chmod -R 777 ${dir}"
+  $test_dir_exists = "hadoop fs -test -e ${dir}" #TODO: maybe fix the inconsistency that the test needs an explicit 'hadoop' prefix while the command does not
+  $cleanup_cmd = "fs -rm ${tmp_file}"
+  #cleanup is folded into the command below to handle retries; a retry would otherwise hit a stale file; the exit code is that of the second command
+  $create_file_cmd = "${cleanup_cmd}; hadoop fs -put /etc/passwd ${tmp_file}" #TODO: inconsistent that the second command needs hadoop
+  $test_cmd = "fs -test -e ${tmp_file}"
+
+  anchor { 'hdp-hadoop::hdfs::service_check::begin':}
+
+  hdp-hadoop::exec-hadoop { 'hdfs::service_check::check_safemode':
+    command   => $safemode_command,
+    tries     => 40,
+    try_sleep => 15,
+    logoutput => true,
+    user      => $hdp::params::smokeuser,
+    require   => Anchor['hdp-hadoop::hdfs::service_check::begin']
+  }
+
+  hdp-hadoop::exec-hadoop { 'hdfs::service_check::create_dir':
+    command   => $create_dir_cmd,
+    unless    => $test_dir_exists,
+    tries     => 3,
+    try_sleep => 5,
+    user      => $hdp::params::smokeuser,
+    require   => Hdp-hadoop::Exec-hadoop['hdfs::service_check::check_safemode']
+  }
+
+  hdp-hadoop::exec-hadoop { 'hdfs::service_check::create_file':
+    command   => $create_file_cmd,
+    tries     => 3,
+    try_sleep => 5,
+    user      => $hdp::params::smokeuser,
+    require   => Hdp-hadoop::Exec-hadoop['hdfs::service_check::create_dir'],
+    notify    => Hdp-hadoop::Exec-hadoop['hdfs::service_check::test']
+  }
+
+  hdp-hadoop::exec-hadoop { 'hdfs::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    user      => $hdp::params::smokeuser,
+    require     => Hdp-hadoop::Exec-hadoop['hdfs::service_check::create_file'],
+    #notify      => Hdp-hadoop::Exec-hadoop['hdfs::service_check::cleanup']  #TODO: put in after testing
+    before      => Anchor['hdp-hadoop::hdfs::service_check::end'] #TODO: remove after testing
+  }
+
+   #TODO: put in after testing
+ #  hdp-hadoop::exec-hadoop { 'hdfs::service_check::cleanup':
+ #   command     => $cleanup_cmd,
+ #   refreshonly => true,
+ #   require     => Hdp-hadoop::Exec-hadoop['hdfs::service_check::test'],
+ #   before      => Anchor['hdp-hadoop::hdfs::service_check::end']
+  #}
+  anchor{ 'hdp-hadoop::hdfs::service_check::end':}
+
+}
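
The check first waits for the NameNode to leave safemode (up to 40 tries, 15 seconds apart), then proves write access by putting a file and testing for it. Roughly, as a shell loop mirroring the manifest's retry counts (the smoke-file name is illustrative):

    for i in $(seq 1 40); do
      hadoop dfsadmin -safemode get | grep -q OFF && break
      sleep 15
    done
    hadoop fs -put /etc/passwd /tmp/smoke.$$ && hadoop fs -test -e /tmp/smoke.$$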

+ 387 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp

@@ -0,0 +1,387 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#singleton for use with the <||> collector form, so that namenode, datanode, etc. can pass state to hdp-hadoop and still use include
+define hdp-hadoop::common(
+  $service_states = []
+)
+{
+  class { 'hdp-hadoop':
+    service_states => $service_states    
+  }
+  anchor{'hdp-hadoop::common::begin':} -> Class['hdp-hadoop'] -> anchor{'hdp-hadoop::common::end':} 
+}
+
+class hdp-hadoop::initialize()
+{
+  if ($hdp::params::component_exists['hdp-hadoop'] == true) {
+  } else {
+    $hdp::params::component_exists['hdp-hadoop'] = true
+  }
+  hdp-hadoop::common { 'common':}
+  anchor{'hdp-hadoop::initialize::begin':} -> Hdp-hadoop::Common['common'] -> anchor{'hdp-hadoop::initialize::end':}
+
+# Configs generation  
+
+debug('##Configs generation for hdp-hadoop')
+
+
+  if has_key($configuration, 'mapred-queue-acls') {
+    configgenerator::configfile{'mapred-queue-acls': 
+      modulespath => $hdp-hadoop::params::conf_dir,
+      filename => 'mapred-queue-acls.xml',
+      module => 'hdp-hadoop',
+      configuration => $configuration['mapred-queue-acls'],
+      owner => $hdp-hadoop::params::mapred_user,
+      group => $hdp::params::user_group
+    }
+  } else { # Manually overriding ownership of file installed by hadoop package
+    file { "${hdp-hadoop::params::conf_dir}/mapred-queue-acls.xml":
+      owner => $hdp-hadoop::params::mapred_user,
+      group => $hdp::params::user_group
+    }
+  }
+  
+  if has_key($configuration, 'hadoop-policy') {
+    configgenerator::configfile{'hadoop-policy': 
+      modulespath => $hdp-hadoop::params::conf_dir,
+      filename => 'hadoop-policy.xml',
+      module => 'hdp-hadoop',
+      configuration => $configuration['hadoop-policy'],
+      owner => $hdp-hadoop::params::hdfs_user,
+      group => $hdp::params::user_group
+    }
+  } else { # Manually overriding ownership of file installed by hadoop package
+    file { "${hdp-hadoop::params::conf_dir}/hadoop-policy.xml":
+      owner => $hdp-hadoop::params::hdfs_user,
+      group => $hdp::params::user_group
+    }
+  }
+
+  if has_key($configuration, 'core-site') {
+      configgenerator::configfile{'core-site': 
+        modulespath => $hdp-hadoop::params::conf_dir,
+        filename => 'core-site.xml',
+        module => 'hdp-hadoop',
+        configuration => $configuration['core-site'],
+        owner => $hdp-hadoop::params::hdfs_user,
+        group => $hdp::params::user_group
+      }
+  } else { # Manually overriding ownership of file installed by hadoop package
+    file { "${hdp-hadoop::params::conf_dir}/core-site.xml":
+      owner => $hdp-hadoop::params::hdfs_user,
+      group => $hdp::params::user_group
+    }
+  }
+
+  if has_key($configuration, 'mapred-site') {
+    configgenerator::configfile{'mapred-site': 
+      modulespath => $hdp-hadoop::params::conf_dir,
+      filename => 'mapred-site.xml',
+      module => 'hdp-hadoop',
+      configuration => $configuration['mapred-site'],
+      owner => $hdp-hadoop::params::mapred_user,
+      group => $hdp::params::user_group
+    }
+  } else { # Manually overriding ownership of file installed by hadoop package
+    file { "${hdp-hadoop::params::conf_dir}/mapred-site.xml":
+      owner => $hdp-hadoop::params::mapred_user,
+      group => $hdp::params::user_group
+    }
+  }
+
+  $task_log4j_properties_location = "${conf_dir}/task-log4j.properties"
+
+  file { $task_log4j_properties_location:
+    owner   => $hdp-hadoop::params::mapred_user,
+    group   => $hdp::params::user_group,
+    mode    => 644,
+    ensure  => present,
+    source  => "puppet:///modules/hdp-hadoop/task-log4j.properties",
+    replace => false
+  }
+
+  if has_key($configuration, 'capacity-scheduler') {
+    configgenerator::configfile{'capacity-scheduler':
+      modulespath => $hdp-hadoop::params::conf_dir,
+      filename => 'capacity-scheduler.xml',
+      module => 'hdp-hadoop',
+      configuration => $configuration['capacity-scheduler'],
+      owner => $hdp-hadoop::params::hdfs_user,
+      group => $hdp::params::user_group,
+    }
+  } else { # Manually overriding ownership of file installed by hadoop package
+    file { "${hdp-hadoop::params::conf_dir}/capacity-scheduler.xml":
+      owner => $hdp-hadoop::params::hdfs_user,
+      group => $hdp::params::user_group
+    }
+  } 
+
+
+  if has_key($configuration, 'hdfs-site') {
+    configgenerator::configfile{'hdfs-site': 
+      modulespath => $hdp-hadoop::params::conf_dir,
+      filename => 'hdfs-site.xml',
+      module => 'hdp-hadoop',
+      configuration => $configuration['hdfs-site'],
+      owner => $hdp-hadoop::params::hdfs_user,
+      group => $hdp::params::user_group
+    }
+  } else { # Manually overriding ownership of file installed by hadoop package
+    file { "${hdp-hadoop::params::conf_dir}/hdfs-site.xml":
+      owner => $hdp-hadoop::params::hdfs_user,
+      group => $hdp::params::user_group
+    }
+  }
+
+  if has_key($configuration, 'hdfs-exclude-file') {
+    hdp-hadoop::hdfs::generate_exclude_file{'exclude_file':}
+  }
+
+  hdp::package {'ambari-log4j':
+    package_type  => 'ambari-log4j'
+  }
+
+  file { '/usr/lib/hadoop/lib/hadoop-tools.jar':
+    ensure => 'link',
+    target => '/usr/lib/hadoop/hadoop-tools.jar',
+    mode => 755,
+  }
+
+  file { "${hdp-hadoop::params::conf_dir}/configuration.xsl":
+    owner => $hdp-hadoop::params::hdfs_user,
+    group => $hdp::params::user_group
+  }
+
+  file { "${hdp-hadoop::params::conf_dir}/fair-scheduler.xml":
+    owner => $hdp-hadoop::params::mapred_user,
+    group => $hdp::params::user_group
+  }
+
+  file { "${hdp-hadoop::params::conf_dir}/masters":
+    owner => $hdp-hadoop::params::hdfs_user,
+    group => $hdp::params::user_group
+  }
+
+  file { "${hdp-hadoop::params::conf_dir}/ssl-client.xml.example":
+    owner => $hdp-hadoop::params::mapred_user,
+    group => $hdp::params::user_group
+  }
+
+  file { "${hdp-hadoop::params::conf_dir}/ssl-server.xml.example":
+    owner => $hdp-hadoop::params::mapred_user,
+    group => $hdp::params::user_group
+  }
+}
+
+class hdp-hadoop(
+  $service_states  = []
+)
+{
+  include hdp-hadoop::params
+  $hadoop_config_dir = $hdp-hadoop::params::conf_dir
+  $mapred_user = $hdp-hadoop::params::mapred_user  
+  $hdfs_user = $hdp-hadoop::params::hdfs_user  
+
+  anchor{'hdp-hadoop::begin':} 
+  anchor{'hdp-hadoop::end':} 
+
+  if ('uninstalled' in $service_states) {
+    hdp-hadoop::package { 'hadoop':
+      ensure => 'uninstalled'
+    }
+
+    hdp::directory_recursive_create { $hadoop_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> -> Hdp::Directory_recursive_create[$hadoop_config_dir] -> Anchor['hdp-hadoop::end']
+  } else {
+    
+    hdp-hadoop::package { 'hadoop':}
+
+
+    hdp::directory_recursive_create { $hadoop_config_dir:
+      service_state => $service_state,
+      force => true,
+      owner => $hdfs_user,
+      group => $hdp::params::user_group
+    }
+ 
+    hdp::user{ $hdfs_user:
+      groups => [$hdp::params::user_group]
+    }
+    if ($hdfs_user != $mapred_user) {
+      hdp::user { $mapred_user:
+        groups => [$hdp::params::user_group]
+      }
+    }
+
+    $logdirprefix = $hdp-hadoop::params::hdfs_log_dir_prefix
+    hdp::directory_recursive_create { $logdirprefix: 
+        owner => 'root'
+    }
+    $piddirprefix = $hdp-hadoop::params::hadoop_pid_dir_prefix
+    hdp::directory_recursive_create { $piddirprefix: 
+        owner => 'root'
+    }
+ 
+    #taskcontroller.cfg properties conditional on security
+    if ($hdp::params::security_enabled == true) {
+      file { "${hdp::params::hadoop_bin}/task-controller":
+        owner   => 'root',
+        group   => $hdp::params::user_group,
+        mode    => '6050',
+        require => Hdp-hadoop::Package['hadoop'],
+        before  => Anchor['hdp-hadoop::end']
+      }
+      $tc_owner = 'root'
+      $tc_mode = '0400'
+    } else {
+      $tc_owner = $hdfs_user
+      $tc_mode = undef
+    }
+    hdp-hadoop::configfile { 'taskcontroller.cfg' :
+      tag   => 'common',
+      owner => $tc_owner,
+      mode  => $tc_mode
+    }
+
+    $template_files = [ 'hadoop-env.sh', 'health_check', 'commons-logging.properties', 'log4j.properties', 'slaves']
+    hdp-hadoop::configfile { $template_files:
+      tag   => 'common', 
+      owner => $hdfs_user
+    }
+    
+    hdp-hadoop::configfile { 'hadoop-metrics2.properties' : 
+      tag   => 'common', 
+      owner => $hdfs_user,
+    }
+
+    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|>  ->  Hdp::Directory_recursive_create[$hadoop_config_dir] 
+    -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Anchor['hdp-hadoop::end']
+    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$logdirprefix] -> Anchor['hdp-hadoop::end']
+    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$piddirprefix] -> Anchor['hdp-hadoop::end']
+  }
+}
+
+class hdp-hadoop::enable-ganglia()
+{
+  Hdp-hadoop::Configfile<|title  == 'hadoop-metrics2.properties'|>{template_tag => 'GANGLIA'}
+}
+
+###config file helper
+define hdp-hadoop::configfile(
+  $owner = undef,
+  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir,
+  $mode = undef,
+  $namenode_host = undef,
+  $jtnode_host = undef,
+  $snamenode_host = undef,
+  $template_tag = undef,
+  $size = undef, #TODO: deprecate
+  $sizes = []
+) 
+{
+  #TODO: may need to be fixed 
+  if ($jtnode_host == undef) {
+    $calc_jtnode_host = $namenode_host
+  } else {
+    $calc_jtnode_host = $jtnode_host 
+  }
+ 
+  #only set 32 if there is a 32-bit component and no 64-bit components
+  if (64 in $sizes) {
+    $common_size = 64
+  } elsif (32 in $sizes) {
+    $common_size = 32
+  } else {
+    $common_size = 64
+  }
+  
+  hdp::configfile { "${hadoop_conf_dir}/${name}":
+    component      => 'hadoop',
+    owner          => $owner,
+    mode           => $mode,
+    namenode_host  => $namenode_host,
+    snamenode_host => $snamenode_host,
+    jtnode_host    => $calc_jtnode_host,
+    template_tag   => $template_tag,
+    size           => $common_size
+  }
+}
+
+#####
+define hdp-hadoop::exec-hadoop(
+  $command,
+  $unless = undef,
+  $refreshonly = undef,
+  $echo_yes = false,
+  $kinit_override = false,
+  $tries = 1,
+  $timeout = 900,
+  $try_sleep = undef,
+  $user = undef,
+  $logoutput = undef
+)
+{
+  include hdp-hadoop::params
+  $security_enabled = $hdp::params::security_enabled
+  $conf_dir = $hdp-hadoop::params::conf_dir
+  $hdfs_user = $hdp-hadoop::params::hdfs_user
+
+  if ($user == undef) {
+    $run_user = $hdfs_user
+  } else {
+    $run_user = $user
+  }
+
+  if (($security_enabled == true) and ($kinit_override == false)) {
+    #TODO: maybe skip the kinit when valid credentials are already in the cache
+    if ($run_user in [$hdfs_user,'root']) {
+      $keytab = "${hdp-hadoop::params::keytab_path}/${hdfs_user}.headless.keytab"
+      $principal = $hdfs_user
+    } else {
+      $keytab = "${hdp-hadoop::params::keytab_path}/${user}.headless.keytab" 
+      $principal = $user
+    }
+    $kinit_if_needed = "${kinit_path_local} -kt ${keytab} ${principal}; "
+  } else {
+    $kinit_if_needed = ""
+  }
+ 
+  if ($echo_yes == true) {
+    $cmd = "${kinit_if_needed}yes Y | hadoop --config ${conf_dir} ${command}"
+  } else {
+    $cmd = "${kinit_if_needed}hadoop --config ${conf_dir} ${command}"
+  }
+
+  hdp::exec { $cmd:
+    command     => $cmd,
+    user        => $run_user,
+    unless      => $unless,
+    refreshonly => $refreshonly,
+    tries       => $tries,
+    timeout     => $timeout,
+    try_sleep   => $try_sleep,
+    logoutput   => $logoutput
+  }
+}
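
With security on, exec-hadoop prefixes every command with a kinit against the caller's headless keytab, so a manifest command such as 'fs -ls /' expands roughly to the following (keytab path, principal, and conf dir are assumptions):

    /usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs; \
      hadoop --config /etc/hadoop/conf fs -ls /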

+ 94 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp

@@ -0,0 +1,94 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::jobtracker(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hadoop::params
+{
+  $hdp::params::service_exists['hdp-hadoop::jobtracker'] = true
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+  Hdp-hadoop::Package<||>{include_64_bit => true}
+  Hdp-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
+    $mapred_user = $hdp-hadoop::params::mapred_user
+    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir 
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'jobtracker_service_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/jt.service.keytab",
+        keytabfile => 'jt.service.keytab',
+        owner => $hdp-hadoop::params::mapred_user
+      }
+    }
+     
+    hdp-hadoop::jobtracker::create_local_dirs { $mapred_local_dir: 
+      service_state => $service_state
+    }
+
+    #TODO: cleanup 
+    Hdp-Hadoop::Configfile<||>{jtnode_host => $hdp::params::host_address}
+
+    #TODO: do we keep precondition here?
+    if ($service_state == 'running' and $hdp-hadoop::params::use_preconditions == true) {
+      class { 'hdp-hadoop::hdfs::service_check':
+        before => Hdp-hadoop::Service['jobtracker'],
+        require => Class['hdp-hadoop']
+      }
+    }
+
+    hdp-hadoop::service{ 'jobtracker':
+      ensure       => $service_state,
+      user         => $mapred_user
+    }
+  
+    hdp-hadoop::service{ 'historyserver':
+      ensure         => $service_state,
+      user           => $mapred_user,
+      create_pid_dir => false,
+      create_log_dir => false
+    }
+
+    #top level does not need anchors
+    Class['hdp-hadoop'] -> Hdp-hadoop::Service['jobtracker'] -> Hdp-hadoop::Service['historyserver']
+    Hdp-hadoop::Jobtracker::Create_local_dirs<||> -> Hdp-hadoop::Service['jobtracker']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp-hadoop::jobtracker::create_local_dirs($service_state)
+{
+    $dirs = hdp_array_from_comma_list($name)
+    hdp::directory_recursive_create { $dirs :
+      owner => $hdp-hadoop::params::mapred_user,
+      mode => '0755',
+      service_state => $service_state,
+      force => true
+    }
+}

+ 29 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp

@@ -0,0 +1,29 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::jobtracker::service_check()
+{
+  hdp-hadoop::exec-hadoop { 'jobtracker::service_check':
+    command   => 'job -list',
+    tries     => 3,
+    try_sleep => 5,
+    user => $hdp::params::smokeuser
+  }
+}

+ 75 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp

@@ -0,0 +1,75 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::mapred::service_check() 
+{
+  $smoke_test_user = $hdp::params::smokeuser
+  $jar_location = $hdp::params::hadoop_jar_location
+  $input_file = 'mapredsmokeinput'
+  $output_file = "mapredsmokeoutput"
+
+  $cleanup_cmd = "dfs -rmr ${output_file} ${input_file}"
+  #cleanup is folded into the command below to handle retries; a retry would otherwise hit a stale file; the exit code is that of the second command
+  $create_file_cmd = "$cleanup_cmd ; hadoop dfs -put /etc/passwd ${input_file} " #TODO: inconsistent that the second command needs hadoop
+  $test_cmd = "fs -test -e ${output_file}" 
+  $run_wordcount_job = "jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}"
+  
+  anchor { 'hdp-hadoop::mapred::service_check::begin':}
+
+  hdp-hadoop::exec-hadoop { 'mapred::service_check::create_file':
+    command   => $create_file_cmd,
+    tries     => 1,
+    try_sleep => 5,
+    require   => Anchor['hdp-hadoop::mapred::service_check::begin'],
+  #  notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
+    user      => $smoke_test_user
+  }
+
+  hdp-hadoop::exec-hadoop { 'mapred::service_check::run_wordcount':
+    command   => $run_wordcount_job,
+    tries     => 1,
+    try_sleep => 5,
+    require   => Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'],
+    notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::test'],
+    user      => $smoke_test_user,
+    logoutput => "true"
+  }
+
+#  exec { 'runjob':
+#    command   => "hadoop jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}",
+#    tries     => 1,
+#    try_sleep => 5,
+#    require   => Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'],
+#    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+#    notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::test'],
+#    logoutput => "true",
+#    user      => $smoke_test_user
+#  }
+
+  hdp-hadoop::exec-hadoop { 'mapred::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
+    before      => Anchor['hdp-hadoop::mapred::service_check::end'], #TODO: remove after testing
+    user        => $smoke_test_user
+  }
+  
+  anchor{ 'hdp-hadoop::mapred::service_check::end':}
+}
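
The MapReduce smoke test is put-input, run wordcount, test-output; the leading 'dfs -rmr' keeps retries safe by clearing stale files first. As plain commands run by the smoke user (the examples-jar path is an assumption for ${jar_location}):

    hadoop dfs -rmr mapredsmokeoutput mapredsmokeinput
    hadoop dfs -put /etc/passwd mapredsmokeinput
    hadoop jar /usr/lib/hadoop/hadoop-examples.jar wordcount mapredsmokeinput mapredsmokeoutput
    hadoop fs -test -e mapredsmokeoutput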

+ 230 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp

@@ -0,0 +1,230 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::namenode(
+  $service_state = $hdp::params::cluster_service_state,
+  $slave_hosts = [],
+  $format = true,
+  $opts = {}
+) inherits hdp-hadoop::params
+{
+  $hdp::params::service_exists['hdp-hadoop::namenode'] = true
+
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+  Hdp-hadoop::Package<||>{include_64_bit => true}
+  Hdp-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+    $dfs_name_dir = $hdp-hadoop::params::dfs_name_dir
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and 
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'namenode_service_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/nn.service.keytab",
+        keytabfile => 'nn.service.keytab',
+        owner => $hdp-hadoop::params::hdfs_user
+      }
+      hdp::download_keytab { 'namenode_hdfs_headless_keytab' :   
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/hdfs.headless.keytab",
+        keytabfile => 'hdfs.headless.keytab', 
+        owner => $hdp-hadoop::params::hdfs_user, 
+        hostnameInPrincipals => 'no'
+      }
+      hdp::download_keytab { 'namenode_spnego_keytab' :   
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/spnego.service.keytab",
+        keytabfile => 'spnego.service.keytab', 
+        owner => $hdp-hadoop::params::hdfs_user, 
+        mode => '0440',
+        group => $hdp::params::user_group
+      }
+    }
+ 
+    hdp-hadoop::namenode::create_name_dirs { $dfs_name_dir: 
+      service_state => $service_state
+    }
+   
+    Hdp-Hadoop::Configfile<||>{namenode_host => $hdp::params::host_address}
+    Hdp::Configfile<||>{namenode_host => $hdp::params::host_address} #for components other than hadoop (e.g., hbase) 
+  
+    if ($service_state == 'running' and $format == true) {
+      class {'hdp-hadoop::namenode::format' : }
+    }
+
+    hdp-hadoop::service{ 'namenode':
+      ensure       => $service_state,
+      user         => $hdp-hadoop::params::hdfs_user,
+      initial_wait => hdp_option_value($opts,'wait')
+    }
+
+    hdp-hadoop::namenode::create_app_directories { 'create_app_directories' :
+       service_state => $service_state
+    }
+
+    hdp-hadoop::namenode::create_user_directories { 'create_user_directories' :
+       service_state => $service_state
+    }
+
+    #top level does not need anchors
+    Class['hdp-hadoop'] ->  Hdp-hadoop::Service['namenode']
+    Hdp-hadoop::Namenode::Create_name_dirs<||> -> Hdp-hadoop::Service['namenode'] 
+    Hdp-hadoop::Service['namenode'] -> Hdp-hadoop::Namenode::Create_app_directories<||> -> Hdp-hadoop::Namenode::Create_user_directories<||>
+    if ($service_state == 'running' and $format == true) {
+      Class['hdp-hadoop'] -> Class['hdp-hadoop::namenode::format'] -> Hdp-hadoop::Service['namenode']
+      Hdp-hadoop::Namenode::Create_name_dirs<||> -> Class['hdp-hadoop::namenode::format']
+    } 
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp-hadoop::namenode::create_name_dirs($service_state)
+{
+  $dirs = hdp_array_from_comma_list($name)
+  hdp::directory_recursive_create { $dirs :
+    owner => $hdp-hadoop::params::hdfs_user,
+    mode => '0755',
+    service_state => $service_state,
+    force => true
+  }
+}
+
+define hdp-hadoop::namenode::create_app_directories($service_state)
+{
+
+  if ($service_state == 'running') {
+   
+    hdp-hadoop::hdfs::directory{ "/tmp" :
+      service_state => $service_state,
+      owner => $hdp-hadoop::params::hdfs_user,
+      mode => '777'
+    }
+
+    hdp-hadoop::hdfs::directory{ '/mapred' :
+      service_state => $service_state,
+      owner         => $hdp-hadoop::params::mapred_user
+    }
+    hdp-hadoop::hdfs::directory{ '/mapred/system' :
+      service_state => $service_state,
+      owner         => $hdp-hadoop::params::mapred_user
+    }
+    Hdp-hadoop::Hdfs::Directory['/mapred'] -> Hdp-hadoop::Hdfs::Directory['/mapred/system']
+
+    if ($hdp::params::hbase_master_hosts != "") {
+      $hdfs_root_dir = $hdp::params::hbase_hdfs_root_dir
+      hdp-hadoop::hdfs::directory { $hdfs_root_dir:
+        owner         => $hdp::params::hbase_user,
+        service_state => $service_state
+      }
+    }
+
+    if ($hdp::params::hive_server_host != "") {
+      $hive_user = $hdp::params::hive_user
+      $hive_apps_whs_dir = $hdp::params::hive_apps_whs_dir
+
+      hdp-hadoop::hdfs::directory{ $hive_apps_whs_dir:
+        service_state   => $service_state,
+        owner            => $hive_user,
+        mode             => '777',
+        recursive_chmod  => true
+      }
+    }
+
+    if ($hdp::params::webhcat_server_host != "") {
+      $webhcat_user = $hdp::params::webhcat_user
+      $webhcat_apps_dir = $hdp::params::webhcat_apps_dir
+
+      hdp-hadoop::hdfs::directory{ $webhcat_apps_dir:
+        service_state => $service_state,
+        owner => $webhcat_user,
+        mode  => '755',
+        recursive_chmod => true
+      }
+    }
+  }
+}
+
+
+define hdp-hadoop::namenode::create_user_directories($service_state)
+{
+  if ($service_state == 'running') {
+    $smoke_hdfs_user_dir = $hdp::params::smoke_hdfs_user_dir
+
+    $smoke_user_dir_item="$smoke_hdfs_user_dir,"
+
+    if ($hdp::params::hive_server_host != "") {
+      $hive_hdfs_user_dir = $hdp::params::hive_hdfs_user_dir
+      $hive_dir_item="$hive_hdfs_user_dir,"
+    } else {
+      $hive_dir_item=""
+    }
+
+    if ($hdp::params::oozie_server != "") {
+      $oozie_hdfs_user_dir = $hdp::params::oozie_hdfs_user_dir
+      $oozie_dir_item="$oozie_hdfs_user_dir,"
+    } else {
+      $oozie_dir_item=""
+    }
+    
+    if ($hdp::params::webhcat_server_host != "") {
+      $hcat_hdfs_user_dir = $hdp::params::hcat_hdfs_user_dir
+      $webhcat_hdfs_user_dir = $hdp::params::webhcat_hdfs_user_dir
+      $webhcat_dir_item="$webhcat_hdfs_user_dir,"
+      if ($hcat_hdfs_user_dir != $webhcat_hdfs_user_dir) {
+        $hcat_dir_item="$hcat_hdfs_user_dir,"
+      } else {
+        $hcat_dir_item=""
+      }
+    } else {
+      $webhcat_dir_item=""
+    }
+
+    $users_dir_list_comm_sep = "$smoke_user_dir_item $hive_dir_item $oozie_dir_item $hcat_dir_item $webhcat_dir_item"
+
+    #Get unique users directories set
+    $users_dirs_set = hdp_set_from_comma_list($users_dir_list_comm_sep)
+
+    hdp-hadoop::namenode::create_user_directory{$users_dirs_set:
+      service_state => $service_state}
+  }
+  
+}
+
+define hdp-hadoop::namenode::create_user_directory($service_state)
+{
+  
+  $owner = hdp_hadoop_get_owner($name)
+  $mode = hdp_hadoop_get_mode($name)
+  debug("## Creating user directory: $name, owner: $owner, mode: $mode")
+  hdp-hadoop::hdfs::directory{ $name:
+   service_state   => $service_state,
+   mode            => $mode,
+   owner           => $owner,
+   recursive_chmod => true
+  }
+}
+

+ 57 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp

@@ -0,0 +1,57 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::namenode::format(
+  $force = false
+)
+{
+  $mark_dir = $hdp-hadoop::params::namenode_formatted_mark_dir
+  $dfs_name_dir = $hdp-hadoop::params::dfs_name_dir
+  $hdfs_user = $hdp::params::hdfs_user
+  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir
+
+  if ($force == true) {
+    hdp-hadoop::exec-hadoop { 'namenode -format' :
+      command        => 'namenode -format',
+      kinit_override => true,
+      notify         => Hdp::Exec['set namenode mark']
+    }
+  } else {
+    file { '/tmp/checkForFormat.sh':
+      ensure => present,
+      source => "puppet:///modules/hdp-hadoop/checkForFormat.sh",
+      mode   => '0755'
+    }
+
+    exec { '/tmp/checkForFormat.sh':
+      command   => "sh /tmp/checkForFormat.sh ${hdfs_user} ${hadoop_conf_dir} ${mark_dir} ${dfs_name_dir}",
+      unless    => "test -d ${mark_dir}",
+      require   => File['/tmp/checkForFormat.sh'],
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      logoutput => "true",
+      notify    => Hdp::Exec['set namenode mark']
+    }
+  }
+
+  hdp::exec { 'set namenode mark' :
+    command     => "mkdir -p ${mark_dir}",
+    refreshonly => true
+  }
+}
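Both branches converge on the 'set namenode mark' exec: notify triggers it, refreshonly keeps it from running on its own, and the marker directory it creates satisfies the unless guard on later runs. A standalone sketch of this run-once idiom, with hypothetical paths:

    exec { 'do-once':
      command => '/bin/echo doing one-time work',
      path    => '/bin:/usr/bin',
      unless  => 'test -d /var/run/myapp/.done',  # skipped once the marker exists
      notify  => Exec['set-marker'],
    }

    exec { 'set-marker':
      command     => '/bin/mkdir -p /var/run/myapp/.done',
      path        => '/bin:/usr/bin',
      refreshonly => true,                        # runs only when notified
    }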

+ 28 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp

@@ -0,0 +1,28 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::namenode::service_check()
+{
+  hdp-hadoop::exec-hadoop { 'namenode::service_check':
+    command   => 'dfs -ls /',
+    tries     => 3,
+    try_sleep => 5
+  }
+}

+ 44 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp

@@ -0,0 +1,44 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#singleton, but implemented as a define so collections can be used to override params
+define hdp-hadoop::package(
+  $ensure = 'present',
+  $include_32_bit = false,
+  $include_64_bit = false
+)
+{
+  #use the 32-bit package only when it is specifically requested and no 64-bit package is
+  if ($include_32_bit == true) and ($include_64_bit != true) {
+    $size = 32
+  } else {
+    $size = 64
+  }
+  $package = "hadoop ${size}"
+  $lzo_enabled = $hdp::params::lzo_enabled
+
+  hdp::package{ $package:
+    ensure       => $ensure,
+    package_type => 'hadoop',
+    size         => $size,
+    lzo_needed   => $lzo_enabled
+  }
+  anchor{ 'hdp-hadoop::package::helper::begin': } -> Hdp::Package[$package] -> anchor{ 'hdp-hadoop::package::helper::end': }
+}
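The begin/end anchors give callers a stable edge to order against, since hdp::package may itself expand into several underlying resources. The same containment idiom in isolation (module and package names hypothetical):

    anchor { 'mymodule::install::begin': }
    -> package { 'example-pkg': ensure => installed }
    -> anchor { 'mymodule::install::end': }
    # Callers can require Anchor['mymodule::install::end'] without knowing
    # which resources the install step actually declares.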

+ 187 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp

@@ -0,0 +1,187 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::params(
+) inherits hdp::params 
+{
+
+  ##TODO: for testing in masterless mode
+  $use_preconditions = false
+  ####  
+  $conf_dir = $hdp::params::hadoop_conf_dir 
+
+  ####### users
+
+  $mapred_user = $hdp::params::mapred_user
+  $hdfs_user = $hdp::params::hdfs_user
+  
+  ##### security related
+  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
+ 
+  if ($hdp::params::security_enabled == true) {
+    $enable_security_authorization = true
+    $security_type = "kerberos"
+    $task_controller = "org.apache.hadoop.mapred.LinuxTaskController"
+    $dfs_datanode_address = 1019
+    $dfs_datanode_http_address = 1022
+  } else {
+    $enable_security_authorization = false
+    $security_type = "simple"
+    $task_controller = "org.apache.hadoop.mapred.DefaultTaskController"
+    $dfs_datanode_address = hdp_default("dfs_datanode_address","50010")
+    $dfs_datanode_http_address = hdp_default("dfs_datanode_http_address","50075")
+  }
+
+  ### hadoop-env
+  
+  $dtnode_heapsize = hdp_default("dtnode_heapsize","1024m")
+  $ttnode_heapsize = hdp_default("ttnode_heapsize","1024m")
+
+  $hadoop_heapsize = hdp_default("hadoop_heapsize","1024m")
+
+  $hdfs_log_dir_prefix = hdp_default("hdfs_log_dir_prefix","/var/log/hadoop")
+
+  $hadoop_pid_dir_prefix = hdp_default("hadoop_pid_dir_prefix","/var/run/hadoop")
+  $run_dir = $hadoop_pid_dir_prefix
+
+  $namenode_formatted_mark_dir = "${run_dir}/hdfs/namenode/formatted/"
+
+  $jtnode_heapsize = hdp_default("jtnode_heapsize","1024m")
+
+  $jtnode_opt_maxnewsize = hdp_default("jtnode_opt_maxnewsize","200m")
+
+  $jtnode_opt_newsize = hdp_default("jtnode_opt_newsize","200m")
+
+  $namenode_heapsize = hdp_default("namenode_heapsize","1024m")
+
+  $namenode_opt_maxnewsize = hdp_default("namenode_opt_maxnewsize","640m")
+
+  $namenode_opt_newsize = hdp_default("namenode_opt_newsize","640m")
+  
+  $hadoop_libexec_dir = hdp_default("hadoop_libexec_dir","/usr/lib/hadoop/libexec")
+  
+  ### compression related
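+  # Summary of the four branches below: map-output compression is enabled
+  # whenever lzo or snappy is enabled; Snappy is preferred as the map-output
+  # codec when available, then Lzo, with DefaultCodec (and compression off)
+  # when neither is enabled.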
+  if (($hdp::params::lzo_enabled == true) and ($hdp::params::snappy_enabled == true)) {
+    $mapred_compress_map_output = true
+    $compression_codecs =  "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec"
+    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
+  } elsif ($hdp::params::snappy_enabled == true) {
+    $mapred_compress_map_output = true
+    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec" 
+    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
+  } elsif ($hdp::params::lzo_enabled == true) {
+    $mapred_compress_map_output = true
+    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec"
+    $mapred_map_output_compression_codec = "com.hadoop.compression.lzo.LzoCodec"
+  } else { 
+    $mapred_compress_map_output = false
+    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec"
+    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.DefaultCodec"
+  }
+
+  ### core-site
+  $fs_checkpoint_dir = hdp_default("core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
+
+  $proxyuser_group = hdp_default("core-site/proxyuser.group","users")
+
+  ### hdfs-site
+  $datanode_du_reserved = hdp_default("hdfs-site/datanode.du.reserved",1073741824)
+
+  $dfs_block_local_path_access_user = hdp_default("hdfs-site/dfs.block.local.path.access.user","hbase")
+
+  $dfs_data_dir = $hdp::params::dfs_data_dir
+
+  $dfs_datanode_data_dir_perm = hdp_default("hdfs-site/dfs.datanode.data.dir.perm",750)
+
+  $dfs_datanode_failed_volume_tolerated = hdp_default("hdfs-site/dfs.datanode.failed.volume.tolerated",0)
+
+  $dfs_exclude = hdp_default("hdfs-site/dfs.exclude","dfs.exclude")
+
+  $dfs_include = hdp_default("hdfs-site/dfs.include","dfs.include")
+  
+  $dfs_name_dir = hdp_default("hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
+  
+  $dfs_replication = hdp_default("hdfs-site/dfs.replication",3)
+
+  $dfs_support_append = hdp_default("hdfs-site/dfs.support.append",true)
+
+  $dfs_webhdfs_enabled = hdp_default("hdfs-site/dfs.webhdfs.enabled",false)
+
+
+ ######### mapred #######
+   ### mapred-site
+
+  $mapred_system_dir = '/mapred/system'
+
+  $io_sort_mb = hdp_default("mapred-site/io.sort.mb","200")
+
+  $io_sort_spill_percent = hdp_default("mapred-site/io.sort.spill.percent","0.9")
+
+  $mapred_child_java_opts_sz = hdp_default("mapred-site/mapred.child.java.opts.sz","-Xmx768m")
+
+  $mapred_cluster_map_mem_mb = hdp_default("mapred-site/mapred.cluster.map.mem.mb","-1")
+
+  $mapred_cluster_max_map_mem_mb = hdp_default("mapred-site/mapred.cluster.max.map.mem.mb","-1")
+
+  $mapred_cluster_max_red_mem_mb = hdp_default("mapred-site/mapred.cluster.max.red.mem.mb","-1")
+
+  $mapred_cluster_red_mem_mb = hdp_default("mapred-site/mapred.cluster.red.mem.mb","-1")
+
+  $mapred_hosts_exclude = hdp_default("mapred-site/mapred.hosts.exclude","mapred.exclude")
+
+  $mapred_hosts_include = hdp_default("mapred-site/mapred.hosts.include","mapred.include")
+
+  $mapred_job_map_mem_mb = hdp_default("mapred-site/mapred.job.map.mem.mb","-1")
+
+  $mapred_job_red_mem_mb = hdp_default("mapred-site/mapred.job.red.mem.mb","-1")
+
+  $mapred_jobstatus_dir = hdp_default("mapred-site/mapred.jobstatus.dir","file:////mapred/jobstatus")
+
+  $mapred_local_dir = hdp_default("mapred-site/mapred.local.dir","/tmp/hadoop-mapred/mapred/local")
+   
+  $mapred_map_tasks_max = hdp_default("mapred-site/mapred.map.tasks.max",4)
+
+  $mapred_red_tasks_max = hdp_default("mapred-site/mapred.red.tasks.max",4)
+
+  $mapreduce_userlog_retainhours = hdp_default("mapred-site/mapreduce.userlog.retainhours",24)
+
+  $maxtasks_per_job = hdp_default("mapred-site/maxtasks.per.job","-1")
+
+  $scheduler_name = hdp_default("mapred-site/scheduler.name","org.apache.hadoop.mapred.CapacityTaskScheduler")
+
+  #### health_check
+
+  $security_enabled = $hdp::params::security_enabled
+
+  $task_bin_exe = hdp_default("task_bin_exe")
+
+  $rca_enabled = hdp_default("rca_enabled", false)
+  if ($rca_enabled == true) {
+    $rca_prefix = ""
+  } else {
+    $rca_prefix = "###"
+  }
+  # $ambari_db_server_host = hdp_default("ambari_db_server_host", "localhost")
+  $ambari_db_rca_url = hdp_default("ambari_db_rca_url", "jdbc:postgresql://localhost/ambarirca")
+  $ambari_db_rca_driver = hdp_default("ambari_db_rca_driver", "org.postgresql.Driver")
+  $ambari_db_rca_username = hdp_default("ambari_db_rca_username", "mapred")
+  $ambari_db_rca_password = hdp_default("ambari_db_rca_password", "mapred")
+
+}

+ 120 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp

@@ -0,0 +1,120 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp-hadoop::service(
+  $ensure = 'running',
+  $user,
+  $initial_wait = undef,
+  $create_pid_dir = true,
+  $create_log_dir = true
+)
+{
+
+  $security_enabled = $hdp::params::security_enabled
+
+  #NOTE: does not work if the namenode and datanode are on the same host
+  $pid_dir = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${user}"
+  
+  $hadoop_libexec_dir = $hdp-hadoop::params::hadoop_libexec_dir
+  
+  if (($security_enabled == true) and ($name == 'datanode')) {
+    $run_as_root = true
+  } else {       
+    $run_as_root = false
+  }
+
+  if (($security_enabled == true) and ($name == 'datanode')) {
+    $hdfs_user = $hdp::params::hdfs_user
+    $pid_file = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${hdfs_user}/hadoop-${hdfs_user}-${name}.pid"
+  } else {
+    $pid_file = "${pid_dir}/hadoop-${user}-${name}.pid"
+  } 
+
+  $log_dir = "${hdp-hadoop::params::hdfs_log_dir_prefix}/${user}"
+  $hadoop_daemon = "export HADOOP_LIBEXEC_DIR=${hadoop_libexec_dir} && ${hdp::params::hadoop_bin}/hadoop-daemon.sh"
+   
+  $cmd = "${hadoop_daemon} --config ${hdp-hadoop::params::conf_dir}"
+  if ($ensure == 'running') {
+    if ($run_as_root == true) {
+      $daemon_cmd = "${cmd} start ${name}"
+    } else {
+      $daemon_cmd = "su - ${user} -c  '${cmd} start ${name}'"
+    }
+    $service_is_up = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
+  } elsif ($ensure == 'stopped') {
+    if ($run_as_root == true) {
+      $daemon_cmd = "${cmd} stop ${name}"
+    } else {
+      $daemon_cmd = "su - ${user} -c  '${cmd} stop ${name}'"
+    }
+    $service_is_up = undef
+  } else {
+    $daemon_cmd = undef
+  }
+ 
+  if ($create_pid_dir == true) {
+    hdp::directory_recursive_create { $pid_dir: 
+      owner       => $user,
+      context_tag => 'hadoop_service',
+      service_state => $service_state,
+      force => true
+    }
+  }
+  
+  if ($create_log_dir == true) {
+    hdp::directory_recursive_create { $log_dir: 
+      owner       => $user,
+      context_tag => 'hadoop_service',
+      service_state => $service_state,
+      force => true
+    }
+  }
+  if ($daemon_cmd != undef) {  
+    hdp::exec { $daemon_cmd:
+      command      => $daemon_cmd,
+      unless       => $service_is_up,
+      initial_wait => $initial_wait
+    }
+  }
+
+  anchor{"hdp-hadoop::service::${name}::begin":}
+  anchor{"hdp-hadoop::service::${name}::end":}
+  if ($daemon_cmd != undef) {
+    Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Exec[$daemon_cmd] -> Anchor["hdp-hadoop::service::${name}::end"]
+
+    if ($create_pid_dir == true) {
+      Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Directory_recursive_create[$pid_dir] -> Hdp::Exec[$daemon_cmd] 
+    }
+    if ($create_log_dir == true) {
+      Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Directory_recursive_create[$log_dir] -> Hdp::Exec[$daemon_cmd] 
+    }
+  }
+  if ($ensure == 'running') {
+    #TODO: look at Puppet resource retry and retry_sleep
+    #TODO: can make sleep contingent on $name
+    $sleep = 5
+    $post_check = "sleep ${sleep}; ${service_is_up}"
+    hdp::exec { $post_check:
+      command => $post_check,
+      unless  => $service_is_up
+    }
+    Hdp::Exec[$daemon_cmd] -> Hdp::Exec[$post_check] -> Anchor["hdp-hadoop::service::${name}::end"]
+  }  
+}
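A usage sketch of this define (parameter values assumed here; the snamenode and tasktracker manifests later in this commit declare it the same way):

    hdp-hadoop::service { 'datanode':
      ensure         => 'running',
      user           => $hdp-hadoop::params::hdfs_user,
      create_pid_dir => true,
      create_log_dir => true
    }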

+ 24 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp

@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::slave::jobtracker-conn($jobtracker_host)
+{
+  Hdp-Hadoop::Configfile<||>{jtnode_host => $jobtracker_host}
+}
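Hdp-Hadoop::Configfile<||> is a Puppet resource collector: it matches every already-declared instance of the define and overrides the given parameter on all of them at once. The same pattern in isolation, with a hypothetical define and host value:

    define mymodule::conf($jt_host = undef) {
      notify { "conf ${name}: jobtracker=${jt_host}": }
    }

    mymodule::conf { 'mapred-site': }
    mymodule::conf { 'core-site': }

    # Override the parameter on every declared mymodule::conf resource:
    Mymodule::Conf<||> { jt_host => 'jt.example.com' }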

+ 27 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp

@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::slave::master-conn($master_host)
+{
+  Hdp-Hadoop::Configfile<||>{
+    namenode_host => $master_host,
+    jtnode_host   => $master_host
+  }
+}

+ 27 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp

@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#TODO: this might be replaced by just using hdp::namenode-conn
+class hdp-hadoop::slave::namenode-conn($namenode_host)
+{
+  #TODO: check if we can get rid of both
+  Hdp-Hadoop::Configfile<||>{namenode_host => $namenode_host}
+  Hdp::Configfile<||>{namenode_host => $namenode_host} #for components other than hadoop (e.g., hbase) 
+}

+ 46 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp

@@ -0,0 +1,46 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::smoketest(
+  $opts={}
+)
+{
+  #TODO: put in wait
+  #TODO: look for better way to compute outname
+  $date_format = '"%M%d%y"'
+  $outname = inline_template("<%=  `date +${date_format}`.chomp %>")
+
+  #TODO: hardwired to run on namenode and to use user hdfs
+
+  $put = "dfs -put /etc/passwd passwd-${outname}"
+  $exec = "jar /usr/share/hadoop/hadoop-examples-*.jar wordcount passwd-${outname} ${outname}.out"
+  $result = "fs -test -e ${outname}.out /dev/null 2>&1"
+  anchor{ "hdp-hadoop::smoketest::begin" :} ->
+  hdp-hadoop::exec-hadoop{ $put:
+    command => $put
+  } ->
+  hdp-hadoop::exec-hadoop{ $exec:
+    command =>  $exec
+  } ->
+  hdp-hadoop::exec-hadoop{ $result:
+    command =>  $result
+  } ->
+  anchor{ "hdp-hadoop::smoketest::end" :}
+}
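The chained arrows order the three stages put -> wordcount -> existence check, so a failure in any stage blocks the next; each resource title doubles as the argument string that exec-hadoop passes to the hadoop CLI. The class takes no required parameters, so declaring the smoke test is a one-liner:

    include hdp-hadoop::smoketest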

+ 98 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp

@@ -0,0 +1,98 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::snamenode(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hadoop::params  
+{
+  $hdp::params::service_exists['hdp-hadoop::snamenode'] = true
+
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+  Hdp-hadoop::Package<||>{include_64_bit => true}
+  Hdp-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
+    $fs_checkpoint_dir = $hdp-hadoop::params::fs_checkpoint_dir
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      if ($hdp::params::service_exists['hdp-hadoop::namenode'] != true) {
+        $masterHost = $kerberos_adminclient_host[0]
+        hdp::download_keytab { 'snamenode_service_keytab' :
+          masterhost => $masterHost,
+          keytabdst => "${$keytab_path}/nn.service.keytab",
+          keytabfile => 'nn.service.keytab',
+          owner => $hdp-hadoop::params::hdfs_user
+        }
+        hdp::download_keytab { 'snamenode_spnego_keytab' :   
+          masterhost => $masterHost,
+          keytabdst => "${$keytab_path}/spnego.service.keytab",
+          keytabfile => 'spnego.service.keytab', 
+          owner => $hdp-hadoop::params::hdfs_user,
+          mode => '0440',
+          group => $hdp::params::user_group
+        }
+      }
+    }
+ 
+    Hdp-Hadoop::Configfile<||>{snamenode_host => $hdp::params::host_address}
+  
+    hdp-hadoop::snamenode::create_name_dirs { $fs_checkpoint_dir: 
+      service_state => $service_state
+    }
+    
+    if ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) {
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+    
+    hdp-hadoop::service{ 'secondarynamenode':
+      ensure         => $service_state,
+      user           => $hdp-hadoop::params::hdfs_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+  
+    #top level does not need anchors
+    Class['hdp-hadoop'] -> Hdp-hadoop::Service['secondarynamenode']
+    Hdp-hadoop::Namenode::Create_name_dirs<||> -> Hdp-hadoop::Service['secondarynamenode']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp-hadoop::snamenode::create_name_dirs($service_state)
+{
+   $dirs = hdp_array_from_comma_list($name)
+   hdp::directory_recursive_create { $dirs :
+     owner => $hdp-hadoop::params::hdfs_user,
+     mode => '0755',
+     service_state => $service_state,
+     force => true
+  }
+}

+ 94 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp

@@ -0,0 +1,94 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::tasktracker(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hadoop::params
+{
+  $hdp::params::service_exists['hdp-hadoop::tasktracker'] = true
+
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+
+  if ($hdp::params::use_32_bits_on_slaves == true) {
+    Hdp-hadoop::Package<||>{include_32_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 32}
+  } else {
+    Hdp-hadoop::Package<||>{include_64_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 64}
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'tasktracker_service_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/tt.service.keytab",
+        keytabfile => 'tt.service.keytab',
+        owner => $hdp-hadoop::params::mapred_user
+      }
+    }
+
+    hdp-hadoop::tasktracker::create_local_dirs { $mapred_local_dir:
+      service_state => $service_state
+    }
+    
+    if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) {
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+
+    hdp-hadoop::service{ 'tasktracker':
+      ensure => $service_state,
+      user   => $hdp-hadoop::params::mapred_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+  
+    #top level does not need anchors
+    Class['hdp-hadoop'] -> Hdp-hadoop::Service['tasktracker']
+    Hdp-hadoop::Tasktracker::Create_local_dirs<||> -> Hdp-hadoop::Service['tasktracker']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp-hadoop::tasktracker::create_local_dirs($service_state)
+{
+  if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] != true) {
+    $dirs = hdp_array_from_comma_list($name)
+    hdp::directory_recursive_create_ignore_failure { $dirs :
+      owner => $hdp-hadoop::params::mapred_user,
+      mode => '0755',
+      service_state => $service_state,
+      force => true
+    }
+  }
+}

+ 25 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb

@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

+ 3 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb

@@ -0,0 +1,3 @@
+<% exlude_hosts_list.each do |val| -%>
+<%= val%>
+<% end -%>

+ 104 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb

@@ -0,0 +1,104 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME=<%=scope.function_hdp_java_home()%>
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-<%=scope.function_hdp_template_var("conf_dir")%>}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="<%=scope.function_hdp_template_var("hadoop_heapsize")%>"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms<%=scope.function_hdp_template_var("namenode_heapsize")%>"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("jtnode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("jtnode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx<%=scope.function_hdp_template_var("jtnode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("ttnode_heapsize")%> -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx<%=scope.function_hdp_template_var("dtnode_heapsize")%> -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%>m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx128m ${HADOOP_CLIENT_OPTS}"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData ${HADOOP_JAVA_PLATFORM_OPTS}"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=<%=scope.function_hdp_template_var("hdfs_user")%>
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER
+
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$USER
+export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$HADOOP_SECURE_DN_USER
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*oracle* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}

+ 37 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb

@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
+datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
+tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+

+ 37 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb

@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
+datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
+tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+

+ 118 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb

@@ -0,0 +1,118 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_taskcontroller {
+  if [ "<%=scope.function_hdp_template_var("security_enabled")%>" == "true" ]; then
+    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
+    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
+      echo "taskcontroller ok"
+    else
+      echo 'check taskcontroller' ; exit 1
+    fi
+  fi
+}
+
+function check_jetty {
+  hname=`hostname`
+  jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_hdp_template_var("tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
+  if [ $? -eq 0 ] ; then
+    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
+    e=${e:-0} # no jmx servlet ?
+    if [ $e -gt 10 ] ; then
+      echo "check jetty: shuffle_exceptions=$e" ; exit 1
+    else
+      echo "jetty ok"
+    fi
+  else
+    echo "check jetty: ping failed" ; exit 1
+  fi
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+        if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+          up=i;
+        }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks taskcontroller jetty; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0

+ 196 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb

@@ -0,0 +1,196 @@
+# Copyright 2011 The Apache Software Foundation
+# 
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+log4j.appender.RFA.MaxFileSize=1MB
+log4j.appender.RFA.MaxBackupIndex=30
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+
+
+<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.database=<%=scope.function_hdp_host("ambari_db_rca_url")%>
+<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.driver=<%=scope.function_hdp_host("ambari_db_rca_driver")%>
+<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.user=<%=scope.function_hdp_host("ambari_db_rca_username")%>
+<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.password=<%=scope.function_hdp_host("ambari_db_rca_password")%>
+<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.logger=DEBUG,JHA
+
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.database=${ambari.jobhistory.database}
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.driver=${ambari.jobhistory.driver}
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.user=${ambari.jobhistory.user}
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.password=${ambari.jobhistory.password}
+
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}
+<%=scope.function_hdp_template_var("rca_prefix")%>log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true

+ 3 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb

@@ -0,0 +1,3 @@
+<%h=scope.function_hdp_host("slave_hosts"); (h.kind_of?(Array) ? h : []).each do |host|-%>
+<%= host %>
+<%end-%>

+ 20 - 0
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb

@@ -0,0 +1,20 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir=<%=scope.function_hdp_template_var("mapred_local_dir")%>
+mapreduce.tasktracker.group=<%=scope.function_hdp_default(["mapred-site/mapreduce.tasktracker.group","hadoop"])%>
+hadoop.log.dir=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/<%=scope.function_hdp_template_var("mapred_user")%>

+ 26 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh

@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'usertable'
+drop 'usertable'
+create 'usertable','family'
+put 'usertable','row01','family:col01','value1'
+scan 'usertable'
+exit

+ 39 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp

@@ -0,0 +1,39 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::client(
+  $service_state = $hdp::params::cluster_client_state,
+  $opts = {}
+)
+{
+  #assumption is there are no other hbase components on node
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
+    if (($hdp::params::service_exists['hdp-hbase::master'] != true) and ($hdp::params::service_exists['hdp-hbase::regionserver'] != true)) {
+      #adds package, users, directories, and common configs
+      class { 'hdp-hbase': 
+        type          => 'client',
+        service_state => $service_state
+      }
+    }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}

+ 56 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp

@@ -0,0 +1,56 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::hbase::service_check() 
+{
+  $smoke_test_user = $hdp::params::smokeuser
+
+  $output_file = "/apps/hbase/data/usertable"
+  $conf_dir = $hdp::params::hbase_conf_dir
+
+  $test_cmd = "fs -test -e ${output_file}" 
+  
+  anchor { 'hdp-hbase::hbase::service_check::begin':}
+
+  file { '/tmp/hbaseSmoke.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp-hbase/hbaseSmoke.sh",
+    mode => '0755',
+  }
+
+  exec { '/tmp/hbaseSmoke.sh':
+    command   => "su - ${smoke_test_user} -c 'hbase --config ${conf_dir} shell /tmp/hbaseSmoke.sh'",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/hbaseSmoke.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    notify    => Hdp-hadoop::Exec-hadoop['hbase::service_check::test'],
+    logoutput => "true"
+  }
+
+  hdp-hadoop::exec-hadoop { 'hbase::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Exec['/tmp/hbaseSmoke.sh'],
+    before      => Anchor['hdp-hbase::hbase::service_check::end'] #TODO: remove after testing
+  }
+  
+  anchor{ 'hdp-hbase::hbase::service_check::end':}
+}
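
Note: the "fs -test -e" command in $test_cmd exits 0 when the path exists and non-zero otherwise, which is what lets the refreshonly hadoop exec pass or fail the check after the smoke script runs. Checked by hand, with the smoke user and paths assumed as in the sketch above:

    # exit status 0 means the smoke run left data under the HBase root dir
    su - ambari-qa -c 'hadoop fs -test -e /apps/hbase/data/usertable' \
      && echo 'usertable data present' || echo 'service check would fail'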

+ 143 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp

@@ -0,0 +1,143 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase(
+  $type,
+  $service_state) 
+{
+  include hdp-hbase::params
+ 
+  $hbase_user = $hdp-hbase::params::hbase_user
+  $config_dir = $hdp-hbase::params::conf_dir
+  
+  $hdp::params::component_exists['hdp-hbase'] = true
+
+
+  #Configs generation  
+
+  if has_key($configuration, 'hbase-site') {
+    configgenerator::configfile{'hbase-site': 
+      modulespath => $hdp-hbase::params::conf_dir,
+      filename => 'hbase-site.xml',
+      module => 'hdp-hbase',
+      configuration => $configuration['hbase-site'],
+      owner => $hbase_user,
+      group => $hdp::params::user_group
+    }
+  } else { # Manually overriding ownership of file installed by hadoop package
+    file { "${hdp-hbase::params::conf_dir}/hbase-site.xml":
+      owner => $hbase_user,
+      group => $hdp::params::user_group
+    }
+  }
+
+  if has_key($configuration, 'hbase-policy') {
+    configgenerator::configfile{'hbase-policy': 
+      modulespath => $hdp-hbase::params::conf_dir,
+      filename => 'hbase-policy.xml',
+      module => 'hdp-hbase',
+      configuration => $configuration['hbase-policy'],
+      owner => $hbase_user,
+      group => $hdp::params::user_group
+    }
+  } else { # Manually overriding ownership of file installed by hadoop package
+    file { "${hdp-hbase::params::conf_dir}/hbase-policy.xml":
+      owner => $hbase_user,
+      group => $hdp::params::user_group
+    }
+  }
+
+  anchor{'hdp-hbase::begin':}
+  anchor{'hdp-hbase::end':}
+
+  if ($service_state == 'uninstalled') {
+    hdp::package { 'hbase':
+      ensure => 'uninstalled'
+    }
+    hdp::directory { $config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Anchor['hdp-hbase::begin'] -> Hdp::Package['hbase'] -> Hdp::Directory[$config_dir] -> Anchor['hdp-hbase::end']
+
+  } else {  
+    hdp::package { 'hbase': }
+  
+    hdp::user{ $hbase_user:
+      groups => [$hdp::params::user_group]
+    }
+ 
+    hdp::directory { $config_dir: 
+      service_state => $service_state,
+      force => true,
+      owner => $hbase_user,
+      group => $hdp::params::user_group,
+      override_owner => true
+    }
+
+    hdp-hbase::configfile { ['hbase-env.sh','log4j.properties','hadoop-metrics.properties']:
+      type => $type
+    }
+
+    hdp-hbase::configfile { 'regionservers':}
+
+    if ($security_enabled == true) {
+      if ($type == 'master') {
+        hdp-hbase::configfile { 'hbase_master_jaas.conf' : }
+      } elsif ($type == 'regionserver') {
+        hdp-hbase::configfile { 'hbase_regionserver_jaas.conf' : }
+      } else {
+        hdp-hbase::configfile { 'hbase_client_jaas.conf' : }
+      }
+    }
+
+    Anchor['hdp-hbase::begin'] -> Hdp::Package['hbase'] -> Hdp::User[$hbase_user] -> Hdp::Directory[$config_dir] -> 
+    Hdp-hbase::Configfile<||> ->  Anchor['hdp-hbase::end']
+  }
+}
+
+### config files
+define hdp-hbase::configfile(
+  $mode = undef,
+  $hbase_master_hosts = undef,
+  $template_tag = undef,
+  $type = undef,
+  $conf_dir = $hdp-hbase::params::conf_dir
+) 
+{
+  if ($name == 'hadoop-metrics.properties') {
+    if ($type == 'master') {
+      $tag = 'GANGLIA-MASTER'
+    } else {
+      $tag = 'GANGLIA-RS'
+    }
+  } else {
+    $tag = $template_tag
+  }
+
+  hdp::configfile { "${conf_dir}/${name}":
+    component          => 'hbase',
+    owner              => $hdp-hbase::params::hbase_user,
+    mode               => $mode,
+    hbase_master_hosts => $hbase_master_hosts,
+    template_tag       => $tag
+  }
+}

+ 24 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp

@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::master-conn($hbase_master_hosts)
+{
+  Hdp-Hbase::Configfile<||>{hbase_master_hosts => $hbase_master_hosts}
+}

+ 66 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp

@@ -0,0 +1,66 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::master(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hbase::params 
+{
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {    
+    $hdp::params::service_exists['hdp-hbase::master'] = true
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+       $masterHost = $kerberos_adminclient_host[0]
+       hdp::download_keytab { 'hbase_master_service_keytab' :
+         masterhost => $masterHost,
+         keytabdst => "${$keytab_path}/hm.service.keytab",
+         keytabfile => 'hm.service.keytab',
+         owner => $hdp::params::hbase_user
+       }
+    }
+  
+    #adds package, users, directories, and common configs
+    class { 'hdp-hbase': 
+      type          => 'master',
+      service_state => $service_state
+    }
+
+    Hdp-hbase::Configfile<||>{hbase_master_hosts => $hdp::params::host_address}
+  
+    hdp-hbase::service{ 'master':
+      ensure => $service_state
+    }
+
+    #top level does not need anchors
+    Class['hdp-hbase'] -> Hdp-hbase::Service['master']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+#assumes that master and regionserver will not be on the same machine
+class hdp-hbase::master::enable-ganglia()
+{
+  Hdp-hbase::Configfile<|title == 'hadoop-metrics.properties'|>{template_tag => 'GANGLIA-MASTER'}
+}
+
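
Note: when security is enabled, the keytab downloaded above can be sanity-checked with standard MIT Kerberos tooling. A sketch, assuming keytab_path resolves to /etc/security/keytabs (the real value comes from hdp params):

    # list the principals stored in the HBase master service keytab (path assumed)
    klist -kt /etc/security/keytabs/hm.service.keytab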

+ 102 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp

@@ -0,0 +1,102 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::params() inherits hdp::params 
+{
+  
+  ####### users
+  $hbase_user = $hdp::params::hbase_user
+  
+  ### hbase-env
+  $hadoop_conf_dir = hdp_default("hadoop_conf_dir")
+  $conf_dir = $hdp::params::hbase_conf_dir
+
+  $hbase_log_dir = hdp_default("hbase_log_dir","/var/log/hbase")
+
+  $hbase_master_heapsize = hdp_default("hbase_master_heapsize","1000m")
+
+  $hbase_pid_dir = hdp_default("hbase_pid_dir","/var/run/hbase")
+
+  $hbase_regionserver_heapsize = hdp_default("hbase_regionserver_heapsize","1000m")
+
+  $hbase_regionserver_xmn_size = hdp_calc_xmn_from_xms($hbase_regionserver_heapsize,"0.2","512")
+
+  ### hbase-site.xml
+  $hbase_hdfs_root_dir = hdp_default("hbase-site/hbase.hdfs.root.dir","/apps/hbase/data")
+
+  $hbase_tmp_dir = hdp_default("hbase-site/hbase.tmp.dir",$hbase_log_dir)
+
+
+  #TODO: check if any of these 'hdfs' vars need to be equated with vars in hdp-hadoop
+  $hdfs_enable_shortcircuit_read = hdp_default("hbase-site/hdfs.enable.shortcircuit.read",true)
+
+  $hdfs_enable_shortcircuit_skipchecksum = hdp_default("hbase-site/hdfs.enable.shortcircuit.skipchecksum",false)
+
+  $hdfs_support_append = hdp_default("hbase-site/hdfs.support.append",true)
+
+  $hfile_blockcache_size = hdp_default("hbase-site/hfile.blockcache.size","0.25")
+
+  $hfile_max_keyvalue_size = hdp_default("hbase-site/hfile.max.keyvalue.size",10485760)
+
+  $zookeeper_sessiontimeout = hdp_default("hbase-site/zookeeper.sessiontimeout",60000)
+
+  $client_scannercaching = hdp_default("hbase-site/client.scannercaching",100)
+
+  $hstore_blockingstorefiles = hdp_default("hbase-site/hstore.blockingstorefiles",7)
+
+  $hstore_compactionthreshold = hdp_default("hbase-site/hstore.compactionthreshold",3)
+
+  $hstorefile_maxsize = hdp_default("hbase-site/hstorefile.maxsize",1073741824)
+
+  $hregion_blockmultiplier = hdp_default("hbase-site/hregion.blockmultiplier",2)
+
+  $hregion_memstoreflushsize = hdp_default("hbase-site/hregion.memstoreflushsize",134217728)
+
+  $regionserver_handlers = hdp_default("hbase-site/regionserver.handlers", 30)
+
+  $hregion_majorcompaction = hdp_default("hbase-site/hregion.majorcompaction", 86400000)
+
+  $preloaded_mastercoprocessor_classes = hdp_default("hbase-site/preloaded.mastercoprocessor.classes")
+
+  $preloaded_regioncoprocessor_classes = hdp_default("hbase-site/preloaded.regioncoprocessor.classes")
+
+  $regionserver_memstore_lab = hdp_default("hbase-site/regionserver.memstore.lab",true)
+
+  $regionserver_memstore_lowerlimit = hdp_default("hbase-site/regionserver.memstore.lowerlimit","0.35")
+
+  $regionserver_memstore_upperlimit = hdp_default("hbase-site/regionserver.memstore.upperlimit","0.4")
+
+  $hbase_client_jaas_config_file = hdp_default("hbase_client_jaas_config_file", "${conf_dir}/hbase_client_jaas.conf")
+  $hbase_master_jaas_config_file = hdp_default("hbase_master_jaas_config_file", "${conf_dir}/hbase_master_jaas.conf")
+  $hbase_regionserver_jaas_config_file = hdp_default("hbase_regionserver_jaas_config_file", "${conf_dir}/hbase_regionserver_jaas.conf")
+
+  $hbase_master_keytab_path = hdp_default("hbase-site/hbase.master.keytab.file", "${keytab_path}/hbase.service.keytab")
+  $hbase_master_principal = hdp_default("hbase-site/hbase.master.kerberos.principal", "hbase/_HOST@${kerberos_domain}")
+  $hbase_regionserver_keytab_path = hdp_default("hbase-site/hbase.regionserver.keytab.file", "${keytab_path}/hbase.service.keytab")
+  $hbase_regionserver_principal = hdp_default("hbase-site/hbase.regionserver.kerberos.principal", "hbase/_HOST@${kerberos_domain}")
+
+  $hbase_primary_name = hdp_default("hbase_primary_name", "hbase")
+  $hostname = $hdp::params::hostname
+  if ($use_hostname_in_principal) {
+    $hbase_jaas_princ = "${hbase_primary_name}/${hostname}@${kerberos_domain}"
+  } else {
+    $hbase_jaas_princ = "${hbase_primary_name}@${kerberos_domain}"
+  }
+}
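
Note: hdp_calc_xmn_from_xms above derives the region server's young-generation size (-Xmn) from its heap size. Assuming the helper applies the 0.2 fraction and treats 512 as an upper bound in MB (the Ruby function's exact rounding may differ), the arithmetic is roughly:

    # sketch of the xmn derivation; values mirror the defaults above
    heap_mb=1000   # hbase_regionserver_heapsize ("1000m")
    pct=20         # second argument, 0.2, as a percentage
    cap_mb=512     # third argument, assumed to be a cap in MB
    xmn_mb=$(( heap_mb * pct / 100 ))
    [ "$xmn_mb" -gt "$cap_mb" ] && xmn_mb=$cap_mb
    echo "-Xmn${xmn_mb}m"   # -> -Xmn200m for the default 1000m heap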

+ 73 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/regionserver.pp

@@ -0,0 +1,73 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::regionserver(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hbase::params
+{
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {    
+    $hdp::params::service_exists['hdp-hbase::regionserver'] = true       
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+       $masterHost = $kerberos_adminclient_host[0]
+       hdp::download_keytab { 'hbase_rs_service_keytab' :
+         masterhost => $masterHost,
+         keytabdst => "${$keytab_path}/rs.service.keytab",
+         keytabfile => 'rs.service.keytab',
+         owner => $hdp::params::hbase_user
+       }
+    }
+
+    if ($hdp::params::service_exists['hdp-hbase::master'] != true) {
+      #adds package, users, directories, and common configs
+      class { 'hdp-hbase': 
+        type          => 'regionserver',
+        service_state => $service_state
+      } 
+      $create_pid_dir = true
+      $create_log_dir = true
+    } else {
+      $create_pid_dir = false
+      $create_log_dir = false
+    }
+
+
+    hdp-hbase::service{ 'regionserver':
+      ensure         => $service_state,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+
+    #top level does not need anchors
+    Class['hdp-hbase'] ->  Hdp-hbase::Service['regionserver']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+#assumes that master and regionserver will not be on the same machine
+class hdp-hbase::regionserver::enable-ganglia()
+{
+  Hdp-hbase::Configfile<|title == 'hadoop-metrics.properties'|>{template_tag => 'GANGLIA-RS'}
+}

+ 76 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/service.pp

@@ -0,0 +1,76 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp-hbase::service(
+  $ensure = 'running',
+  $create_pid_dir = true,
+  $create_log_dir = true,
+  $initial_wait = undef)
+{
+  include hdp-hbase::params
+
+  $role = $name
+  $user = $hdp-hbase::params::hbase_user
+
+  $conf_dir = $hdp::params::hbase_conf_dir
+  $hbase_daemon = $hdp::params::hbase_daemon_script
+  $cmd = "$hbase_daemon --config ${conf_dir}"
+  $pid_dir = $hdp-hbase::params::hbase_pid_dir
+  $pid_file = "${pid_dir}/hbase-hbase-${role}.pid"
+
+  if ($ensure == 'running') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} start ${role}'"
+    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
+  } elsif ($ensure == 'stopped') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} stop ${role}'"
+    $no_op_test = undef
+  } else {
+    $daemon_cmd = undef
+  }
+
+  $tag = "hbase_service-${name}"
+  
+  if ($create_pid_dir == true) {
+    hdp::directory_recursive_create { $pid_dir: 
+      owner => $user,
+      tag   => $tag,
+      service_state => $ensure,
+      force => true
+    }
+  }
+  if ($create_log_dir == true) {
+    hdp::directory_recursive_create { $hdp-hbase::params::hbase_log_dir: 
+      owner => $user,
+      tag   => $tag,
+      service_state => $ensure,
+      force => true
+    }
+  }
+
+  anchor{"hdp-hbase::service::${name}::begin":} -> Hdp::Directory_recursive_create<|tag == $tag|> -> anchor{"hdp-hbase::service::${name}::end":}
+  if ($daemon_cmd != undef) { 
+    hdp::exec { $daemon_cmd:
+      command      => $daemon_cmd,
+      unless       => $no_op_test,
+      initial_wait => $initial_wait
+    }
+    Hdp::Directory_recursive_create<|tag == $tag|> -> Hdp::Exec[$daemon_cmd] -> Anchor["hdp-hbase::service::${name}::end"]
+  }
+}
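
Note: the unless => $no_op_test guard makes the start exec idempotent: the daemon is started only when no live process matches the pid file. The equivalent check by hand, using the default pid dir from params.pp (/var/run/hbase) and the 'master' role:

    # a pid file pointing at a live process means "already running"
    pid_file=/var/run/hbase/hbase-hbase-master.pid
    if ls "$pid_file" >/dev/null 2>&1 && ps -p "$(cat "$pid_file")" >/dev/null 2>&1; then
      echo 'master already running; start would be skipped'
    fi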

Some files were not shown because too many files changed in this diff