
AMBARI-776. Puppet scripts for all the modules to install/configure the stack. (mahadev)

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/branches/AMBARI-666@1391722 13f79535-47bb-0310-9956-ffa450edef68
Mahadev Konar, 13 years ago
commit ebdf6110c2
100 changed files with 6711 additions and 307 deletions
  1. AMBARI-666-CHANGES.txt (+3 -0)
  2. ambari-agent/src/main/puppet/manifestloader/site.pp (+293 -48)
  3. ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp (+23 -16)
  4. ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp (+21 -0)
  5. ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp (+76 -0)
  6. ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp (+28 -0)
  7. ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb (+97 -0)
  8. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh (+37 -0)
  9. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh (+62 -0)
  10. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh (+34 -0)
  11. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init (+71 -0)
  12. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh (+196 -0)
  13. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init (+71 -0)
  14. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh (+536 -0)
  15. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh (+47 -0)
  16. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh (+128 -0)
  17. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh (+57 -0)
  18. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh (+73 -0)
  19. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh (+62 -0)
  20. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh (+43 -0)
  21. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh (+54 -0)
  22. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh (+41 -0)
  23. ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh (+28 -0)
  24. ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp (+79 -0)
  25. ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_monitor.pp (+41 -0)
  26. ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_server.pp (+42 -0)
  27. ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp (+36 -0)
  28. ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp (+36 -0)
  29. ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp (+37 -0)
  30. ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp (+115 -0)
  31. ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp (+90 -0)
  32. ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp (+32 -0)
  33. ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp (+104 -0)
  34. ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb (+25 -0)
  35. ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb (+24 -0)
  36. ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb (+61 -0)
  37. ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp (+265 -243)
  38. ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh (+26 -0)
  39. ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp (+39 -0)
  40. ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp (+56 -0)
  41. ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp (+130 -0)
  42. ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp (+24 -0)
  43. ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp (+66 -0)
  44. ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp (+86 -0)
  45. ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/regionserver.pp (+73 -0)
  46. ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/service.pp (+76 -0)
  47. ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/zk-conn.pp (+26 -0)
  48. ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-MASTER.erb (+50 -0)
  49. ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-RS.erb (+50 -0)
  50. ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties.erb (+50 -0)
  51. ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb (+76 -0)
  52. ambari-agent/src/main/puppet/modules/hdp-hbase/templates/log4j.properties.erb (+80 -0)
  53. ambari-agent/src/main/puppet/modules/hdp-hbase/templates/regionservers.erb (+3 -0)
  54. ambari-agent/src/main/puppet/modules/hdp-hcat-old/files/hcatSmoke.sh (+23 -0)
  55. ambari-agent/src/main/puppet/modules/hdp-hcat-old/files/hiveSmoke.sh (+22 -0)
  56. ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/client.pp (+35 -0)
  57. ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/hcat/service_check.pp (+54 -0)
  58. ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/hive/service_check.pp (+54 -0)
  59. ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp (+70 -0)
  60. ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/mysql-connector.pp (+46 -0)
  61. ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/params.pp (+59 -0)
  62. ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/server.pp (+61 -0)
  63. ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/service.pp (+65 -0)
  64. ambari-agent/src/main/puppet/modules/hdp-hcat-old/templates/hcat-env.sh.erb (+25 -0)
  65. ambari-agent/src/main/puppet/modules/hdp-hcat-old/templates/hive-env.sh.erb (+53 -0)
  66. ambari-agent/src/main/puppet/modules/hdp-hcat/files/hcatSmoke.sh (+24 -0)
  67. ambari-agent/src/main/puppet/modules/hdp-hcat/files/pigSmoke.sh (+24 -0)
  68. ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/hcat/service_check.pp (+64 -0)
  69. ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp (+71 -0)
  70. ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/params.pp (+36 -0)
  71. ambari-agent/src/main/puppet/modules/hdp-hcat/templates/hcat-env.sh.erb (+25 -0)
  72. ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveSmoke.sh (+23 -0)
  73. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/client.pp (+40 -0)
  74. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/hive/service_check.pp (+55 -0)
  75. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp (+88 -0)
  76. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/mysql-connector.pp (+45 -0)
  77. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/params.pp (+62 -0)
  78. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/server.pp (+60 -0)
  79. ambari-agent/src/main/puppet/modules/hdp-hive/manifests/service.pp (+74 -0)
  80. ambari-agent/src/main/puppet/modules/hdp-hive/templates/hive-env.sh.erb (+54 -0)
  81. ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/facter/kadm_keytab.rb (+21 -0)
  82. ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/puppet/parser/functions/kerberos_keytabs_input.rb (+34 -0)
  83. ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/adminclient.pp (+140 -0)
  84. ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/bigtop/init.pp (+217 -0)
  85. ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/client.pp (+50 -0)
  86. ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/init.pp (+25 -0)
  87. ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/params.pp (+70 -0)
  88. ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/server.pp (+116 -0)
  89. ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kadm5.acl (+21 -0)
  90. ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kdc.conf (+36 -0)
  91. ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/krb5.conf (+47 -0)
  92. ambari-agent/src/main/puppet/modules/hdp-kerberos/tests/init.pp (+31 -0)
  93. ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/init.pp (+43 -0)
  94. ambari-agent/src/main/puppet/modules/hdp-mysql/files/startMysql.sh (+30 -0)
  95. ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/init.pp (+22 -0)
  96. ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/params.pp (+26 -0)
  97. ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/server.pp (+71 -0)
  98. ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_aggregate.php (+243 -0)
  99. ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_cpu.pl (+114 -0)
  100. ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_datanode_storage.php (+63 -0)

+ 3 - 0
AMBARI-666-CHANGES.txt

@@ -12,6 +12,9 @@ AMBARI-666 branch (unreleased changes)
 
   NEW FEATURES
 
+  AMBARI-776. Puppet scripts for all the modules to install/configure the
+  stack. (mahadev)
+
   AMBARI-756. Heartbeat handler: Handle heartbeat timeout. (jitendra)
 
   AMBARI-772. Stylize main nav. (yusaku)

+ 293 - 48
ambari-agent/src/main/puppet/manifestloader/site.pp

@@ -1,48 +1,293 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-		
-class manifestloader () {
-    file { '/etc/puppet/agent/modules.tgz':
-      ensure => present,
-      source => "puppet:///modules/catalog/modules.tgz",  
-      mode => '0755',
-    }
-
-    exec { 'untar_modules':
-      command => "rm -rf /etc/puppet/agent/modules ; tar zxf /etc/puppet/agent/modules.tgz -C /etc/puppet/agent/ --strip-components 3",
-      path    => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    } 
-
-    exec { 'puppet_apply':
-      command   => "sh /etc/puppet/agent/modules/puppetApply.sh",
-      timeout   => 1800,
-      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-      logoutput => "true"
-    }
-
-    File['/etc/puppet/agent/modules.tgz'] -> Exec['untar_modules'] -> Exec['puppet_apply']
-}
-
-node default {
- stage{1 :}
- class {'manifestloader': stage => 1}
-}
-
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+$hdp_hadoop_mapred_queue_acls_props = {'mapred.queue.default.acl-submit-job' => '*',
+  'mapred.queue.default.acl-administer-jobs' => '*',}
+
+$hdp_hadoop_policy_props = {'security.client.protocol.acl' => '*',
+  'security.client.datanode.protocol.acl' => '*',
+  'security.datanode.protocol.acl' => '*',
+  'security.inter.datanode.protocol.acl' => '*',
+  'security.namenode.protocol.acl' => '*',
+  'security.inter.tracker.protocol.acl' => '*',
+  'security.job.submission.protocol.acl' => '*',
+  'security.task.umbilical.protocol.acl' => '*',
+  'security.admin.operations.protocol.acl' => '',
+  'security.refresh.usertogroups.mappings.protocol.acl' => '',
+  'security.refresh.policy.protocol.acl' => '',}
+
+$hdp_hadoop_core_site_props = {'io.file.buffer.size' => '131072',
+  'io.serializations' => 'org.apache.hadoop.io.serializer.WritableSerialization',
+  'io.compression.codecs' => '',
+  'io.compression.codec.lzo.class' => 'com.hadoop.compression.lzo.LzoCodec',
+  'fs.default.name' => '',
+  'fs.trash.interval' => '360',
+  'fs.checkpoint.dir' => '',
+  'fs.checkpoint.edits.dir' => '',
+  'fs.checkpoint.period' => '21600',
+  'fs.checkpoint.size' => '536870912',
+  'ipc.client.idlethreshold' => '8000',
+  'ipc.client.connection.maxidletime' => '30000',
+  'ipc.client.connect.max.retries' => '50',
+  'webinterface.private.actions' => 'false',
+  'hadoop.security.authentication' => '',
+  'hadoop.security.authorization' => '',
+  'hadoop.security.auth_to_local' => '',}
+
+$hdp_hadoop_mapred_site_props = {'io.sort.mb' => '',
+  'io.sort.record.percent' => '.2',
+  'io.sort.spill.percent' => '',
+  'io.sort.factor' => '100',
+  'mapred.tasktracker.tasks.sleeptime-before-sigkill' => '250',
+  'mapred.job.tracker.handler.count' => '50',
+  'mapred.system.dir' => '',
+  'mapred.job.tracker' => '',
+  'mapred.job.tracker.http.address' => '',
+  'mapred.local.dir' => '',
+  'mapreduce.cluster.administrators' => ' hadoop',
+  'mapred.reduce.parallel.copies' => '30',
+  'mapred.tasktracker.map.tasks.maximum' => '',
+  'mapred.tasktracker.reduce.tasks.maximum' => '',
+  'tasktracker.http.threads' => '50',
+  'mapred.map.tasks.speculative.execution' => 'false',
+  'mapred.reduce.tasks.speculative.execution' => 'false',
+  'mapred.reduce.slowstart.completed.maps' => '0.05',
+  'mapred.inmem.merge.threshold' => '1000',
+  'mapred.job.shuffle.merge.percent' => '0.66',
+  'mapred.job.shuffle.input.buffer.percent'  => '0.7',
+  'mapred.map.output.compression.codec' => '',
+  'mapred.output.compression.type' => 'BLOCK',
+  'mapred.jobtracker.completeuserjobs.maximum' => '0',
+  'mapred.jobtracker.taskScheduler' => '',
+  'mapred.jobtracker.restart.recover' => 'false',
+  'mapred.job.reduce.input.buffer.percent' => '0.0',
+  'mapreduce.reduce.input.limit' => '10737418240',
+  'mapred.compress.map.output' => '',
+  'mapred.task.timeout' => '600000',
+  'jetty.connector' => 'org.mortbay.jetty.nio.SelectChannelConnector',
+  'mapred.task.tracker.task-controller' => '',
+  'mapred.child.root.logger' => 'INFO,TLA',
+  'mapred.child.java.opts' => '',
+  'mapred.cluster.map.memory.mb' => '',
+  'mapred.cluster.reduce.memory.mb' => '',
+  'mapred.job.map.memory.mb' => '',
+  'mapred.job.reduce.memory.mb' => '',
+  'mapred.cluster.max.map.memory.mb' => '',
+  'mapred.cluster.max.reduce.memory.mb' => '',
+  'mapred.hosts' => '',
+  'mapred.hosts.exclude' => '',
+  'mapred.max.tracker.blacklists' => '16',
+  'mapred.healthChecker.script.path' => '',
+  'mapred.healthChecker.interval' => '135000',
+  'mapred.healthChecker.script.timeout' => '60000',
+  'mapred.job.tracker.persist.jobstatus.active' => 'false',
+  'mapred.job.tracker.persist.jobstatus.hours' => '1',
+  'mapred.job.tracker.persist.jobstatus.dir' => '',
+  'mapred.jobtracker.retirejob.check' => '10000',
+  'mapred.jobtracker.retirejob.interval' => '0',
+  'mapred.job.tracker.history.completed.location' => '/mapred/history/done',
+  'mapred.task.maxvmem' => '',
+  'mapred.jobtracker.maxtasks.per.job' => '',
+  'mapreduce.fileoutputcommitter.marksuccessfuljobs' => 'false',
+  'mapred.userlog.retain.hours' => '',
+  'mapred.job.reuse.jvm.num.tasks' => '1',
+  'mapreduce.jobtracker.kerberos.principal' => '',
+  'mapreduce.tasktracker.kerberos.principal' => '',
+  'hadoop.job.history.user.location' => 'none',
+  'mapreduce.jobtracker.keytab.file' => '',
+  'mapreduce.tasktracker.keytab.file' => '',
+  'mapreduce.jobtracker.staging.root.dir' => '/user',
+  'mapreduce.tasktracker.group' => 'hadoop',
+  'mapreduce.jobtracker.split.metainfo.maxsize' => '50000000',
+  'mapreduce.history.server.embedded' => 'false',
+  'mapreduce.history.server.http.address' => '',
+  'mapreduce.jobhistory.kerberos.principal' => '',
+  'mapreduce.jobhistory.keytab.file' => '',
+  'mapred.jobtracker.blacklist.fault-timeout-window' => '180',
+  'mapred.jobtracker.blacklist.fault-bucket-width' => '15',
+  'mapred.queue.names' => 'default',}
+
+$hdp_hadoop_capacity_scheduler_props = {'mapred.capacity-scheduler.queue.default.capacity' => '100',
+  'mapred.capacity-scheduler.queue.default.supports-priority' => 'false',
+  'mapred.capacity-scheduler.queue.default.minimum-user-limit-percent' => '100',
+  'mapred.capacity-scheduler.queue.default.maximum-initialized-jobs-per-user' => '25',}
+
+$hdp_hadoop_hdfs_site_props = {'dfs.name.dir' => '',
+  'dfs.support.append' => '',
+  'dfs.webhdfs.enabled' => '',
+  'dfs.datanode.failed.volumes.tolerated' => '',
+  'dfs.block.local-path-access.user' => '',
+  'dfs.data.dir' => '',
+  'dfs.hosts.exclude' => '',
+  'dfs.hosts' => '',
+  'dfs.replication.max' => '50',
+  'dfs.replication' => '',
+  'dfs.heartbeat.interval' => '3',
+  'dfs.safemode.threshold.pct' => '1.0f',
+  'dfs.balance.bandwidthPerSec' => '6250000',
+  'dfs.datanode.address' => '',
+  'dfs.datanode.http.address' => '',
+  'dfs.block.size' => '134217728',
+  'dfs.http.address' => '',
+  'dfs.datanode.du.reserved' => '',
+  'dfs.datanode.ipc.address' => '0.0.0.0:8010',
+  'dfs.blockreport.initialDelay' => '120',
+  'dfs.datanode.du.pct' => '0.85f',
+  'dfs.namenode.handler.count' => '40',
+  'dfs.datanode.max.xcievers' => '1024',
+  'dfs.umaskmode' => '077',
+  'dfs.web.ugi' => 'gopher,gopher',
+  'dfs.permissions' => 'true',
+  'dfs.permissions.supergroup' => 'hdfs',
+  'dfs.namenode.handler.count' => '100',
+  'ipc.server.max.response.size' => '5242880',
+  'dfs.block.access.token.enable' => 'true',
+  'dfs.namenode.kerberos.principal' => '',
+  'dfs.secondary.namenode.kerberos.principal' => '',
+  'dfs.namenode.kerberos.https.principal' => '',
+  'dfs.secondary.namenode.kerberos.https.principal' => '',
+  'dfs.secondary.http.address' => '',
+  'dfs.secondary.https.port' => '50490',
+  'dfs.web.authentication.kerberos.principal' => '',
+  'dfs.web.authentication.kerberos.keytab' => '',
+  'dfs.datanode.kerberos.principal' => '',
+  'dfs.namenode.keytab.file' => '',
+  'dfs.secondary.namenode.keytab.file' => '',
+  'dfs.datanode.keytab.file' => '',
+  'dfs.https.port' => '50470',
+  'dfs.https.address' => '',
+  'dfs.datanode.data.dir.perm' => '',
+  'dfs.access.time.precision' => '0',
+  'dfs.cluster.administrators' => ' hdfs',
+  'ipc.server.read.threadpool.size' => '5',
+  'dfs.namenode.kerberos.internal.spnego.principal' => '',
+  'dfs.secondary.namenode.kerberos.internal.spnego.principal' => '',}
+
+$hdp_hcat_old_hive_site_props = {'hive.metastore.local' => 'false',
+  'javax.jdo.option.ConnectionURL' => '',
+  'javax.jdo.option.ConnectionDriverName' => 'com.mysql.jdbc.Driver',
+  'javax.jdo.option.ConnectionUserName' => '',
+  'javax.jdo.option.ConnectionPassword' => '',
+  'hive.metastore.warehouse.dir' => '/apps/hive/warehouse',
+  'hive.metastore.sasl.enabled' => '',
+  'hive.metastore.kerberos.keytab.file'  => '',
+  'hive.metastore.kerberos.principal' => '',
+  'hive.metastore.cache.pinobjtypes' => 'Table,Database,Type,FieldSchema,Order',
+  'hive.metastore.uris' => '',
+  'hive.semantic.analyzer.factory.impl' => 'org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory',
+  'hadoop.clientside.fs.operations' => '',
+  'hive.metastore.client.socket.timeout' => '60',
+  'hive.metastore.execute.setugi' => 'true',}
+
+$hdp_hive_hive_site_props = {'hive.metastore.local' => 'false',
+  'javax.jdo.option.ConnectionURL' => '',
+  'javax.jdo.option.ConnectionDriverName' => 'com.mysql.jdbc.Driver',
+  'javax.jdo.option.ConnectionUserName' => '',
+  'javax.jdo.option.ConnectionPassword' => '',
+  'hive.metastore.warehouse.dir' => '/apps/hive/warehouse',
+  'hive.metastore.sasl.enabled' => '',
+  'hive.metastore.kerberos.keytab.file' => '',
+  'hive.metastore.kerberos.principal' => '',
+  'hive.metastore.cache.pinobjtypes' => 'Table,Database,Type,FieldSchema,Order',
+  'hive.metastore.uris' => '',
+  'hive.semantic.analyzer.factory.impl' => 'org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory',
+  'hadoop.clientside.fs.operations' => 'true',
+  'hive.metastore.client.socket.timeout' => '60',
+  'hive.metastore.execute.setugi' => 'true',
+  'hive.security.authorization.enabled' => 'true',
+  'hive.security.authorization.manager' => 'org.apache.hcatalog.security.HdfsAuthorizationProvider',}
+		
+$hdp_oozie_oozie_site_props = {'oozie.base.url' => '',
+  'oozie.system.id' => '',
+  'oozie.systemmode' => 'NORMAL',
+  'oozie.service.AuthorizationService.security.enabled' => 'true',
+  'oozie.service.PurgeService.older.than' => '30',
+  'oozie.service.PurgeService.purge.interval' => '3600',
+  'oozie.service.CallableQueueService.queue.size' => '1000',
+  'oozie.service.CallableQueueService.threads' => '10',
+  'oozie.service.CallableQueueService.callable.concurrency' => '3',
+  'oozie.service.coord.normal.default.timeout' => '120',
+  'oozie.db.schema.name' => 'oozie',
+  'oozie.service.StoreService.create.db.schema' => 'true',
+  'oozie.service.StoreService.jdbc.driver' => 'org.apache.derby.jdbc.EmbeddedDriver',
+  'oozie.service.StoreService.jdbc.url' => '',
+  'oozie.service.StoreService.jdbc.username' => 'sa',
+  'oozie.service.StoreService.jdbc.password' => ' ',
+  'oozie.service.StoreService.pool.max.active.conn' => '10',
+  'oozie.service.HadoopAccessorService.kerberos.enabled' => '',
+  'local.realm' => '',
+  'oozie.service.HadoopAccessorService.keytab.file' => '',
+  'oozie.service.HadoopAccessorService.kerberos.principal' => '',
+  'oozie.service.HadoopAccessorService.jobTracker.whitelist' => ' ',
+  'oozie.authentication.type' => '',
+  'oozie.authentication.kerberos.principal' => '',
+  'oozie.authentication.kerberos.keytab' => '',
+  'oozie.service.HadoopAccessorService.nameNode.whitelist' => ' ',
+  'oozie.service.WorkflowAppService.system.libpath' => '',
+  'use.system.libpath.for.mapreduce.and.pig.jobs' => 'false',
+  'oozie.authentication.kerberos.name.rules' => '',}
+
+$hdp_templeton_templeton_site_props = {'templeton.port' => '50111',
+  'templeton.hadoop.conf.dir' => '',
+  'templeton.jar' => '',
+  'templeton.libjars' => '',
+  'templeton.hadoop' => '',
+  'templeton.pig.archive' => '',
+  'templeton.pig.path' => '',
+  'templeton.hcat' => '',
+  'templeton.hive.archive' => '',
+  'templeton.hive.path' => '',
+  'templeton.hive.properties' => '',
+  'templeton.zookeeper.hosts' => '',
+  'templeton.storage.class' => 'org.apache.hcatalog.templeton.tool.ZooKeeperStorage',
+  'templeton.override.enabled' => 'false',
+  'templeton.streaming.jar' => 'hdfs:///apps/templeton/hadoop-streaming.jar',
+  'templeton.kerberos.principal' => '',
+  'templeton.kerberos.keytab' => '',
+  'templeton.kerberos.secret' => 'secret',}
+    
+class manifestloader () {
+    file { '/etc/puppet/agent/modules.tgz':
+      ensure => present,
+      source => "puppet:///modules/catalog/modules.tgz",  
+      mode => '0755',
+    }
+
+    exec { 'untar_modules':
+      command => "rm -rf /etc/puppet/agent/modules ; tar zxf /etc/puppet/agent/modules.tgz -C /etc/puppet/agent/ --strip-components 3",
+      path    => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    } 
+
+    exec { 'puppet_apply':
+      command   => "sh /etc/puppet/agent/modules/puppetApply.sh",
+      timeout   => 1800,
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      logoutput => "true"
+    }
+
+    File['/etc/puppet/agent/modules.tgz'] -> Exec['untar_modules'] -> Exec['puppet_apply']
+}
+
+node default {
+ stage{1 :}
+ class {'manifestloader': stage => 1}
+}
+
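
The property hashes above are plain Puppet data, meant to be fed to the configgenerator module reworked later in this same commit. As a hypothetical illustration (the resource title and module name below are examples, not part of the commit), one of them could be rendered into a Hadoop-style XML config file like this:

configgenerator::configfile { 'capacity-scheduler.xml':
  module     => 'hdp-hadoop',
  properties => $hdp_hadoop_capacity_scheduler_props,
}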

+ 23 - 16
ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp

@@ -34,21 +34,28 @@
 #   </property>
 # </configuration>
 #
+# Params:
+# - configname - name of the config file (class title by default)
+# - modulespath - modules path ('/etc/puppet/modules' by default)
+# - module - module name
+# - properties - set of the key-value pairs (puppet hash) which corresponds to property name - property value pairs of config file
+#
+# Note: Set correct $modulespath in the configgenerator (or pass it as parameter)
+#
 
-define configgenerator::configfile ($configname=$title, $module, $properties) {
-    $configcontent => inline_template( "
-	  <configuration>
-	  <% properties.each do |key,value| -%>
-     <property>
-	  <name><%=key %></name><value><%=value %></value>
-     </property>
-	  <% end -%>
-	  </configuration>
-	")
+define configgenerator::configfile ($configname=$title, $modulespath='/etc/puppet/modules', $module, $properties) {
+  $configcontent = inline_template('<configuration>
+  <% properties.each do |key,value| -%>
+  <property>
+    <name><%=key %></name>
+    <value><%=value %></value>
+  </property>
+  <% end -%>
+</configuration>')
  
-	file {'config':
-       ensure  => present,
-       content => $configcontent,
-		path => "/etc/puppet/agent/modules/${module}/templates/${configname}",
-     }
-	) 
+  file { 'config':
+    ensure  => present,
+    content => $configcontent,
+    path    => "${modulespath}/${module}/templates/${configname}",
+  }
+}
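
To make the define concrete, here is a minimal usage sketch, assuming the default $modulespath of /etc/puppet/modules (the property value is invented for illustration):

configgenerator::configfile { 'core-site.xml':
  module     => 'hdp-hadoop',
  properties => { 'fs.trash.interval' => '360' },
}
# Writes /etc/puppet/modules/hdp-hadoop/templates/core-site.xml containing:
#   <configuration>
#     <property>
#       <name>fs.trash.interval</name>
#       <value>360</value>
#     </property>
#   </configuration>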

+ 21 - 0
ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp

@@ -0,0 +1,21 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-dashboard::dashboard::service_check(){}

+ 76 - 0
ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp

@@ -0,0 +1,76 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-dashboard(
+  $service_state = $hdp::params::cluster_client_state,
+  $opts = {}
+) inherits hdp-dashboard::params
+{
+   if ($service_state == 'no_op') {
+   } elsif ($service_state == 'uninstalled') {
+    $conf_dir = $hdp-dashboard::params::conf_dir
+    hdp::package { 'dashboard' :
+      ensure => 'uninstalled',
+      java_needed => 'false',
+      size   => 64
+    }
+    hdp::directory_recursive_create { $conf_dir :
+      service_state => $service_state,
+      force => true
+    }
+
+    Hdp::Package['dashboard'] -> Hdp::Directory_recursive_create[$conf_dir]
+
+   } elsif ($service_state in ['running','installed_and_configured','stopped']) {
+      hdp::package { 'dashboard' :
+        java_needed => 'false',
+        size => 64
+       }
+     $conf_dir =  $hdp-dashboard::params::conf_dir
+  
+     hdp::directory_recursive_create { $conf_dir :
+       service_state => $service_state,
+       force => true
+     }
+ 
+     hdp-dashboard::configfile { 'cluster_configuration.json' : }
+     Hdp-Dashboard::Configfile<||>{dashboard_host => $hdp::params::host_address}
+  
+     #top level does not need anchors
+     Hdp::Package['dashboard'] -> Hdp::Directory_recursive_create[$conf_dir] -> Hdp-Dashboard::Configfile<||> 
+    } else {
+     hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+   }
+}
+
+###config file helper
+define hdp-dashboard::configfile(
+  $dashboard_host = undef
+)
+{
+  
+  hdp::configfile { "${hdp-dashboard::params::conf_dir}/${name}":
+    component      => 'dashboard',
+    owner          => root,
+    group          => root,
+    dashboard_host => $dashboard_host
+  }
+}
+
+
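
The class leans on Puppet's resource-collector override syntax: hdp-dashboard::configfile is first declared without a dashboard_host, and the collector Hdp-Dashboard::Configfile<||> then sets that parameter on every declared instance, letting the class inject $hdp::params::host_address after the declaration. A stripped-down sketch of the same idiom (the host value is hypothetical):

hdp-dashboard::configfile { 'cluster_configuration.json': }
Hdp-Dashboard::Configfile<||> { dashboard_host => 'dashboard.example.com' }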

+ 28 - 0
ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp

@@ -0,0 +1,28 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-dashboard::params()
+{
+  
+  $conf_dir = "/usr/share/hdp/dashboard/dataServices/conf/" #cannot change since hard coded in rpm
+
+  $hdp_cluster_name = hdp_default("hadoop/cluster_configuration/hdp_cluster_name")
+  $scheduler_name = hdp_default("hadoop/cluster_configuration/scheduler_name")
+}

+ 97 - 0
ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb

@@ -0,0 +1,97 @@
+{
+  "config_version": 1,
+  "stack_version": "1.0.2",
+  "overall": {
+    "cluster_name": "<%=scope.function_hdp_template_var("hdp_cluster_name")%>",
+    "dashboard_host": "<%=scope.function_hdp_host("public_dashboard_host")%>",
+    "dashboard_port": 80,
+    "dataservices_host": "<%=scope.function_hdp_host("public_dashboard_host")%>",
+    "dataservices_port": 80,
+    "ganglia" : {
+      "web_host": "<%=scope.function_hdp_host("public_ganglia_server_host")%>",
+      "web_port": 80,
+      "web_root": "/ganglia/?t=yes",
+      "grid_name": "HDP_GRID"
+    },
+    "nagios": {
+      "nagiosserver_host": "<%=scope.function_hdp_host("public_nagios_server_host")%>",
+      "nagiosserver_port": 80,
+      "web_root": "/nagios"
+    },
+    "jmx": {
+      "timeout": 3
+    },
+    "services": {
+	  "HDFS" : [
+        {
+          "installed": true,
+          "name": "HDFS",
+          "namenode_host": "<%=scope.function_hdp_host("public_namenode_host")%>",
+          "namenode_port": 50070,
+          "snamenode_host": "<%=scope.function_hdp_host("public_snamenode_host")%>",
+          "snamenode_port": 50090,
+          "total_datanodes": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
+          "ganglia_clusters": {
+            "slaves": "HDPSlaves",
+            "namenode": "HDPNameNode"
+          }
+        }
+      ],
+      "MAPREDUCE" : [
+        {
+          "installed": true,
+          "name": "MAPREDUCE",
+          "jobtracker_host": "<%=scope.function_hdp_host("public_jtnode_host")%>",
+          "jobtracker_port": 50030,
+          "total_tasktrackers": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
+          "jobhistory_host": "<%=scope.function_hdp_host("public_jtnode_host")%>",
+          "jobhistory_port": 51111,
+          "ganglia_clusters": {
+            "slaves": "HDPSlaves",
+            "jobtracker": "HDPJobTracker"
+          },
+          "scheduler_type": "<%=scope.function_hdp_template_var("scheduler_name")%>"
+        }
+      ],
+      "HBASE" : [
+        {
+          "installed": <%=not scope.function_hdp_no_hosts("public_hbase_master_host")%>,
+          "name": "HBASE",
+          "hbasemaster_host": "<%=scope.function_hdp_host("public_hbase_master_host")%>",
+          "hbasemaster_port": 60010,
+          "total_regionservers": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
+          "ganglia_clusters": {
+            "slaves": "HDPSlaves",
+            "hbasemaster": "HDPHBaseMaster"
+          }
+        }
+      ],
+      "ZOOKEEPER" : [
+        {
+          "installed": <%=not scope.function_hdp_no_hosts("public_zookeeper_hosts")%>,
+          "name": "ZOOKEEPER"
+        }
+      ],
+      "HIVE" : [
+        {
+          "installed": <%=not scope.function_hdp_no_hosts("public_hive_server_host")%>,
+          "name": "HIVE"
+        }
+      ],
+      "TEMPLETON" : [
+        {
+          "installed": <%=not scope.function_hdp_no_hosts("public_templeton_server_host")%>,
+          "name": "TEMPLETON"
+        }
+      ],
+      "OOZIE" : [
+        {
+          "installed": <%=not scope.function_hdp_no_hosts("public_oozie_server")%>,
+          "name": "OOZIE",
+          "oozie_host": "<%=scope.function_hdp_host("public_oozie_server")%>",
+          "oozie_port": 11000
+        }
+      ]
+    }
+  }
+}

+ 37 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh

@@ -0,0 +1,37 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# Before checking gmetad, check rrdcached.
+./checkRrdcached.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+if [ -n "${gmetadRunningPid}" ]
+then
+  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
+else
+  echo "Failed to find running ${GMETAD_BIN}";
+  exit 1;
+fi

+ 62 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh

@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function checkGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+    # Skip over (purported) Clusters that don't have their core conf file present.
+    if [ -e "${gmondCoreConfFileName}" ]
+    then 
+      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+      if [ -n "${gmondRunningPid}" ]
+      then
+        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
+      else
+        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
+        exit 1;
+      fi
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so check
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        checkGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just check the one ${gmondClusterName} that was asked for.
+    checkGmondForCluster ${gmondClusterName};
+fi

+ 34 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh

@@ -0,0 +1,34 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+if [ -n "${rrdcachedRunningPid}" ]
+then
+  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
+else
+  echo "Failed to find running ${RRDCACHED_BIN}";
+  exit 1;
+fi

+ 71 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init

@@ -0,0 +1,71 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in-sync with the definition of 
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
+HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
+HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmetad..."
+      echo "============================="
+      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmetad..."
+      echo "=================================="
+      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmetad..."
+      echo "======================================="
+      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL

+ 196 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh

@@ -0,0 +1,196 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMETAD_BIN=/usr/sbin/gmetad;
+GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
+GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
+
+function getGmetadLoggedPid()
+{
+    if [ -e "${GMETAD_PID_FILE}" ]
+    then
+        echo `cat ${GMETAD_PID_FILE}`;
+    fi
+}
+
+function getGmetadRunningPid()
+{
+    gmetadLoggedPid=`getGmetadLoggedPid`;
+
+    if [ -n "${gmetadLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmetadConf()
+{
+    now=`date`;
+
+    cat <<END_OF_GMETAD_CONF_1
+#################### Generated by ${0} on ${now} ####################
+#
+#-------------------------------------------------------------------------------
+# Setting the debug_level to 1 will keep daemon in the foreground and
+# show only error messages. Setting this value higher than 1 will make 
+# gmetad output debugging information and stay in the foreground.
+# default: 0
+# debug_level 10
+#
+#-------------------------------------------------------------------------------
+# What to monitor. The most important section of this file. 
+#
+# The data_source tag specifies either a cluster or a grid to
+# monitor. If we detect the source is a cluster, we will maintain a complete
+# set of RRD databases for it, which can be used to create historical 
+# graphs of the metrics. If the source is a grid (it comes from another gmetad),
+# we will only maintain summary RRDs for it.
+#
+# Format: 
+# data_source "my cluster" [polling interval] address1:port addreses2:port ...
+# 
+# The keyword 'data_source' must immediately be followed by a unique
+# string which identifies the source, then an optional polling interval in 
+# seconds. The source will be polled at this interval on average. 
+# If the polling interval is omitted, 15sec is assumed.
+#
+# If you choose to set the polling interval to something other than the default,
+# note that the web frontend determines a host as down if its TN value is less
+# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
+# to something around or greater than 80sec, this will cause the frontend to
+# incorrectly display hosts as down even though they are not.
+#
+# A list of machines which service the data source follows, in the 
+# format ip:port, or name:port. If a port is not specified then 8649
+# (the default gmond port) is assumed.
+# default: There is no default value
+#
+# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
+# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
+# data_source "another source" 1.3.4.7:8655  1.3.4.8
+END_OF_GMETAD_CONF_1
+
+    # Get info about all the configured Ganglia clusters.
+    getGangliaClusterInfo | while read gangliaClusterInfoLine
+    do
+        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
+        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
+        # ...and generate a corresponding data_source line for gmetad.conf. 
+        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
+    done
+
+    cat <<END_OF_GMETAD_CONF_2
+#
+# Round-Robin Archives
+# You can specify custom Round-Robin archives here (defaults are listed below)
+#
+# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+#      "RRA:AVERAGE:0.5:5760:374"
+#
+#-------------------------------------------------------------------------------
+# Scalability mode. If on, we summarize over downstream grids, and respect
+# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
+# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
+# we are the "authority" on data source feeds. This approach does not scale to
+# large groups of clusters, but is provided for backwards compatibility.
+# default: on
+# scalable off
+#
+#-------------------------------------------------------------------------------
+# The name of this Grid. All the data sources above will be wrapped in a GRID
+# tag with this name.
+# default: unspecified
+gridname "HDP_GRID"
+#
+#-------------------------------------------------------------------------------
+# The authority URL for this grid. Used by other gmetads to locate graphs
+# for our data sources. Generally points to a ganglia/
+# website on this machine.
+# default: "http://hostname/ganglia/",
+#   where hostname is the name of this machine, as defined by gethostname().
+# authority "http://mycluster.org/newprefix/"
+#
+#-------------------------------------------------------------------------------
+# List of machines this gmetad will share XML with. Localhost
+# is always trusted. 
+# default: There is no default value
+# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
+#
+#-------------------------------------------------------------------------------
+# If you want any host which connects to the gmetad XML to receive
+# data, then set this value to "on"
+# default: off
+# all_trusted on
+#
+#-------------------------------------------------------------------------------
+# If you don't want gmetad to setuid then set this to off
+# default: on
+# setuid off
+#
+#-------------------------------------------------------------------------------
+# User gmetad will setuid to (defaults to "nobody")
+# default: "nobody"
+setuid_username "${GMETAD_USER}"
+#
+#-------------------------------------------------------------------------------
+# Umask to apply to created rrd files and grid directory structure
+# default: 0 (files are public)
+# umask 022
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer requests for XML
+# default: 8651
+# xml_port 8651
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer queries for XML. This facility allows
+# simple subtree and summation views of the XML tree.
+# default: 8652
+# interactive_port 8652
+#
+#-------------------------------------------------------------------------------
+# The number of threads answering XML requests
+# default: 4
+# server_threads 10
+#
+#-------------------------------------------------------------------------------
+# Where gmetad stores its round-robin databases
+# default: "/var/lib/ganglia/rrds"
+# rrd_rootdir "/some/other/place"
+#
+#-------------------------------------------------------------------------------
+# In earlier versions of gmetad, hostnames were handled in a case
+# sensitive manner
+# If your hostname directories have been renamed to lower case,
+# set this option to 0 to disable backward compatibility.
+# From version 3.2, backwards compatibility will be disabled by default.
+# default: 1   (for gmetad < 3.2)
+# default: 0   (for gmetad >= 3.2)
+case_sensitive_hostnames 1
+END_OF_GMETAD_CONF_2
+}

+ 71 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init

@@ -0,0 +1,71 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in-sync with the definition of 
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
+HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
+HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmond..."
+      echo "============================="
+      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmond..."
+      echo "=================================="
+      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmond..."
+      echo "======================================="
+      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL

+ 536 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh

@@ -0,0 +1,536 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMOND_BIN=/usr/sbin/gmond;
+GMOND_CORE_CONF_FILE=gmond.core.conf;
+GMOND_MASTER_CONF_FILE=gmond.master.conf;
+GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
+GMOND_PID_FILE=gmond.pid;
+
+# Functions.
+function getGmondCoreConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
+    fi
+}
+
+function getGmondMasterConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    fi
+}
+
+function getGmondSlaveConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    fi
+}
+
+function getGmondPidFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
+    else
+        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
+    fi
+}
+
+function getGmondLoggedPid()
+{
+    gmondPidFile=`getGmondPidFileName ${1}`;
+
+    if [ -e "${gmondPidFile}" ]
+    then
+        echo `cat ${gmondPidFile}`;
+    fi
+}
+
+function getGmondRunningPid()
+{
+    gmondLoggedPid=`getGmondLoggedPid ${1}`;
+
+    if [ -n "${gmondLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmondCoreConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_CORE_CONF
+#################### Generated by ${0} on ${now} ####################
+#
+/* This configuration is as close to 2.5.x default behavior as possible
+   The values closely match ./gmond/metric.h definitions in 2.5.x */
+globals {
+  daemonize = yes
+  setuid = yes
+  user = ${GMOND_USER}
+  debug_level = 0
+  max_udp_msg_len = 1472
+  mute = no
+  deaf = no 
+  allow_extra_data = yes
+  host_dmax = 0 /*secs */
+  host_tmax = 20 /*secs */
+  cleanup_threshold = 300 /*secs */
+  gexec = no
+  send_metadata_interval = 30 /*secs */
+}
+
+/*
+ * The cluster attributes specified will be used as part of the <CLUSTER>
+ * tag that will wrap all hosts collected by this instance.
+ */
+cluster {
+  name = "${gmondClusterName}"
+  owner = "unspecified"
+  latlong = "unspecified"
+  url = "unspecified"
+}
+
+/* The host section describes attributes of the host, like the location */
+host {
+  location = "unspecified"
+}
+
+/* You can specify as many tcp_accept_channels as you like to share
+ * an XML description of the state of the cluster.
+ *
+ * At the very least, every gmond must expose its XML state to 
+ * queriers from localhost.
+ */
+tcp_accept_channel {
+  bind = localhost
+  port = ${gmondPort}
+}
+
+/* Each metrics module that is referenced by gmond must be specified and
+   loaded. If the module has been statically linked with gmond, it does
+   not require a load path. However all dynamically loadable modules must
+   include a load path. */
+modules {
+  module {
+    name = "core_metrics"
+  }
+  module {
+    name = "cpu_module"
+    path = "modcpu.so"
+  }
+  module {
+    name = "disk_module"
+    path = "moddisk.so"
+  }
+  module {
+    name = "load_module"
+    path = "modload.so"
+  }
+  module {
+    name = "mem_module"
+    path = "modmem.so"
+  }
+  module {
+    name = "net_module"
+    path = "modnet.so"
+  }
+  module {
+    name = "proc_module"
+    path = "modproc.so"
+  }
+  module {
+    name = "sys_module"
+    path = "modsys.so"
+  }
+}
+
+/* The old internal 2.5.x metric array has been replaced by the following
+   collection_group directives.  What follows is the default behavior for
+   collecting and sending metrics that is as close to 2.5.x behavior as
+   possible. */
+
+/* This collection group will cause a heartbeat (or beacon) to be sent every
+   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
+   the age of the running gmond. */
+collection_group {
+  collect_once = yes
+  time_threshold = 20
+  metric {
+    name = "heartbeat"
+  }
+}
+
+/* This collection group will send general info about this host every
+   1200 secs.
+   This information doesn't change between reboots and is only collected
+   once. */
+collection_group {
+  collect_once = yes
+  time_threshold = 1200
+  metric {
+    name = "cpu_num"
+    title = "CPU Count"
+  }
+  metric {
+    name = "cpu_speed"
+    title = "CPU Speed"
+  }
+  metric {
+    name = "mem_total"
+    title = "Memory Total"
+  }
+  /* Should this be here? Swap can be added/removed between reboots. */
+  metric {
+    name = "swap_total"
+    title = "Swap Space Total"
+  }
+  metric {
+    name = "boottime"
+    title = "Last Boot Time"
+  }
+  metric {
+    name = "machine_type"
+    title = "Machine Type"
+  }
+  metric {
+    name = "os_name"
+    title = "Operating System"
+  }
+  metric {
+    name = "os_release"
+    title = "Operating System Release"
+  }
+  metric {
+    name = "location"
+    title = "Location"
+  }
+}
+
+/* This collection group will send the status of gexecd for this host
+   every 300 secs.*/
+/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
+collection_group {
+  collect_once = yes
+  time_threshold = 300
+  metric {
+    name = "gexec"
+    title = "Gexec Status"
+  }
+}
+
+/* This collection group will collect the CPU status info every 20 secs.
+   The time threshold is set to 90 seconds.  In honesty, this
+   time_threshold could be set significantly higher to reduce
+   unnecessary network chatter. */
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* CPU status */
+  metric {
+    name = "cpu_user"
+    value_threshold = "1.0"
+    title = "CPU User"
+  }
+  metric {
+    name = "cpu_system"
+    value_threshold = "1.0"
+    title = "CPU System"
+  }
+  metric {
+    name = "cpu_idle"
+    value_threshold = "5.0"
+    title = "CPU Idle"
+  }
+  metric {
+    name = "cpu_nice"
+    value_threshold = "1.0"
+    title = "CPU Nice"
+  }
+  metric {
+    name = "cpu_aidle"
+    value_threshold = "5.0"
+    title = "CPU aidle"
+  }
+  metric {
+    name = "cpu_wio"
+    value_threshold = "1.0"
+    title = "CPU wio"
+  }
+  /* The next two metrics are optional if you want more detail...
+     ... since they are accounted for in cpu_system.
+  metric {
+    name = "cpu_intr"
+    value_threshold = "1.0"
+    title = "CPU intr"
+  }
+  metric {
+    name = "cpu_sintr"
+    value_threshold = "1.0"
+    title = "CPU sintr"
+  }
+  */
+}
+
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* Load Averages */
+  metric {
+    name = "load_one"
+    value_threshold = "1.0"
+    title = "One Minute Load Average"
+  }
+  metric {
+    name = "load_five"
+    value_threshold = "1.0"
+    title = "Five Minute Load Average"
+  }
+  metric {
+    name = "load_fifteen"
+    value_threshold = "1.0"
+    title = "Fifteen Minute Load Average"
+  }
+}
+
+/* This group collects the number of running and total processes */
+collection_group {
+  collect_every = 80
+  time_threshold = 950
+  metric {
+    name = "proc_run"
+    value_threshold = "1.0"
+    title = "Total Running Processes"
+  }
+  metric {
+    name = "proc_total"
+    value_threshold = "1.0"
+    title = "Total Processes"
+  }
+}
+
+/* This collection group grabs the volatile memory metrics every 40 secs and
+   sends them at least every 180 secs.  This time_threshold can be increased
+   significantly to reduce unneeded network traffic. */
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "mem_free"
+    value_threshold = "1024.0"
+    title = "Free Memory"
+  }
+  metric {
+    name = "mem_shared"
+    value_threshold = "1024.0"
+    title = "Shared Memory"
+  }
+  metric {
+    name = "mem_buffers"
+    value_threshold = "1024.0"
+    title = "Memory Buffers"
+  }
+  metric {
+    name = "mem_cached"
+    value_threshold = "1024.0"
+    title = "Cached Memory"
+  }
+  metric {
+    name = "swap_free"
+    value_threshold = "1024.0"
+    title = "Free Swap Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 300
+  metric {
+    name = "bytes_out"
+    value_threshold = 4096
+    title = "Bytes Sent"
+  }
+  metric {
+    name = "bytes_in"
+    value_threshold = 4096
+    title = "Bytes Received"
+  }
+  metric {
+    name = "pkts_in"
+    value_threshold = 256
+    title = "Packets Received"
+  }
+  metric {
+    name = "pkts_out"
+    value_threshold = 256
+    title = "Packets Sent"
+  }
+}
+
+/* Different than 2.5.x default since the old config made no sense */
+collection_group {
+  collect_every = 1800
+  time_threshold = 3600
+  metric {
+    name = "disk_total"
+    value_threshold = 1.0
+    title = "Total Disk Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "disk_free"
+    value_threshold = 1.0
+    title = "Disk Space Available"
+  }
+  metric {
+    name = "part_max_used"
+    value_threshold = 1.0
+    title = "Maximum Disk Space Used"
+  }
+}
+
+include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
+END_OF_GMOND_CORE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondMasterConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_MASTER_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Masters only receive; they never send. */
+udp_recv_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+
+/* The gmond cluster master must additionally provide an XML 
+ * description of the cluster to the gmetad that will query it.
+ */
+tcp_accept_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+END_OF_GMOND_MASTER_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondSlaveConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_SLAVE_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Slaves only send; they never receive. */
+udp_send_channel {
+  #bind_hostname = yes # Highly recommended, soon to be default.
+                       # This option tells gmond to use a source address
+                       # that resolves to the machine's hostname.  Without
+                       # this, the metrics may appear to come from any
+                       # interface and the DNS names associated with
+                       # those IPs will be used to create the RRDs.
+  host = ${gmondMasterIP}
+  port = ${gmondPort}
+  ttl = 1
+}
+END_OF_GMOND_SLAVE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}

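A minimal usage sketch for the library above (the same sequence setupGanglia.sh, below, runs; HDPSlaves is one of the cluster names from gangliaClusters.conf):

    source ./gmondLib.sh
    # The core config goes on every box that runs gmond for this cluster...
    generateGmondCoreConf HDPSlaves > `getGmondCoreConfFileName HDPSlaves`
    # ...and a slave box additionally gets the send-only UDP channel.
    generateGmondSlaveConf HDPSlaves > `getGmondSlaveConfFileName HDPSlaves`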
+ 47 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh

@@ -0,0 +1,47 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+RRDCACHED_BIN=/usr/bin/rrdcached;
+RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
+RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
+RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
+
+function getRrdcachedLoggedPid()
+{
+    if [ -e "${RRDCACHED_PID_FILE}" ]
+    then
+        echo `cat ${RRDCACHED_PID_FILE}`;
+    fi
+}
+
+function getRrdcachedRunningPid()
+{
+    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
+
+    if [ -n "${rrdcachedLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}

+ 128 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh

@@ -0,0 +1,128 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh
+
+function usage()
+{
+  cat << END_USAGE
+Usage: ${0} [-c <gmondClusterName> [-m]] [-t]
+
+Options:
+  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
+
+  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
+                          Cluster. Without this, we generate slave gmond configuration.
+
+  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
+                          gmond configuration that is generated without this).
+END_USAGE
+}
+
+function instantiateGmetadConf()
+{
+  # gmetad utility library.
+  source ./gmetadLib.sh;
+
+  generateGmetadConf > ${GMETAD_CONF_FILE};
+}
+
+function instantiateGmondConf()
+{
+  # gmond utility library.
+  source ./gmondLib.sh;
+ 
+  gmondClusterName=${1};
+
+  if [ "x" != "x${gmondClusterName}" ]
+  then
+
+    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
+    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
+    
+    # Always blindly generate the core gmond config - that goes on every box running gmond. 
+    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
+
+    isMasterGmond=${2};
+
+    # Decide whether we want to add on the master or slave gmond config.
+    if [ "0" -eq "${isMasterGmond}" ]
+    then
+      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
+    else
+      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
+    fi
+
+  else
+    echo "No gmondClusterName passed in, nothing to instantiate";
+  fi
+}
+
+# main()
+
+gmondClusterName=;
+isMasterGmond=0;
+configureGmetad=0;
+
+while getopts ":c:mt" OPTION
+do
+  case ${OPTION} in
+    c) 
+      gmondClusterName=${OPTARG};
+      ;;
+    m)
+      isMasterGmond=1;
+      ;;
+    t)
+      configureGmetad=1;
+      ;;
+    ?)
+      usage;
+      exit 1;
+  esac
+done
+
+# Initialization.
+createDirectory ${GANGLIA_CONF_DIR};
+createDirectory ${GANGLIA_RUNTIME_DIR};
+# So rrdcached can drop its PID files in here.
+chmod a+w ${GANGLIA_RUNTIME_DIR};
+
+if [ -n "${gmondClusterName}" ]
+then
+
+  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
+  if [ "1" -eq "${configureGmetad}" ]
+  then
+    instantiateGmetadConf;
+  else
+    instantiateGmondConf ${gmondClusterName} ${isMasterGmond};
+  fi
+
+elif [ "1" -eq "${configureGmetad}" ]
+then
+  instantiateGmetadConf;
+else
+  usage;
+  exit 2;
+fi

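Hedged invocation examples, matching the usage() text above:

    ./setupGanglia.sh -c HDPSlaves        # slave gmond config for the HDPSlaves cluster
    ./setupGanglia.sh -c HDPNameNode -m   # master gmond config for the HDPNameNode cluster
    ./setupGanglia.sh -t                  # gmetad config (-t takes precedence over -c)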
+ 57 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh

@@ -0,0 +1,57 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
+source ./rrdcachedLib.sh;
+
+# Before starting gmetad, start rrdcached.
+./startRrdcached.sh;
+
+if [ $? -eq 0 ] 
+then
+    gmetadRunningPid=`getGmetadRunningPid`;
+
+    # Only attempt to start gmetad if there's not already one running.
+    if [ -z "${gmetadRunningPid}" ]
+    then
+        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
+
+        gmetadRunningPid=`getGmetadRunningPid`;
+
+        if [ -n "${gmetadRunningPid}" ]
+        then
+            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
+        else
+            echo "Failed to start ${GMETAD_BIN}";
+            exit 1;
+        fi
+    else
+        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
+    fi
+else
+    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
+    exit 2;
+fi

+ 73 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh

@@ -0,0 +1,73 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function startGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only attempt to start gmond if there's not already one running.
+    if [ -z "${gmondRunningPid}" ]
+    then
+      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+      if [ -e "${gmondCoreConfFileName}" ]
+      then 
+        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
+
+        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
+  
+        gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+  
+        if [ -n "${gmondRunningPid}" ]
+        then
+            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
+        else
+            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
+            exit 1;
+        fi
+      fi 
+    else
+      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so start 
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        startGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just start the one ${gmondClusterName} that was asked for.
+    startGmondForCluster ${gmondClusterName};
+fi

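Usage sketch (cluster names are whatever getConfiguredGangliaClusterNames finds under GANGLIA_CONF_DIR):

    ./startGmond.sh HDPSlaves   # start gmond for one named cluster
    ./startGmond.sh             # no argument: start gmond for every configured cluster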
+ 62 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh

@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only attempt to start rrdcached if there's not already one running.
+if [ -z "${rrdcachedRunningPid}" ]
+then
+    # Changed because of a problem Puppet had with the 'nobody' user:
+    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+    #         -b /var/lib/ganglia/rrds -B
+    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+             -b /var/lib/ganglia/rrds -B"
+
+    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for 
+    # this, but it doesn't take sometimes due to a lack of permissions,
+    # so perform the operation explicitly to be super-sure.
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
+
+    # Check to make sure rrdcached actually started up.
+    rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+    if [ -n "${rrdcachedRunningPid}" ]
+    then
+        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
+    else
+        echo "Failed to start ${RRDCACHED_BIN}";
+        exit 1;
+    fi
+else
+    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
+fi

+ 43 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh

@@ -0,0 +1,43 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${gmetadRunningPid}" ]
+then
+    kill -KILL ${gmetadRunningPid};
+    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
+fi
+
+# Poll again.
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Once we've killed gmetad, there should no longer be a running PID.
+if [ -z "${gmetadRunningPid}" ]
+then
+    # It's safe to stop rrdcached now.
+    ./stopRrdcached.sh;
+fi

+ 54 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh

@@ -0,0 +1,54 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function stopGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only go ahead with the termination if we could find a running PID.
+    if [ -n "${gmondRunningPid}" ]
+    then
+      kill -HUP ${gmondRunningPid};
+      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so stop
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        stopGmondForCluster ${gmondClusterName};
+    done
+else
+    stopGmondForCluster ${gmondClusterName};
+fi

+ 41 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh

@@ -0,0 +1,41 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${rrdcachedRunningPid}" ]
+then
+    kill -TERM ${rrdcachedRunningPid};
+    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
+    # until we're sure it's well and truly dead. 
+    #
+    # Without this, an immediately following startRrdcached.sh won't do
+    # anything, because it still sees this soon-to-die instance alive,
+    # and the net result is that after a few seconds, there's no
+    # ${RRDCACHED_BIN} running on the box anymore.
+    sleep 5;
+    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
+fi 

+ 28 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh

@@ -0,0 +1,28 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh;
+
+# Undo what we did while setting up Ganglia on this box.
+rm -rf ${GANGLIA_CONF_DIR};
+rm -rf ${GANGLIA_RUNTIME_DIR};

+ 79 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp

@@ -0,0 +1,79 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::config(
+  $ganglia_server_host = undef,
+  $service_state = $hdp::params::cluster_service_state
+)
+{
+ if ($service_state in ['running','installed_and_configured','stopped']) {
+    #TODO: divide into what is needed on server vs what is needed on monitored nodes
+    $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
+    $shell_files = ['checkGmond.sh','checkRrdcached.sh','gmetadLib.sh','gmondLib.sh','rrdcachedLib.sh' ,'setupGanglia.sh','startGmetad.sh','startGmond.sh','startRrdcached.sh','stopGmetad.sh','stopGmond.sh','stopRrdcached.sh','teardownGanglia.sh']
+
+    hdp::directory_recursive_create { $shell_cmds_dir :
+      owner => root,
+      group => root
+    } 
+
+     hdp-ganglia::config::init_file { ['gmetad','gmond']: }
+
+     hdp-ganglia::config::shell_file { $shell_files: }                       
+
+     hdp-ganglia::config::file { ['gangliaClusters.conf','gangliaEnv.sh','gangliaLib.sh']: 
+       ganglia_server_host => $ganglia_server_host
+     }
+ 
+     anchor{'hdp-ganglia::config::begin':} -> Hdp::Directory_recursive_create[$shell_cmds_dir] -> Hdp-ganglia::Config::Shell_file<||> -> anchor{'hdp-ganglia::config::end':}
+     Anchor['hdp-ganglia::config::begin'] -> Hdp-ganglia::Config::Init_file<||> -> Anchor['hdp-ganglia::config::end']
+     Anchor['hdp-ganglia::config::begin'] -> Hdp-ganglia::Config::File<||> -> Anchor['hdp-ganglia::config::end']
+  }
+}
+
+define hdp-ganglia::config::shell_file()
+{
+  file { "${hdp-ganglia::params::ganglia_shell_cmds_dir}/${name}":
+    source => "puppet:///modules/hdp-ganglia/${name}", 
+    mode => '0755'
+  }
+}
+
+define hdp-ganglia::config::init_file()
+{
+  file { "/etc/init.d/hdp-${name}":
+    source => "puppet:///modules/hdp-ganglia/${name}.init", 
+    mode => '0755'
+  }
+}
+
+### config files
+define hdp-ganglia::config::file(
+  $ganglia_server_host = undef
+)
+{
+  hdp::configfile { "${hdp-ganglia::params::ganglia_shell_cmds_dir}/${name}":
+    component           => 'ganglia',
+    owner               => root,
+    group               => root
+  }
+  if ($ganglia_server_host != undef) {
+    Hdp::Configfile<||>{ganglia_server_host => $ganglia_server_host}
+  }
+}

+ 41 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_monitor.pp

@@ -0,0 +1,41 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#TODO: the scripts called here should be converted to native Puppet
+define hdp-ganglia::config::generate_monitor(
+  $ganglia_service,
+  $role
+)
+{
+  $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
+  $cmd = $ganglia_service ? {
+    'gmond'  => $role ? {
+      'server' => "${shell_cmds_dir}/setupGanglia.sh -c ${name} -m",
+       default =>  "${shell_cmds_dir}/setupGanglia.sh -c ${name}"
+    },
+    'gmetad' => "${shell_cmds_dir}/setupGanglia.sh -t",
+     default => hdp_fail("Unexpected ganglia service: ${ganglia_service}")
+  }
+
+  #TODO: put in test condition
+  hdp::exec { $cmd:
+    command => $cmd
+ }
+}

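For reference, the shell commands this define resolves to, assuming the default shell-commands directory /usr/libexec/hdp/ganglia from params.pp and a resource title of HDPSlaves:

    /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves      # ganglia_service => 'gmond', monitor role
    /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m   # ganglia_service => 'gmond', role => 'server'
    /usr/libexec/hdp/ganglia/setupGanglia.sh -t                # ganglia_service => 'gmetad'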
+ 42 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_server.pp

@@ -0,0 +1,42 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#TODO: the scripts called here should be converted to native Puppet
+define hdp-ganglia::config::generate_server(
+  $ganglia_service,
+  $role
+)
+{
+  $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
+
+  $cmd = $ganglia_service ? {
+    'gmond'  => $role ? {
+      'server' => "${shell_cmds_dir}/setupGanglia.sh -c ${name} -m",
+       default =>  "${shell_cmds_dir}/setupGanglia.sh -c ${name}"
+    },
+    'gmetad' => "${shell_cmds_dir}/setupGanglia.sh -t",
+     default => hdp_fail("Unexpected ganglia service: ${ganglia_service}")
+  }
+
+  #TODO: put in test condition
+  hdp::exec { $cmd:
+    command => $cmd
+ }
+}

+ 36 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp

@@ -0,0 +1,36 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::hdp-gmetad::service_check() 
+{
+  
+  anchor { 'hdp-ganglia::hdp-gmetad::service_check::begin':}
+
+  exec { 'hdp-gmetad':
+    command   => "/etc/init.d/hdp-gmetad status | grep -v failed",
+    tries     => 3,
+    try_sleep => 5,
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    before    => Anchor['hdp-ganglia::hdp-gmetad::service_check::end'],
+    logoutput => "true"
+  }
+
+  anchor{ 'hdp-ganglia::hdp-gmetad::service_check::end':}
+}

+ 36 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp

@@ -0,0 +1,36 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::hdp-gmond::service_check() 
+{
+  
+  anchor { 'hdp-ganglia::hdp-gmond::service_check::begin':}
+
+  exec { 'hdp-gmond':
+    command   => "/etc/init.d/hdp-gmond status | grep -v failed",
+    tries     => 3,
+    try_sleep => 5,
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    before    => Anchor['hdp-ganglia::hdp-gmond::service_check::end'],
+    logoutput => "true"
+  }
+
+  anchor{ 'hdp-ganglia::hdp-gmond::service_check::end':}
+}

+ 37 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp

@@ -0,0 +1,37 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia(
+  $service_state
+)
+{
+  if (($service_state != 'no_op') and ($service_state != 'uninstalled')) {
+    include hdp-ganglia::params
+    $gmetad_user = $hdp-ganglia::params::gmetad_user
+    $gmond_user = $hdp-ganglia::params::gmond_user
+  
+    user { $gmond_user : shell => '/bin/bash'} #provision for nobody user
+    if ( $gmetad_user != $gmond_user) {
+      user { $gmetad_user : shell => '/bin/bash'} #provision for nobody user
+    }
+    anchor{'hdp-ganglia::begin':} -> User<|title == $gmond_user or title == $gmetad_user|> ->  anchor{'hdp-ganglia::end':}
+  }
+}
+

+ 115 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp

@@ -0,0 +1,115 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::monitor(
+  $service_state = $hdp::params::cluster_service_state,
+  $ganglia_server_host = undef,
+  $opts = {}
+) inherits hdp-ganglia::params
+{
+  if  ($service_state == 'no_op') {
+  } elsif ($service_state == 'uninstalled') {     
+
+   hdp::package { 'ganglia-monitor':
+     ensure      => 'uninstalled',
+     java_needed => false
+   }
+
+  } else {
+    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
+      class { 'hdp-ganglia':
+       service_state => $service_state
+      }
+    }
+
+    hdp::package { 'ganglia-monitor': }
+
+    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
+      class { 'hdp-ganglia::config': ganglia_server_host => $ganglia_server_host}
+    }
+
+#    if (($hdp::params::service_exists['hdp-hadoop::datanode'] == true) or ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::tasktracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::client'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)) {
+#     class { 'hdp-hadoop::enable-ganglia': }
+#   }
+
+    if ($hdp::params::service_exists['hdp-hbase::master'] == true) {
+      class { 'hdp-hbase::master::enable-ganglia': }
+    }
+  
+    if ($hdp::params::service_exists['hdp-hbase::regionserver'] == true) {
+      class { 'hdp-hbase::regionserver::enable-ganglia': }
+    }
+
+    class { 'hdp-ganglia::monitor::config-gen': }
+  
+    class { 'hdp-ganglia::monitor::gmond': ensure => $service_state}
+
+    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
+      Class['hdp-ganglia'] -> Hdp::Package['ganglia-monitor'] -> Class['hdp-ganglia::config'] -> 
+      Class['hdp-ganglia::monitor::config-gen'] -> Class['hdp-ganglia::monitor::gmond']
+    } else {
+      Hdp::Package['ganglia-monitor'] ->  Class['hdp-ganglia::monitor::config-gen'] -> Class['hdp-ganglia::monitor::gmond']
+    }
+  }
+}
+
+
+class hdp-ganglia::monitor::config-gen()
+{
+
+  $service_exists = $hdp::params::service_exists
+
+  if ($service_exists['hdp-hadoop::namenode'] == true) {
+    hdp-ganglia::config::generate_monitor { 'HDPNameNode':}
+  }
+  if ($service_exists['hdp-hadoop::jobtracker'] == true){
+    hdp-ganglia::config::generate_monitor { 'HDPJobTracker':}
+  }
+  if ($service_exists['hdp-hbase::master'] == true) {
+    hdp-ganglia::config::generate_monitor { 'HDPHBaseMaster':}
+  }
+  if ($service_exists['hdp-hadoop::datanode'] == true) {
+    hdp-ganglia::config::generate_monitor { 'HDPSlaves':}
+  }
+  Hdp-ganglia::Config::Generate_monitor<||>{
+    ganglia_service => 'gmond',
+    role => 'monitor'
+  }
+   # 
+  anchor{'hdp-ganglia::monitor::config-gen::begin':} -> Hdp-ganglia::Config::Generate_monitor<||> -> anchor{'hdp-ganglia::monitor::config-gen::end':}
+}
+
+class hdp-ganglia::monitor::gmond(
+  $ensure
+  )
+{
+  if ($ensure == 'running') {
+    $command = "service hdp-gmond start >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"
+   } elsif  ($ensure == 'stopped') {
+    $command = "service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"
+  }
+  if ($ensure == 'running' or $ensure == 'stopped') {
+    # Guard only the start case; the stop command must still run while gmond is up.
+    $unless_cmd = $ensure ? {
+      'running' => "/bin/ps auwx | /bin/grep [g]mond",
+      default   => undef
+    }
+    hdp::exec { "hdp-gmond service" :
+      command => $command,
+      unless  => $unless_cmd,
+      path    => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    }
+  }
+}

+ 90 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp

@@ -0,0 +1,90 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::monitor_and_server(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-ganglia::params
+{
+  $ganglia_shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
+  $ganglia_conf_dir = $hdp-ganglia::params::ganglia_conf_dir
+  $ganglia_runtime_dir = $hdp-ganglia::params::ganglia_runtime_dir
+
+  #note: includes the common package ganglia-monitor
+  class { 'hdp-ganglia':
+    service_state => $service_state
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['uninstalled']) {
+    class { 'hdp-ganglia::server::packages':
+      ensure => 'uninstalled'
+      }
+
+    hdp::directory { [$ganglia_conf_dir,$ganglia_runtime_dir]:
+      service_state => $service_state,
+      force => true
+    }
+    
+    class { 'hdp-ganglia::config':
+      service_state => $service_state
+    }
+
+    Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> 
+      Hdp::Directory[$ganglia_conf_dir] -> Hdp::Directory[$ganglia_runtime_dir] ->
+      Class['hdp-ganglia::config']
+  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
+    class { 'hdp-ganglia::server::packages': }
+
+    class { 'hdp-ganglia::config': 
+     ganglia_server_host => $hdp::params::host_address,
+     service_state       => $service_state
+     }
+
+    class {'hdp-ganglia::monitor::config-gen': }      
+
+    class {'hdp-ganglia::server::config-gen': }      
+    
+    hdp-ganglia::config::generate_server { 'gmetad':
+      ganglia_service => 'gmetad',
+      role            => 'server'
+    }
+
+    class { 'hdp-ganglia::service::gmond': 
+      ensure => $service_state
+    }
+
+    class { 'hdp-ganglia::server::services' : 
+      service_state => $service_state,
+      monitor_and_server_single_node => true
+    }
+
+    class { 'hdp-ganglia::service::change_permission':
+      ensure => $service_state
+    }
+
+    #top level no anchors needed
+    Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> Class['hdp-ganglia::config'] -> 
+      Class['hdp-ganglia::monitor::config-gen'] -> Class['hdp-ganglia::server::config-gen'] -> Hdp-ganglia::Config::Generate_server['gmetad'] ->
+      Class['hdp-ganglia::service::gmond'] -> Class['hdp-ganglia::server::services'] ->
+      Class['hdp-ganglia::service::change_permission']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}

+ 32 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp

@@ -0,0 +1,32 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::params() inherits hdp::params
+{
+  $ganglia_conf_dir = "/etc/ganglia/hdp"
+  $ganglia_runtime_dir = "/var/run/ganglia/hdp"
+
+  $ganglia_shell_cmds_dir = hdp_default("ganglia_shell_cmd_dir","/usr/libexec/hdp/ganglia")
+  
+  $gmetad_user = $hdp::params::gmetad_user
+  $gmond_user = $hdp::params::gmond_user
+
+  $webserver_group = hdp_default("hadoop/gangliaEnv/webserver_group","apache")
+}

+ 104 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp

@@ -0,0 +1,104 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-ganglia::server(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits  hdp-ganglia::params
+{
+  $hdp::params::service_exists['hdp-ganglia::server'] = true
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state == 'uninstalled') {
+
+   class { 'hdp-ganglia::server::packages':
+      ensure => 'uninstalled'
+   }
+
+  } else {
+  class { 'hdp-ganglia':
+    service_state => $service_state
+  }
+
+  class { 'hdp-ganglia::server::packages': }
+
+  class { 'hdp-ganglia::config': 
+    ganglia_server_host => $hdp::params::host_address,
+    service_state       => $service_state 
+  }
+
+  hdp-ganglia::config::generate_server { ['HDPHBaseMaster','HDPJobTracker','HDPNameNode','HDPSlaves']:
+    ganglia_service => 'gmond',
+    role => 'server'
+  }
+  hdp-ganglia::config::generate_server { 'gmetad':
+    ganglia_service => 'gmetad',
+    role => 'server'
+  }
+
+  class { 'hdp-ganglia::server::gmetad': ensure => $service_state}
+
+  class { 'hdp-ganglia::service::change_permission': ensure => $service_state }
+
+  #top level does not need anchors
+  Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> Class['hdp-ganglia::config'] -> 
+    Hdp-ganglia::Config::Generate_server<||> -> Class['hdp-ganglia::server::gmetad'] -> Class['hdp-ganglia::service::change_permission']
+ }
+}
+
+class hdp-ganglia::server::packages(
+  $ensure = present 
+)
+{
+  hdp::package { ['ganglia-server','ganglia-gweb','ganglia-hdp-gweb-addons']: 
+    ensure      => $ensure,
+    java_needed => false  
+  } 
+}
+
+
+class hdp-ganglia::service::change_permission(
+  $ensure
+)
+{
+  if ($ensure == 'running' or $ensure == 'installed_and_configured') {
+    hdp::directory_recursive_create { '/var/lib/ganglia/dwoo' :
+      mode => '0777'
+      }
+  }
+}
+
+class hdp-ganglia::server::gmetad(
+  $ensure
+)
+{
+  if ($ensure == 'running') {
+    $command = "service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
+   } elsif  ($ensure == 'stopped') {
+    $command = "service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
+  }
+  if ($ensure == 'running' or $ensure == 'stopped') {
+    # Guard only the start case; the stop command must still run while gmetad is up.
+    $unless_cmd = $ensure ? {
+      'running' => "/bin/ps auwx | /bin/grep [g]metad",
+      default   => undef
+    }
+    hdp::exec { "hdp-gmetad service" :
+      command => $command,
+      unless  => $unless_cmd,
+      path    => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    }
+  }
+}

+ 25 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb

@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#########################################################
+### ClusterName           GmondMasterHost   GmondPort ###
+#########################################################
+    HDPSlaves           <%=scope.function_hdp_host("ganglia_server_host")%>  8660
+    HDPNameNode         <%=scope.function_hdp_host("ganglia_server_host")%>  8661
+    HDPJobTracker       <%=scope.function_hdp_host("ganglia_server_host")%>  8662
+    HDPHBaseMaster      <%=scope.function_hdp_host("ganglia_server_host")%>  8663

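Rendered, each row maps one Ganglia cluster to the single gmond master host and a distinct port; with a hypothetical ganglia_server_host of ganglia01.example.com the file would read:

    HDPSlaves           ganglia01.example.com  8660
    HDPNameNode         ganglia01.example.com  8661
    HDPJobTracker       ganglia01.example.com  8662
    HDPHBaseMaster      ganglia01.example.com  8663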
+ 24 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb

@@ -0,0 +1,24 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Unix users and groups for the binaries we start up.
+GMETAD_USER=<%=scope.function_hdp_template_var("gmetad_user")%>;
+GMOND_USER=<%=scope.function_hdp_template_var("gmond_user")%>;
+WEBSERVER_GROUP=<%=scope.function_hdp_template_var("webserver_group")%>;

+ 61 - 0
ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb

@@ -0,0 +1,61 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+GANGLIA_CONF_DIR=<%=scope.function_hdp_template_var("ganglia_conf_dir")%>;
+GANGLIA_RUNTIME_DIR=<%=scope.function_hdp_template_var("ganglia_runtime_dir")%>;
+
+# This file contains all the info about each Ganglia Cluster in our Grid.
+GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
+
+function createDirectory()
+{
+    directoryPath=${1};
+
+    if [ "x" != "x${directoryPath}" ]
+    then
+        mkdir -p ${directoryPath};
+    fi
+}
+
+function getGangliaClusterInfo()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    else
+        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    fi
+}
+
+function getConfiguredGangliaClusterNames()
+{
+  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
+  # the subdirectory name from each.
+  if [ -e ${GANGLIA_CONF_DIR} ]
+  then  
+    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
+  fi
+}

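A sketch of how the awk lookup pairs with the read in gmondLib.sh (hostname hypothetical, entry as in the rendered gangliaClusters.conf sketch above):

    $ getGangliaClusterInfo HDPNameNode
    HDPNameNode         ganglia01.example.com  8661
    # ...which callers split into three fields:
    read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo HDPNameNode`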
+ 265 - 243
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp

@@ -1,243 +1,265 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#singleton for use with <||> form so that namenode, datanode, etc can pass state to hdp-hadoop and still use include
-define hdp-hadoop::common(
-  $service_states = []
-)
-{
-  class { 'hdp-hadoop':
-    service_states => $service_states    
-  }
-  anchor{'hdp-hadoop::common::begin':} -> Class['hdp-hadoop'] -> anchor{'hdp-hadoop::common::end':} 
-}
-
-class hdp-hadoop::initialize()
-{
-  if ($hdp::params::component_exists['hdp-hadoop'] == true) {
-  } else {
-    $hdp::params::component_exists['hdp-hadoop'] = true
-  }
-  hdp-hadoop::common { 'common':}
-  anchor{'hdp-hadoop::initialize::begin':} -> Hdp-hadoop::Common['common'] -> anchor{'hdp-hadoop::initialize::end':}
-}
-
-class hdp-hadoop(
-  $service_states  = []
-)
-{
-  include configgenerator
-
-  configgenerator::configfile('hdfs-site.xml': 
-    module => 'hdp-hadoop',
-    properties => {'dfs.name.dir' => '<%=scope.function_hdp_template_var("dfs_name_dir")%>',
-      'dfs.support.append' => '<%=scope.function_hdp_template_var("dfs_support_append")%>',
-      'dfs.webhdfs.enabled' => '<%=scope.function_hdp_template_var("dfs_webhdfs_enabled")%>',
-      'dfs.datanode.failed.volumes.tolerated' => '<%=scope.function_hdp_template_var("dfs_datanode_failed_volume_tolerated")%>',
-      'dfs.block.local-path-access.user' => '<%=scope.function_hdp_template_var("dfs_block_local_path_access_user")%>',
-      'dfs.data.dir' => '<%=scope.function_hdp_template_var("dfs_data_dir")%>'},)
-  
-  include hdp-hadoop::params
-  $hadoop_config_dir = $hdp-hadoop::params::conf_dir
-  $mapred_user = $hdp-hadoop::params::mapred_user  
-  $hdfs_user = $hdp-hadoop::params::hdfs_user  
-
-  anchor{'hdp-hadoop::begin':} 
-  anchor{'hdp-hadoop::end':} 
-
-  if ('uninstalled' in $service_states) {
-    hdp-hadoop::package { 'hadoop':
-      ensure => 'uninstalled'
-    }
-
-    hdp::directory_recursive_create { $hadoop_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> -> Hdp::Directory_recursive_create[$hadoop_config_dir] -> Anchor['hdp-hadoop::end']
-  } else {
-    
-    hdp-hadoop::package { 'hadoop':}
-
-
-    hdp::directory_recursive_create { $hadoop_config_dir:
-      service_state => $service_state,
-      force => true
-    }
- 
-    hdp::user{ $hdfs_user:}
-    hdp::user { $mapred_user:}
-
-    $logdirprefix = $hdp-hadoop::params::hadoop_logdirprefix
-    hdp::directory_recursive_create { $logdirprefix: 
-        owner => 'root'
-    }
-    $piddirprefix = $hdp-hadoop::params::hadoop_piddirprefix
-    hdp::directory_recursive_create { $piddirprefix: 
-        owner => 'root'
-    }
- 
-    #taskcontroller.cfg properties conditional on security
-    if ($hdp::params::security_enabled == true) {
-      file { "${hdp::params::hadoop_bin}/task-controller":
-        owner   => 'root',
-        group   => $hdp::params::hadoop_user_group,
-        mode    => '6050',
-        require => Hdp-hadoop::Package['hadoop'],
-        before  => Anchor['hdp-hadoop::end']
-      }
-      $tc_owner = 'root'
-      $tc_mode = '0400'
-    } else {
-      $tc_owner = $hdfs_user
-      $tc_mode = undef
-    }
-    hdp-hadoop::configfile { 'taskcontroller.cfg' :
-      tag   => 'common',
-      owner => $tc_owner,
-      mode  => $tc_mode
-    }
-
-    $template_files = ['hadoop-env.sh','core-site.xml','hadoop-policy.xml','health_check','capacity-scheduler.xml','commons-logging.properties','log4j.properties','mapred-queue-acls.xml','slaves']
-    hdp-hadoop::configfile { $template_files:
-      tag   => 'common', 
-      owner => $hdfs_user
-    }
-    
-    hdp-hadoop::configfile { 'hadoop-metrics2.properties' : 
-      tag   => 'common', 
-      owner => $hdfs_user,
-    }
-
-    hdp-hadoop::configfile { 'mapred-site.xml': 
-      tag => 'common', 
-      owner => $mapred_user
-    }
-
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::Directory_recursive_create[$hadoop_config_dir] ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|> 
-    -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Anchor['hdp-hadoop::end']
-    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$logdirprefix] -> Anchor['hdp-hadoop::end']
-    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$piddirprefix] -> Anchor['hdp-hadoop::end']
-  }
-}
-
-class hdp-hadoop::enable-ganglia()
-{
-  Hdp-hadoop::Configfile<|title  == 'hadoop-metrics2.properties'|>{template_tag => 'GANGLIA'}
-}
-
-###config file helper
-define hdp-hadoop::configfile(
-  $owner = undef,
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir,
-  $mode = undef,
-  $namenode_host = undef,
-  $jtnode_host = undef,
-  $snamenode_host = undef,
-  $template_tag = undef,
-  $size = undef, #TODO: deprecate
-  $sizes = []
-) 
-{
-  #TODO: may need to be fixed 
-  if ($jtnode_host == undef) {
-    $calc_jtnode_host = $namenode_host
-  } else {
-    $calc_jtnode_host = $jtnode_host 
-  }
- 
-  #only set 32 if theer is a 32 bit component and no 64 bit components
-  if (64 in $sizes) {
-    $common_size = 64
-  } elsif (32 in $sizes) {
-    $common_size = 32
-  } else {
-    $common_size = 6
-  }
-  
-  hdp::configfile { "${hadoop_conf_dir}/${name}":
-    component      => 'hadoop',
-    owner          => $owner,
-    mode           => $mode,
-    namenode_host  => $namenode_host,
-    snamenode_host => $snamenode_host,
-    jtnode_host    => $calc_jtnode_host,
-    template_tag   => $template_tag,
-    size           => $common_size
-  }
-}
-
-#####
-define hdp-hadoop::exec-hadoop(
-  $command,
-  $unless = undef,
-  $refreshonly = undef,
-  $echo_yes = false,
-  $kinit_override = false,
-  $tries = 1,
-  $timeout = 900,
-  $try_sleep = undef,
-  $user = undef,
-  $logoutput = undef
-)
-{
-  include hdp-hadoop::params
-  $security_enabled = $hdp::params::security_enabled
-  $conf_dir = $hdp-hadoop::params::conf_dir
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-
-  if ($user == undef) {
-    $run_user = $hdfs_user
-  } else {
-    $run_user = $user
-  }
-
-  if (($security_enabled == true) and ($kinit_override == false)) {
-    #TODO: may figure out so dont need to call kinit if auth in caceh already
-    if ($run_user in [$hdfs_user,'root']) {
-      $keytab = "${hdp-hadoop::params::keytab_path}/${hdfs_user}.headless.keytab"
-      $principal = $hdfs_user
-    } else {
-      $keytab = "${hdp-hadoop::params::keytab_path}/${user}.headless.keytab" 
-      $principal = $user
-    }
-    $kinit_if_needed = "/usr/kerberos/bin/kinit  -kt ${keytab} ${principal}; "
-  } else {
-    $kinit_if_needed = ""
-  }
- 
-  if ($echo_yes == true) {
-    $cmd = "${kinit_if_needed}yes Y | hadoop --config ${conf_dir} ${command}"
-  } else {
-    $cmd = "${kinit_if_needed}hadoop --config ${conf_dir} ${command}"
-  }
-
-  hdp::exec { $cmd:
-    command     => $cmd,
-    user        => $run_user,
-    unless      => $unless,
-    refreshonly => $refreshonly,
-    tries       => $tries,
-    timeout     => $timeout,
-    try_sleep   => $try_sleep,
-    logoutput   => $logoutput
-  }
-}
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#singleton for use with the <||> form so that namenode, datanode, etc. can pass state to hdp-hadoop and still use include
+define hdp-hadoop::common(
+  $service_states = []
+)
+{
+  class { 'hdp-hadoop':
+    service_states => $service_states    
+  }
+  anchor{'hdp-hadoop::common::begin':} -> Class['hdp-hadoop'] -> anchor{'hdp-hadoop::common::end':} 
+}
+
+class hdp-hadoop::initialize()
+{
+  if ($hdp::params::component_exists['hdp-hadoop'] != true) {
+    $hdp::params::component_exists['hdp-hadoop'] = true
+  }
+  hdp-hadoop::common { 'common':}
+  anchor{'hdp-hadoop::initialize::begin':} -> Hdp-hadoop::Common['common'] -> anchor{'hdp-hadoop::initialize::end':}
+
+#Configs generation  
+  include manifestloader
+
+  configgenerator::configfile{'hdfs-site.xml': 
+    module => 'hdp-hadoop',
+    properties => $manifestloader::hdp_hadoop_hdfs_site_props
+  }
+
+  configgenerator::configfile{'capacity-scheduler.xml': 
+    module => 'hdp-hadoop',
+    properties => $manifestloader::hdp_hadoop_capacity_scheduler_props
+  }
+
+  configgenerator::configfile{'mapred-site.xml': 
+    module => 'hdp-hadoop',
+    properties => $manifestloader::hdp_hadoop_mapred_site_props
+  }
+      
+  configgenerator::configfile{'core-site.xml': 
+    module => 'hdp-hadoop',
+    properties => $manifestloader::hdp_hadoop_core_site_props
+  }
+      
+  configgenerator::configfile{'hadoop-policy.xml': 
+    module => 'hdp-hadoop',
+    properties => $manifestloader::hdp_hadoop_policy_props
+  }
+      
+  configgenerator::configfile{'mapred-queue-acls.xml': 
+    module => 'hdp-hadoop',
+    properties => $manifestloader::hdp_hadoop_mapred_queue_acls_props
+  }
+}
+
+class hdp-hadoop(
+  $service_states  = []
+)
+{
+  include hdp-hadoop::params
+  $hadoop_config_dir = $hdp-hadoop::params::conf_dir
+  $mapred_user = $hdp-hadoop::params::mapred_user  
+  $hdfs_user = $hdp-hadoop::params::hdfs_user  
+
+  anchor{'hdp-hadoop::begin':} 
+  anchor{'hdp-hadoop::end':} 
+
+  if ('uninstalled' in $service_states) {
+    hdp-hadoop::package { 'hadoop':
+      ensure => 'uninstalled'
+    }
+
+    hdp::directory_recursive_create { $hadoop_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> -> Hdp::Directory_recursive_create[$hadoop_config_dir] -> Anchor['hdp-hadoop::end']
+  } else {
+    
+    hdp-hadoop::package { 'hadoop':}
+
+
+    hdp::directory_recursive_create { $hadoop_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+ 
+    hdp::user{ $hdfs_user:}
+    hdp::user { $mapred_user:}
+
+    $logdirprefix = $hdp-hadoop::params::hadoop_logdirprefix
+    hdp::directory_recursive_create { $logdirprefix: 
+        owner => 'root'
+    }
+    $piddirprefix = $hdp-hadoop::params::hadoop_piddirprefix
+    hdp::directory_recursive_create { $piddirprefix: 
+        owner => 'root'
+    }
+ 
+    #taskcontroller.cfg properties conditional on security
+    if ($hdp::params::security_enabled == true) {
+      file { "${hdp::params::hadoop_bin}/task-controller":
+        owner   => 'root',
+        group   => $hdp::params::hadoop_user_group,
+        mode    => '6050',
+        require => Hdp-hadoop::Package['hadoop'],
+        before  => Anchor['hdp-hadoop::end']
+      }
+      $tc_owner = 'root'
+      $tc_mode = '0400'
+    } else {
+      $tc_owner = $hdfs_user
+      $tc_mode = undef
+    }
+    hdp-hadoop::configfile { 'taskcontroller.cfg' :
+      tag   => 'common',
+      owner => $tc_owner,
+      mode  => $tc_mode
+    }
+
+    $template_files = ['hadoop-env.sh','core-site.xml','hadoop-policy.xml','health_check','capacity-scheduler.xml','commons-logging.properties','log4j.properties','mapred-queue-acls.xml','slaves']
+    hdp-hadoop::configfile { $template_files:
+      tag   => 'common', 
+      owner => $hdfs_user
+    }
+    
+    hdp-hadoop::configfile { 'hadoop-metrics2.properties' : 
+      tag   => 'common', 
+      owner => $hdfs_user,
+    }
+
+    hdp-hadoop::configfile { 'mapred-site.xml': 
+      tag => 'common', 
+      owner => $mapred_user
+    }
+
+    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::Directory_recursive_create[$hadoop_config_dir] ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|> 
+    -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Anchor['hdp-hadoop::end']
+    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$logdirprefix] -> Anchor['hdp-hadoop::end']
+    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$piddirprefix] -> Anchor['hdp-hadoop::end']
+  }
+}
+
+class hdp-hadoop::enable-ganglia()
+{
+  Hdp-hadoop::Configfile<|title  == 'hadoop-metrics2.properties'|>{template_tag => 'GANGLIA'}
+}
+
+###config file helper
+define hdp-hadoop::configfile(
+  $owner = undef,
+  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir,
+  $mode = undef,
+  $namenode_host = undef,
+  $jtnode_host = undef,
+  $snamenode_host = undef,
+  $template_tag = undef,
+  $size = undef, #TODO: deprecate
+  $sizes = []
+) 
+{
+  #TODO: may need to be fixed 
+  if ($jtnode_host == undef) {
+    $calc_jtnode_host = $namenode_host
+  } else {
+    $calc_jtnode_host = $jtnode_host 
+  }
+ 
+  #only set 32 if there is a 32-bit component and no 64-bit components
+  if (64 in $sizes) {
+    $common_size = 64
+  } elsif (32 in $sizes) {
+    $common_size = 32
+  } else {
+    $common_size = 64
+  }
+  
+  hdp::configfile { "${hadoop_conf_dir}/${name}":
+    component      => 'hadoop',
+    owner          => $owner,
+    mode           => $mode,
+    namenode_host  => $namenode_host,
+    snamenode_host => $snamenode_host,
+    jtnode_host    => $calc_jtnode_host,
+    template_tag   => $template_tag,
+    size           => $common_size
+  }
+}
+
+#####
+define hdp-hadoop::exec-hadoop(
+  $command,
+  $unless = undef,
+  $refreshonly = undef,
+  $echo_yes = false,
+  $kinit_override = false,
+  $tries = 1,
+  $timeout = 900,
+  $try_sleep = undef,
+  $user = undef,
+  $logoutput = undef
+)
+{
+  include hdp-hadoop::params
+  $security_enabled = $hdp::params::security_enabled
+  $conf_dir = $hdp-hadoop::params::conf_dir
+  $hdfs_user = $hdp-hadoop::params::hdfs_user
+
+  if ($user == undef) {
+    $run_user = $hdfs_user
+  } else {
+    $run_user = $user
+  }
+
+  if (($security_enabled == true) and ($kinit_override == false)) {
+    #TODO: may figure out a way so we don't need to call kinit if auth is already in the cache
+    if ($run_user in [$hdfs_user,'root']) {
+      $keytab = "${hdp-hadoop::params::keytab_path}/${hdfs_user}.headless.keytab"
+      $principal = $hdfs_user
+    } else {
+      $keytab = "${hdp-hadoop::params::keytab_path}/${user}.headless.keytab" 
+      $principal = $user
+    }
+    $kinit_if_needed = "/usr/kerberos/bin/kinit  -kt ${keytab} ${principal}; "
+  } else {
+    $kinit_if_needed = ""
+  }
+ 
+  if ($echo_yes == true) {
+    $cmd = "${kinit_if_needed}yes Y | hadoop --config ${conf_dir} ${command}"
+  } else {
+    $cmd = "${kinit_if_needed}hadoop --config ${conf_dir} ${command}"
+  }
+
+  hdp::exec { $cmd:
+    command     => $cmd,
+    user        => $run_user,
+    unless      => $unless,
+    refreshonly => $refreshonly,
+    tries       => $tries,
+    timeout     => $timeout,
+    try_sleep   => $try_sleep,
+    logoutput   => $logoutput
+  }
+}
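
A usage sketch for the hdp-hadoop::exec-hadoop define above; the resource title and command are illustrative, not part of the commit:

    # Runs `hadoop --config <conf_dir> fs -ls /` as the hdfs user by default,
    # prefixed with kinit when security is enabled.
    hdp-hadoop::exec-hadoop { 'example::list_root':
      command => 'fs -ls /',
      tries   => 3
    }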

+ 26 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh

@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'usertable'
+drop 'usertable'
+create 'usertable','family'
+put 'usertable','row01','family:col01','value1'
+scan 'usertable'
+exit

+ 39 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp

@@ -0,0 +1,39 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::client(
+  $service_state = $hdp::params::cluster_client_state,
+  $opts = {}
+)
+{
+  #assumption is that there are no other hbase components on the node
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
+    if (($hdp::params::service_exists['hdp-hbase::master'] != true) and ($hdp::params::service_exists['hdp-hbase::regionserver'] != true)) {
+      #adds package, users, directories, and common configs
+      class { 'hdp-hbase': 
+        type          => 'client',
+        service_state => $service_state
+      }
+    }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}

+ 56 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp

@@ -0,0 +1,56 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::hbase::service_check() 
+{
+  $smoke_test_user = $hdp::params::smokeuser
+
+  $output_file = "/apps/hbase/data/usertable"
+  $conf_dir = $hdp::params::hbase_conf_dir
+
+  $test_cmd = "fs -test -e ${output_file}" 
+  
+  anchor { 'hdp-hbase::hbase::service_check::begin':}
+
+  file { '/tmp/hbaseSmoke.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp-hbase/hbaseSmoke.sh",
+    mode => '0755',
+  }
+
+  exec { '/tmp/hbaseSmoke.sh':
+    command   => "su - ${smoke_test_user} -c 'hbase --config $conf_dir  shell /tmp/hbaseSmoke.sh'",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/hbaseSmoke.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    notify    => Hdp-hadoop::Exec-hadoop['hbase::service_check::test'],
+    logoutput => "true"
+  }
+
+  hdp-hadoop::exec-hadoop { 'hbase::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Exec['/tmp/hbaseSmoke.sh'],
+    before      => Anchor['hdp-hbase::hbase::service_check::end'] #TODO: remove after testing
+  }
+  
+  anchor{ 'hdp-hbase::hbase::service_check::end':}
+}
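
Given how hdp-hadoop::exec-hadoop assembles its command line, the test resource above should expand to roughly the following shell invocation (the conf dir shown is an assumption):

    hadoop --config /etc/hadoop/conf fs -test -e /apps/hbase/data/usertable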

+ 130 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp

@@ -0,0 +1,130 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase(
+  $type,
+  $service_state) 
+{
+  include hdp-hbase::params
+ 
+  $hbase_user = $hdp-hbase::params::hbase_user
+  $config_dir = $hdp-hbase::params::conf_dir
+  
+  $hdp::params::component_exists['hdp-hbase'] = true
+
+  #Configs generation  
+  configgenerator::configfile{'hbase-site.xml.erb': 
+    module => 'hdp-hbase',
+    properties => {'hbase.rootdir' => 'hdfs://<%=scope.function_hdp_host("namenode_host")%>:8020<%=scope.function_hdp_template_var("hbase_hdfs_root_dir")%>',
+      'hbase.cluster.distributed' => 'true',
+      'hbase.tmp.dir' => '<%=scope.function_hdp_template_var("hbase_tmp_dir")%>',
+      'hbase.master.info.bindAddress' => '<%=scope.function_hdp_host("hbase_master_host")%>',
+      'hbase.regionserver.global.memstore.upperLimit' => '<%=scope.function_hdp_template_var("regionserver_memstore_upperlimit")%>',
+      'hbase.regionserver.handler.count' => '<%=scope.function_hdp_template_var("regionserver_handlers")%>',
+      'hbase.hregion.majorcompaction' => '<%=scope.function_hdp_template_var("hregion_majorcompaction")%>',
+      'hbase.regionserver.global.memstore.lowerLimit' => '<%=scope.function_hdp_template_var("regionserver_memstore_lowerlimit")%>',
+      'hbase.hregion.memstore.block.multiplier' => '<%=scope.function_hdp_template_var("hregion_blockmultiplier")%>',
+      'hbase.hregion.memstore.flush.size' => '<%=scope.function_hdp_template_var("hregion_memstoreflushsize")%>',
+      'hbase.hregion.memstore.mslab.enabled' => '<%=scope.function_hdp_template_var("regionserver_memstore_lab")%>',
+      'hbase.hregion.max.filesize' => '<%=scope.function_hdp_template_var("hstorefile_maxsize")%>',
+      'hbase.client.scanner.caching' => '<%=scope.function_hdp_template_var("client_scannercaching")%>',
+      'zookeeper.session.timeout' => '<%=scope.function_hdp_template_var("zookeeper_sessiontimeout")%>',
+      'hbase.client.keyvalue.maxsize' => '<%=scope.function_hdp_template_var("hfile_max_keyvalue_size")%>',
+      'hbase.hstore.compactionThreshold' => '<%=scope.function_hdp_template_var("hstore_compactionthreshold")%>',
+      'hbase.hstore.blockingStoreFiles' => '<%=scope.function_hdp_template_var("hstore_blockingstorefiles")%>',
+      'hfile.block.cache.size' => '<%=scope.function_hdp_template_var("hfile_blockcache_size")%>',
+      'hbase.master.keytab.file' => '<%=scope.function_hdp_template_var("keytab_path")%>/hm.service.keytab',
+      'hbase.master.kerberos.principal' => 'hm/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%>',
+      'hbase.regionserver.keytab.file' => '<%=scope.function_hdp_template_var("keytab_path")%>/rs.service.keytab',
+      'hbase.regionserver.kerberos.principal' => 'rs/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%>',
+      'hbase.superuser' => 'hbase',
+      'hbase.coprocessor.region.classes' => '<%=scope.function_hdp_template_var("preloaded_regioncoprocessor_classes")%>',
+      'hbase.coprocessor.master.classes' => '<%=scope.function_hdp_template_var("preloaded_mastercoprocessor_classes")%>',
+      'hbase.zookeeper.quorum' => '<%=zkh=scope.function_hdp_host("zookeeper_hosts");scope.function_hdp_is_empty(zkh) ? "" : [zkh].flatten.join(",")%>',
+      'dfs.support.append' => '<%=scope.function_hdp_template_var("hdfs_support_append")%>',
+      'dfs.client.read.shortcircuit' => '<%=scope.function_hdp_template_var("hdfs_enable_shortcircuit_read")%>',
+      'dfs.client.read.shortcircuit.skip.checksum' => '<%=scope.function_hdp_template_var("hdfs_enable_shortcircuit_skipchecksum")%>',}
+      }
+
+  configgenerator::configfile{'hbase-policy.xml.erb': 
+    module => 'hdp-hbase',
+    properties => {'security.client.protocol.acl' => '*',
+      'security.admin.protocol.acl' => '*',
+      'security.masterregion.protocol.acl' => '*',}
+      }
+
+  anchor{'hdp-hbase::begin':}
+  anchor{'hdp-hbase::end':}
+
+  if ($service_state == 'uninstalled') {
+    hdp::package { 'hbase':
+      ensure => 'uninstalled'
+    }
+    hdp::directory { $config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Anchor['hdp-hbase::begin'] -> Hdp::Package['hbase'] -> Hdp::Directory[$config_dir] -> Anchor['hdp-hbase::end']
+
+  } else {  
+    hdp::package { 'hbase': }
+  
+    hdp::user{ $hbase_user:}
+ 
+    hdp::directory { $config_dir: 
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp-hbase::configfile { ['hbase-env.sh','hbase-site.xml','hbase-policy.xml','log4j.properties','hadoop-metrics.properties']: 
+      type => $type
+    }
+    hdp-hbase::configfile { 'regionservers':}
+    Anchor['hdp-hbase::begin'] -> Hdp::Package['hbase'] -> Hdp::User[$hbase_user] -> Hdp::Directory[$config_dir] -> 
+    Hdp-hbase::Configfile<||> ->  Anchor['hdp-hbase::end']
+  }
+}
+
+### config files
+define hdp-hbase::configfile(
+  $mode = undef,
+  $hbase_master_host = undef,
+  $template_tag = undef,
+  $type = undef,
+) 
+{
+  if ($name == 'hadoop-metrics.properties') {
+    if ($type == 'master') {
+      $tag = 'GANGLIA-MASTER'
+    } else {
+      $tag = 'GANGLIA-RS'
+    }
+  } else {
+    $tag = $template_tag
+  }
+  hdp::configfile { "${hdp-hbase::params::conf_dir}/${name}":
+    component         => 'hbase',
+    owner             => $hdp-hbase::params::hbase_user,
+    mode              => $mode,
+    hbase_master_host => $hbase_master_host,
+    template_tag      => $tag
+  }
+}
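
The configgenerator::configfile resources above presumably render Hadoop-style configuration XML from the properties hash; a sketch for the hbase-policy entries:

    <configuration>
      <property>
        <name>security.client.protocol.acl</name>
        <value>*</value>
      </property>
      <!-- ...one <property> element per key in the properties hash... -->
    </configuration>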

+ 24 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp

@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::master-conn($hbase_master_host)
+{
+  Hdp-Hbase::Configfile<||>{hbase_master_host => $hbase_master_host}
+}

+ 66 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp

@@ -0,0 +1,66 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::master(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hbase::params 
+{
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {    
+    $hdp::params::service_exists['hdp-hbase::master'] = true
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+       $masterHost = $kerberos_adminclient_host[0]
+       hdp::download_keytab { 'hbase_master_service_keytab' :
+         masterhost => $masterHost,
+         keytabdst => "${$keytab_path}/hm.service.keytab",
+         keytabfile => 'hm.service.keytab',
+         owner => $hdp::params::hbase_user
+       }
+    }
+  
+    #adds package, users, directories, and common configs
+    class { 'hdp-hbase': 
+      type          => 'master',
+      service_state => $service_state
+    }
+
+    Hdp-hbase::Configfile<||>{hbase_master_host => $hdp::params::host_address}
+  
+    hdp-hbase::service{ 'master':
+      ensure => $service_state
+    }
+
+    #top level does not need anchors
+    Class['hdp-hbase'] -> Hdp-hbase::Service['master'] 
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+#assumes that master and regionserver will not be on the same machine
+class hdp-hbase::master::enable-ganglia()
+{
+  Hdp-hbase::Configfile<|title  == 'hadoop-metrics.properties'|>{template_tag => 'GANGLIA-MASTER'}
+}
+
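
For reference, a minimal sketch of how a node manifest might declare this class, using one of the states it accepts:

    class { 'hdp-hbase::master':
      service_state => 'running'
    }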

+ 86 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp

@@ -0,0 +1,86 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::params() inherits hdp::params 
+{
+  
+  ####### users
+  $hbase_user = $hdp::params::hbase_user
+  
+  ### hbase-env
+  $hadoop_conf_dir = hdp_default("hadoop/hbase-env/hadoop_conf_dir")
+  $conf_dir = $hdp::params::hbase_conf_dir
+
+  $hbase_log_dir = hdp_default("hadoop/hbase-env/hbase_log_dir","/var/log/hbase")
+
+  $hbase_master_heapsize = hdp_default("hadoop/hbase-env/hbase_master_heapsize","1000m")
+
+  $hbase_pid_dir = hdp_default("hadoop/hbase-env/hbase_pid_dir","/var/run/hbase")
+
+  $hbase_regionserver_heapsize = hdp_default("hadoop/hbase-env/hbase_regionserver_heapsize","1000m")
+
+  $hbase_regionserver_xmn_size = hdp_calc_xmn_from_xms("$hbase_regionserver_heapsize","0.2","512")
+
+  ### hbase-site.xml
+  $hbase_hdfs_root_dir = hdp_default("hadoop/hbase-site/hbase_hdfs_root_dir","/apps/hbase/data")
+
+  $hbase_tmp_dir = hdp_default("hadoop/hbase-site/hbase_tmp_dir","$hbase_log_dir")
+
+
+  #TODO: check if any of these 'hdfs' vars need to be equated with vars in hdp-hadoop
+  $hdfs_enable_shortcircuit_read = hdp_default("hadoop/hbase-site/hdfs_enable_shortcircuit_read",true)
+
+  $hdfs_enable_shortcircuit_skipchecksum = hdp_default("hadoop/hbase-site/hdfs_enable_shortcircuit_skipchecksum","false")
+
+  $hdfs_support_append = hdp_default("hadoop/hbase-site/hdfs_support_append",true)
+
+  $hfile_blockcache_size = hdp_default("hadoop/hbase-site/hfile_blockcache_size","0.25")
+
+  $hfile_max_keyvalue_size = hdp_default("hadoop/hbase-site/hfile_max_keyvalue_size",10485760)
+
+  $zookeeper_sessiontimeout = hdp_default("hadoop/hbase-site/zookeeper_sessiontimeout",60000)
+
+  $client_scannercaching = hdp_default("hadoop/hbase-site/client_scannercaching",100)
+
+  $hstore_blockingstorefiles = hdp_default("hadoop/hbase-site/hstore_blockingstorefiles",7)
+
+  $hstore_compactionthreshold = hdp_default("hadoop/hbase-site/hstore_compactionthreshold",3)
+
+  $hstorefile_maxsize = hdp_default("hadoop/hbase-site/hstorefile_maxsize",1073741824)
+
+  $hregion_blockmultiplier = hdp_default("hadoop/hbase-site/hregion_blockmultiplier",2)
+
+  $hregion_memstoreflushsize = hdp_default("hadoop/hbase-site/hregion_memstoreflushsize",134217728)
+
+  $regionserver_handlers = hdp_default("hadoop/hbase-site/regionserver_handlers", 30)
+
+  $hregion_majorcompaction = hdp_default("hadoop/hbase-site/hregion_majorcompaction", 86400000)
+
+  $preloaded_mastercoprocessor_classes = hdp_default("hadoop/hbase-site/preloaded_mastercoprocessor_classes")
+
+  $preloaded_regioncoprocessor_classes = hdp_default("hadoop/hbase-site/preloaded_regioncoprocessor_classes")
+
+  $regionserver_memstore_lab = hdp_default("hadoop/hbase-site/regionserver_memstore_lab",true)
+
+  $regionserver_memstore_lowerlimit = hdp_default("hadoop/hbase-site/regionserver_memstore_lowerlimit","0.35")
+
+  $regionserver_memstore_upperlimit = hdp_default("hadoop/hbase-site/regionserver_memstore_upperlimit","0.4")
+
+}

+ 73 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/regionserver.pp

@@ -0,0 +1,73 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::regionserver(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hbase::params
+{
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {    
+    $hdp::params::service_exists['hdp-hbase::regionserver'] = true       
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+       $masterHost = $kerberos_adminclient_host[0]
+       hdp::download_keytab { 'hbase_rs_service_keytab' :
+         masterhost => $masterHost,
+         keytabdst => "${$keytab_path}/rs.service.keytab",
+         keytabfile => 'rs.service.keytab',
+         owner => $hdp::params::hbase_user
+       }
+    }
+
+    if ($hdp::params::service_exists['hdp-hbase::master'] != true) {
+      #adds package, users, directories, and common configs
+      class { 'hdp-hbase': 
+        type          => 'regionserver',
+        service_state => $service_state
+      } 
+      $create_pid_dir = true
+      $create_log_dir = true
+    } else {
+      $create_pid_dir = false
+      $create_log_dir = false
+    }
+
+
+    hdp-hbase::service{ 'regionserver':
+      ensure         => $service_state,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+
+    #top level does not need anchors
+    Class['hdp-hbase'] ->  Hdp-hbase::Service['regionserver']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+#assumes that master and regionserver will not be on the same machine
+class hdp-hbase::regionserver::enable-ganglia()
+{
+  Hdp-hbase::Configfile<|title  == 'hadoop-metrics.properties'|>{template_tag => 'GANGLIA-RS'}
+}

+ 76 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/service.pp

@@ -0,0 +1,76 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp-hbase::service(
+  $ensure = 'running',
+  $create_pid_dir = true,
+  $create_log_dir = true,
+  $initial_wait = undef)
+{
+  include hdp-hbase::params
+
+  $role = $name
+  $user = $hdp-hbase::params::hbase_user
+
+  $conf_dir = $hdp::params::hbase_conf_dir
+  $hbase_daemon = $hdp::params::hbase_daemon_script
+  $cmd = "$hbase_daemon --config ${conf_dir}"
+  $pid_dir = $hdp-hbase::params::hbase_pid_dir
+  $pid_file = "${pid_dir}/hbase-hbase-${role}.pid"
+
+  if ($ensure == 'running') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} start ${role}'"
+    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
+  } elsif ($ensure == 'stopped') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} stop ${role}'"
+    $no_op_test = undef
+  } else {
+    $daemon_cmd = undef
+  }
+
+  $tag = "hbase_service-${name}"
+  
+  if ($create_pid_dir == true) {
+    hdp::directory_recursive_create { $pid_dir: 
+      owner => $user,
+      tag   => $tag,
+      service_state => $ensure,
+      force => true
+    }
+  }
+  if ($create_log_dir == true) {
+    hdp::directory_recursive_create { $hdp-hbase::params::hbase_log_dir: 
+      owner => $user,
+      tag   => $tag,
+      service_state => $ensure,
+      force => true
+    }
+  }
+
+  anchor{"hdp-hbase::service::${name}::begin":} -> Hdp::Directory_recursive_create<|tag == $tag|> -> anchor{"hdp-hbase::service::${name}::end":}
+  if ($daemon_cmd != undef) { 
+    hdp::exec { $daemon_cmd:
+      command      => $daemon_cmd,
+      unless       => $no_op_test,
+      initial_wait => $initial_wait
+    }
+    Hdp::Directory_recursive_create<|context_tag == 'hbase_service'|> -> Hdp::Exec[$daemon_cmd] -> Anchor["hdp-hbase::service::${name}::end"]
+  }
+}
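
To make the interpolation above concrete: for the 'master' role with ensure => 'running', the daemon command and no-op test expand to roughly the following (the hbase user, daemon script path, and conf dir are assumptions; the pid dir is the module default):

    su - hbase -c '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase start master'
    ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1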

+ 26 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/zk-conn.pp

@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hbase::zk-conn(
+ $zookeeper_hosts
+)
+{
+  Hdp::Configfile<||>{zookeeper_hosts => $zookeeper_hosts}
+}

+ 50 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-MASTER.erb

@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663

+ 50 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-RS.erb

@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660

+ 50 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties.erb

@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663

+ 76 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb

@@ -0,0 +1,76 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME=<%=scope.function_hdp_java_home()%>
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-<%=scope.function_hdp_template_var("hbase_conf_dir")%>}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}:<%=scope.function_hdp_template_var("::hdp-hadoop::params::conf_dir")%>
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-ea -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode"
+
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+export HBASE_MASTER_OPTS="-Xmx<%=scope.function_hdp_template_var("hbase_master_heapsize")%>"
+export HBASE_REGIONSERVER_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseParNewGC -Xmn<%=scope.function_hdp_template_var("hbase_regionserver_xmn_size")%> -XX:CMSInitiatingOccupancyFraction=80 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:<%=scope.function_hdp_template_var("hbase_log_dir")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -Xms<%=scope.function_hdp_template_var("hbase_regionserver_heapsize")%> -Xmx<%=scope.function_hdp_template_var("hbase_regionserver_heapsize")%> -XX:ErrorFile=<%=scope.function_hdp_template_var("hbase_log_dir")%>/$USER/hs_err_pid%p.log"
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR=<%=scope.function_hdp_template_var("hbase_log_dir")%>
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR=<%=scope.function_hdp_template_var("hbase_pid_dir")%>
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false

+ 80 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/templates/log4j.properties.erb

@@ -0,0 +1,80 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Roll over at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=INFO
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.hbase=DEBUG
+# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
+#log4j.logger.org.apache.hadoop.dfs=DEBUG
+# Set this class to log INFO only, otherwise it's OTT
+
+# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
+#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
+
+# Uncomment the below if you want to remove logging of client region caching
+# and scan of .META. messages
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
+# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO

+ 3 - 0
ambari-agent/src/main/puppet/modules/hdp-hbase/templates/regionservers.erb

@@ -0,0 +1,3 @@
+<%h=scope.function_hdp_host("hbase_rs_hosts"); (h.kind_of?(Array) ? h : []).each do |host|-%>
+<%= host %>
+<%end-%>
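
Assuming hbase_rs_hosts resolves to two hosts (names hypothetical), the rendered regionservers file is simply one hostname per line:

    rs1.example.com
    rs2.example.com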

+ 23 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat-old/files/hcatSmoke.sh

@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+hcat -e 'show tables'
+hcat -e 'drop table IF EXISTS hcatsmoke'
+hcat -e 'create table hcatsmoke ( id INT, name string ) stored as rcfile ;'

+ 22 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat-old/files/hiveSmoke.sh

@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+echo 'CREATE EXTERNAL TABLE IF NOT EXISTS hivesmoke ( foo INT, bar STRING );' | hive
+echo 'DESCRIBE hivesmoke;' | hive

+ 35 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/client.pp

@@ -0,0 +1,35 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hcat::client(
+  $service_state = $hdp::params::cluster_client_state,
+  $hcat_server_host = undef
+) inherits hdp::params
+{ 
+  if ($service_state == 'no_op') {
+   } elsif ($service_state == 'installed_and_configured') {
+    include hdp-hcat #installs package, creates user, sets configuration
+    if ($hcat_server_host != undef) {
+      Hdp-Hcat::Configfile<||>{hcat_server_host => $hcat_server_host}
+    }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}

+ 54 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/hcat/service_check.pp

@@ -0,0 +1,54 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hcat::hcat::service_check() 
+{
+  $smoke_test_user = $hdp::params::smokeuser
+  $output_file = "/apps/hive/warehouse/hcatsmoke"
+
+  $test_cmd = "fs -test -e ${output_file}" 
+  
+  anchor { 'hdp-hcat::hcat::service_check::begin':}
+
+  file { '/tmp/hcatSmoke.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp-hcat/hcatSmoke.sh",
+    mode => '0755',
+  }
+
+  exec { '/tmp/hcatSmoke.sh':
+    command   => "su - ${smoke_test_user} -c 'sh /tmp/hcatSmoke.sh'",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/hcatSmoke.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    notify    => Hdp-hadoop::Exec-hadoop['hcat::service_check::test'],
+    logoutput => "true"
+  }
+
+  hdp-hadoop::exec-hadoop { 'hcat::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Exec['/tmp/hcatSmoke.sh'],
+    before      => Anchor['hdp-hcat::hcat::service_check::end'] 
+  }
+  
+  anchor{ 'hdp-hcat::hcat::service_check::end':}
+}

+ 54 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/hive/service_check.pp

@@ -0,0 +1,54 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hcat::hive::service_check() 
+{
+  $smoke_test_user = $hdp::params::smokeuser
+  $output_file = "/apps/hive/warehouse/hivesmoke"
+
+  $test_cmd = "fs -test -e ${output_file}" 
+  
+  anchor { 'hdp-hcat::hive::service_check::begin':}
+
+  file { '/tmp/hiveSmoke.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp-hcat/hiveSmoke.sh",
+    mode => '0755',
+  }
+
+  exec { '/tmp/hiveSmoke.sh':
+    command   => "su - ${smoke_test_user} -c 'sh /tmp/hiveSmoke.sh'",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/hiveSmoke.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    notify    => Hdp-hadoop::Exec-hadoop['hive::service_check::test'],
+    logoutput => "true"
+  }
+
+  hdp-hadoop::exec-hadoop { 'hive::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Exec['/tmp/hiveSmoke.sh'],
+    before      => Anchor['hdp-hcat::hive::service_check::end'] 
+  }
+  
+  anchor{ 'hdp-hcat::hive::service_check::end':}
+}

+ 70 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp

@@ -0,0 +1,70 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hcat(
+  $server = false
+) 
+{
+  include hdp-hcat::params
+  include manifestloader
+
+#Configs generation  
+
+  configgenerator::configfile{'hive-site.xml': 
+    module => 'hdp-hcat-old',
+    properties => $manifestloader::hdp_hcat_old_hive_site_props
+  }
+
+  $hcat_user = $hdp::params::hcat_user
+  $hcat_config_dir = $hdp-hcat::params::hcat_conf_dir
+ 
+  hdp::package { 'hcat-base' : }
+  if ($server == true ) {
+    hdp::package { 'hcat-server':} 
+    class { 'hdp-hcat::mysql-connector': }
+  }
+  
+  hdp::user{ $hcat_user:}
+  
+  hdp::directory { $hcat_config_dir: }
+
+  hdp-hcat::configfile { ['hcat-env.sh','hive-env.sh','hive-site.xml']: }
+  
+  anchor { 'hdp-hcat::begin': } -> Hdp::Package['hcat-base'] -> Hdp::User[$hcat_user] -> 
+   Hdp::Directory[$hcat_config_dir] -> Hdp-hcat::Configfile<||> ->  anchor { 'hdp-hcat::end': }
+
+   if ($server == true ) {
+     Hdp::Package['hcat-base'] -> Hdp::Package['hcat-server'] ->  Hdp::User[$hcat_user] -> Class['hdp-hcat::mysql-connector'] -> Anchor['hdp-hcat::end']
+  }
+}
+
+### config files
+define hdp-hcat::configfile(
+  $mode = undef,
+  $hcat_server_host = undef
+) 
+{
+  hdp::configfile { "${hdp-hcat::params::hcat_conf_dir}/${name}":
+    component        => 'hcat',
+    owner            => $hdp::params::hcat_user,
+    mode             => $mode,
+    hcat_server_host => $hcat_server_host 
+  }
+}

+ 46 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/mysql-connector.pp

@@ -0,0 +1,46 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hcat::mysql-connector()
+{
+  include hdp-hcat::params
+
+  $url = $hdp-hcat::params::mysql_connector_url
+  $zip_name = regsubst($url,'^.+/([^/]+$)','\1')
+  $jar_name = regsubst($zip_name,'\.zip$','-bin.jar')
+  $target = "${hdp::params::artifact_dir}/${zip_name}"
+  $hcat_lib = $hdp-hcat::params::hcat_lib
+  
+  exec{ "curl ${url}":
+    command => "mkdir -p ${hdp::params::artifact_dir} ; curl -f --retry 10 ${url} -o ${target} ",
+    creates => $target,
+    path    => ["/bin","/usr/bin/"]
+  }
+  exec{ "unzip ${target}":
+    command => "unzip -o -j ${target} '*.jar' -x */lib/*",
+    cwd     => $hcat_lib,
+    user    => $hdp::params::hcat_user,
+    group   => $hdp::params::hadoop_user_group,
+    creates => "${hcat_lib}/${jar_name}",
+    path    => ["/bin","/usr/bin/"]
+  }
+
+  Exec["curl ${url}"] -> Exec["unzip ${target}"]
+}
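
Note: a worked illustration of the two regsubst calls above, evaluated against the default $mysql_connector_url from params.pp (these are derived values, not new configuration):

    # $zip_name = 'mysql-connector-java-5.1.18.zip'      # capture group: everything after the last '/'
    # $jar_name = 'mysql-connector-java-5.1.18-bin.jar'  # '.zip' suffix replaced by '-bin.jar'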

+ 59 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/params.pp

@@ -0,0 +1,59 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hcat::params() inherits hdp::params
+{
+
+  #TODO: will move to globals
+  $hcat_metastore_user_name = hdp_default("hadoop/hive-site/hcat_metastore_user_name","dbusername")
+  $hcat_metastore_user_passwd = hdp_default("hadoop/hive-site/hcat_metastore_user_passwd","dbpassword")
+ 
+ ####### users
+ 
+  
+  ### common
+  $hcat_metastore_port = hdp_default("hcat_metastore_port",9933)
+  $hcat_lib = hdp_default("hcat_lib","/usr/share/hcatalog/lib") #TODO: should I remove and just use hcat_dbroot
+
+  ### hcat-env
+  $hcat_conf_dir = hdp_default("hadoop/hcat-env/hcat_conf_dir","/etc/hcatalog")
+
+  $hcat_dbroot = hdp_default("hadoop/hcat-env/hcat_dbroot",$hcat_lib)
+
+  $hcat_log_dir = hdp_default("hadoop/hcat-env/hcat_log_dir","/var/log/hcatalog")
+
+  $hcat_pid_dir = hdp_default("hadoop/hcat-env/hcat_pid_dir","/var/run/hcatalog")
+#  $hcat_pid_dir = "${hcat_piddirprefix}/${hdp::params::hcat_user}"
+  
+  ### hive-site
+  $hcat_database_name = hdp_default("hadoop/hive-site/hcat_database_name","hive")
+
+  $hcat_metastore_principal = hdp_default("hadoop/hive-site/hcat_metastore_principal")
+
+  $hcat_metastore_sasl_enabled = hdp_default("hadoop/hive-site/hcat_metastore_sasl_enabled","false")
+
+  #TODO: use hcat_server_host from hdp::params instead
+  #$hcat_metastore_server_host = hdp_default("hadoop/hive-site/hcat_metastore_server_host")
+
+  $keytab_path = hdp_default("hadoop/hive-site/keytab_path")
+  
+  ###mysql connector
+  $download_url = $hdp::params::gpl_artifacts_download_url
+  $mysql_connector_url = "${download_url}/mysql-connector-java-5.1.18.zip"
+}

+ 61 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/server.pp

@@ -0,0 +1,61 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hcat::server(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits  hdp-hcat::params
+{ 
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured']) { 
+    class{ 'hdp-hcat' : server => true} #installs package, creates user, sets configuration
+  
+    Hdp-Hcat::Configfile<||>{hcat_server_host => $hdp::params::host_address}
+
+    class { 'hdp-hcat::hdfs-directories' : 
+      service_state => $service_state
+    }
+
+    class { 'hdp-hcat::service' :
+      ensure => $service_state
+    }
+  
+    #top level does not need anchors
+    Class['hdp-hcat'] -> Class['hdp-hcat::hdfs-directories'] -> Class['hdp-hcat::service']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+class hdp-hcat::hdfs-directories($service_state)
+{
+  $hcat_user = $hdp::params::hcat_user
+ 
+  hdp-hadoop::hdfs::directory{ '/apps/hive/warehouse':
+    service_state   => $service_state,
+    owner            => $hcat_user,
+    mode             => '770',
+    recursive_chmod  => true
+  }  
+  hdp-hadoop::hdfs::directory{ "/usr/${hcat_user}":
+    service_state => $service_state,
+    owner         => $hcat_user
+  }
+}

+ 65 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/service.pp

@@ -0,0 +1,65 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hcat::service(
+  $ensure,
+  $initial_wait = undef
+)
+{
+  include hdp-hcat::params
+  
+  $user = $hdp::params::hcat_user
+  $hadoop_home = $hdp::params::hadoop_home
+  $cmd = "env HADOOP_HOME=${hadoop_home} /usr/sbin/hcat_server.sh"
+  $pid_file = "${hdp-hcat::params::hcat_pid_dir}/hcat.pid" 
+
+  if ($ensure == 'running') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} start'"
+    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
+  } elsif ($ensure == 'stopped') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} stop'"
+    $no_op_test = undef
+  } else {
+    $daemon_cmd = undef
+  }
+
+  hdp-hcat::service::directory { $hdp-hcat::params::hcat_pid_dir : }
+  hdp-hcat::service::directory { $hdp-hcat::params::hcat_log_dir : }
+
+  anchor{'hdp-hcat::service::begin':} -> Hdp-hcat::Service::Directory<||> -> anchor{'hdp-hcat::service::end':}
+  
+  if ($daemon_cmd != undef) {
+    hdp::exec { $daemon_cmd:
+      command => $daemon_cmd,
+      unless  => $no_op_test,
+      initial_wait => $initial_wait
+    }
+    Hdp-hcat::Service::Directory<||> -> Hdp::Exec[$daemon_cmd] -> Anchor['hdp-hcat::service::end']
+  }
+}
+
+define hdp-hcat::service::directory()
+{
+  hdp::directory_recursive_create { $name: 
+    owner => $hdp::params::hcat_user,
+    mode => '0755'
+  }
+}
+

+ 25 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat-old/templates/hcat-env.sh.erb

@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME=<%=scope.function_hdp_java_home()%>
+HCAT_PID_DIR=<%=scope.function_hdp_template_var("hcat_pid_dir")%>/
+HCAT_LOG_DIR=<%=scope.function_hdp_template_var("hcat_log_dir")%>/
+HCAT_CONF_DIR=<%=scope.function_hdp_template_var("hcat_conf_dir")%>
+HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}
+#DBROOT is the path where the connector jars are downloaded
+DBROOT=<%=scope.function_hdp_template_var("hcat_dbroot")%>
+USER=<%=scope.function_hdp_user("hcat_user")%>
+METASTORE_PORT=<%=scope.function_hdp_template_var("hcat_metastore_port")%>

+ 53 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat-old/templates/hive-env.sh.erb

@@ -0,0 +1,53 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hive and Hadoop environment variables here. These variables can be used
+# to control the execution of Hive. It should be used by admins to configure
+# the Hive installation (so that users do not have to set environment variables
+# or set command line parameters to get correct behavior).
+#
+# The hive service being invoked (CLI/HWI etc.) is available via the environment
+# variable SERVICE
+
+# Hive Client memory usage can be an issue if a large number of clients
+# are running at the same time. The flags below have been useful in
+# reducing memory usage:
+#
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the JVM started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
+
+# A larger heap size may be required when running queries over a large number of files or
+# partitions. By default, hive shell scripts use a heap size of 256 MB. A larger heap size
+# would also be appropriate for the hive server (hwi etc.).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR=<%=scope.function_hdp_template_var("hcat_conf_dir")%>
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+# export HIVE_AUX_JARS_PATH=

+ 24 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat/files/hcatSmoke.sh

@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+hcat -e "show tables"
+hcat -e "drop table IF EXISTS ${tablename}"
+hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"

+ 24 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat/files/pigSmoke.sh

@@ -0,0 +1,24 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

+ 64 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/hcat/service_check.pp

@@ -0,0 +1,64 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hcat::hcat::service_check() 
+{
+  include hdp-hcat::params
+  $unique = hdp_unique_id_and_date()
+  $smoke_test_user = $hdp::params::smokeuser
+  $output_file = "/apps/hive/warehouse/hcatsmoke${unique}"
+  $security_enabled=$hdp::params::security_enabled
+  $smoke_user_keytab = "${hdp-hcat::params::keytab_path}/${smoke_test_user}.headless.keytab"
+
+  if ($security_enabled == true) {
+    $smoke_user_kinitcmd="/usr/kerberos/bin/kinit  -kt ${smoke_user_keytab} ${smoke_test_user}; "
+  } else {
+    $smoke_user_kinitcmd=""
+  }
+
+  $test_cmd = "fs -test -e ${output_file}" 
+  
+  anchor { 'hdp-hcat::hcat::service_check::begin':}
+
+  file { '/tmp/hcatSmoke.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp-hcat/hcatSmoke.sh",
+    mode => '0755',
+  }
+
+  exec { '/tmp/hcatSmoke.sh':
+    command   => "su - ${smoke_test_user} -c '${smoke_user_kinitcmd}sh /tmp/hcatSmoke.sh hcatsmoke${unique}'",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/hcatSmoke.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    notify    => Hdp-hadoop::Exec-hadoop['hcat::service_check::test'],
+    logoutput => "true"
+  }
+
+  hdp-hadoop::exec-hadoop { 'hcat::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Exec['/tmp/hcatSmoke.sh'],
+    before      => Anchor['hdp-hcat::hcat::service_check::end'] 
+  }
+  
+  anchor{ 'hdp-hcat::hcat::service_check::end':}
+}
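
Note: on a secure cluster the kinit prefix is folded into the su command of the exec above. A sketch of the evaluated command string, assuming a hypothetical smoke user ambari_qa and the default keytab path from params.pp:

    su - ambari_qa -c '/usr/kerberos/bin/kinit  -kt /etc/security/keytabs/ambari_qa.headless.keytab ambari_qa; sh /tmp/hcatSmoke.sh hcatsmoke<unique>'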

+ 71 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp

@@ -0,0 +1,71 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hcat(
+  $service_state = $hdp::params::cluster_client_state
+) inherits hdp-hcat::params
+{
+  $hcat_config_dir = $hdp-hcat::params::hcat_conf_dir
+
+  if ($hdp::params::use_32_bits_on_slaves == false) {
+    $size = 64
+  } else {
+    $size = 32
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state == 'uninstalled') {
+    hdp::package { 'hcat' :
+      ensure => 'uninstalled', 
+      size   => $size
+    }
+
+    hdp::directory { $hcat_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Hdp::Package['hcat'] -> Hdp::Directory[$hcat_config_dir]
+
+  } elsif ($service_state == 'installed_and_configured') {
+    hdp::package { 'hcat' : 
+      size => $size
+    }
+
+    hdp::directory { $hcat_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp-hcat::configfile { 'hcat-env.sh':}
+  
+    Hdp::Package['hcat'] -> Hdp::Directory[$hcat_config_dir] -> Hdp-hcat::Configfile<||> 
+ } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+### config files
+define hdp-hcat::configfile()
+{
+  hdp::configfile { "${hdp::params::hcat_conf_dir}/${name}":
+    component => 'hcat'
+  }
+}
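
Note: a minimal declaration sketch for this class, assuming it is driven from a site manifest; the only service_state values handled above are 'no_op', 'uninstalled' and 'installed_and_configured':

    class { 'hdp-hcat':
      service_state => 'installed_and_configured',
    }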

+ 36 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/params.pp

@@ -0,0 +1,36 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hcat::params() inherits hdp::params
+{
+  $hcat_conf_dir = $hdp::params::hcat_conf_dir
+
+  $hcat_metastore_port = hdp_default("hcat_metastore_port",9933)
+  $hcat_lib = hdp_default("hcat_lib","/usr/lib/hcatalog/share/hcatalog") #TODO: should I remove and just use hcat_dbroot
+
+  ### hcat-env
+  $hcat_dbroot = hdp_default("hadoop/hcat-env/hcat_dbroot",$hcat_lib)
+
+  $hcat_log_dir = hdp_default("hadoop/hcat-env/hcat_log_dir","/var/log/hcatalog")
+
+  $hcat_pid_dir = hdp_default("hadoop/hcat-env/hcat_pid_dir","/var/run/hcatalog")
+
+  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
+}

+ 25 - 0
ambari-agent/src/main/puppet/modules/hdp-hcat/templates/hcat-env.sh.erb

@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME=<%=scope.function_hdp_java_home()%>
+HCAT_PID_DIR=<%=scope.function_hdp_template_var("hcat_pid_dir")%>/
+HCAT_LOG_DIR=<%=scope.function_hdp_template_var("hcat_log_dir")%>/
+HCAT_CONF_DIR=<%=scope.function_hdp_template_var("hcat_conf_dir")%>
+HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}
+#DBROOT is the path where the connector jars are downloaded
+DBROOT=<%=scope.function_hdp_template_var("hcat_dbroot")%>
+USER=<%=scope.function_hdp_user("hcat_user")%>
+METASTORE_PORT=<%=scope.function_hdp_template_var("hcat_metastore_port")%>

+ 23 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveSmoke.sh

@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
+echo "DESCRIBE ${tablename};" | hive

+ 40 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/client.pp

@@ -0,0 +1,40 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hive::client(
+  $service_state = $hdp::params::cluster_client_state,
+  $hive_server_host = undef
+) inherits hdp::params
+{ 
+  if ($service_state == 'no_op') {
+   } elsif ($service_state in ['installed_and_configured','uninstalled']) {
+    if ($hdp::params::service_exists['hdp-hive::server'] != true) {
+      #installs package, creates user, sets configuration
+      class { 'hdp-hive':
+        service_state => $service_state
+      } 
+      if ($hive_server_host != undef) {
+        Hdp-Hive::Configfile<||>{hive_server_host => $hive_server_host}
+      }
+    }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
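
Note: the Hdp-Hive::Configfile<||> collector override above rewrites hive_server_host on every hdp-hive::configfile resource already declared by the hdp-hive class. A minimal declaration sketch, with a hypothetical metastore host:

    class { 'hdp-hive::client':
      service_state    => 'installed_and_configured',
      hive_server_host => 'hivemeta.example.com',  # hypothetical; defaults to undef
    }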

+ 55 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/hive/service_check.pp

@@ -0,0 +1,55 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hive::hive::service_check() 
+{
+  $unique = hdp_unique_id_and_date()
+  $smoke_test_user = $hdp::params::smokeuser
+  $output_file = "/apps/hive/warehouse/hivesmoke${unique}"
+
+  $test_cmd = "fs -test -e ${output_file}" 
+  
+  anchor { 'hdp-hive::hive::service_check::begin':}
+
+  file { '/tmp/hiveSmoke.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp-hive/hiveSmoke.sh",
+    mode => '0755',
+  }
+
+  exec { '/tmp/hiveSmoke.sh':
+    command   => "su - ${smoke_test_user} -c 'sh /tmp/hiveSmoke.sh hivesmoke${unique}'",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/hiveSmoke.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    notify    => Hdp-hadoop::Exec-hadoop['hive::service_check::test'],
+    logoutput => "true"
+  }
+
+  hdp-hadoop::exec-hadoop { 'hive::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Exec['/tmp/hiveSmoke.sh'],
+    before      => Anchor['hdp-hive::hive::service_check::end'] 
+  }
+  
+  anchor{ 'hdp-hive::hive::service_check::end':}
+}

+ 88 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp

@@ -0,0 +1,88 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hive(
+  $service_state,
+  $server = false
+) 
+{
+  include hdp-hive::params
+  include manifestloader
+
+  $hive_user = $hdp-hive::params::hive_user
+  $hive_config_dir = $hdp-hive::params::hive_conf_dir
+
+  configgenerator::configfile{'hive-site.xml.erb': 
+    module => 'hdp-hive',
+    properties => $manifestloader::hdp_hive_hive_site_props
+  }
+
+  anchor { 'hdp-hive::begin': }
+  anchor { 'hdp-hive::end': } 
+
+  if ($service_state == 'uninstalled') {
+    hdp::package { 'hive' : 
+      ensure => 'uninstalled'
+    }
+
+    hdp::directory { $hive_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Anchor['hdp-hive::begin'] -> Hdp::Package['hive'] -> Hdp::Directory[$hive_config_dir] ->  Anchor['hdp-hive::end']
+
+  } else {
+    hdp::package { 'hive' : }
+    if ($server == true ) {
+      class { 'hdp-hive::mysql-connector': }
+    }
+  
+    hdp::user{ $hive_user:}
+  
+    hdp::directory { $hive_config_dir: 
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp-hive::configfile { ['hive-env.sh','hive-site.xml']: }
+  
+    Anchor['hdp-hive::begin'] -> Hdp::Package['hive'] -> Hdp::User[$hive_user] ->  
+     Hdp::Directory[$hive_config_dir] -> Hdp-hive::Configfile<||> ->  Anchor['hdp-hive::end']
+
+     if ($server == true ) {
+       Hdp::Package['hive'] -> Hdp::User[$hive_user] -> Class['hdp-hive::mysql-connector'] -> Anchor['hdp-hive::end']
+    }
+  }
+}
+
+### config files
+define hdp-hive::configfile(
+  $mode = undef,
+  $hive_server_host = undef
+) 
+{
+  hdp::configfile { "${hdp-hive::params::hive_conf_dir}/${name}":
+    component        => 'hive',
+    owner            => $hdp-hive::params::hive_user,
+    mode             => $mode,
+    hive_server_host => $hive_server_host 
+  }
+}

+ 45 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/mysql-connector.pp

@@ -0,0 +1,45 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hive::mysql-connector()
+{
+  include hdp-hive::params
+
+  $hive_lib = $hdp-hive::params::hive_lib
+  $target = "${hive_lib}/mysql-connector-java.jar"
+  $artifact_dir = $hdp::params::artifact_dir
+  
+  anchor { 'hdp-hive::mysql-connector::begin':}
+
+   hdp::package { 'mysql-connector' :
+     require   => Anchor['hdp-hive::mysql-connector::begin']
+   }
+
+   hdp::exec { "hive mkdir -p ${artifact_dir} ; cp /usr/share/java/mysql-connector-java.jar ${target}":
+       command => "mkdir -p ${artifact_dir} ;  cp /usr/share/java/mysql-connector-java.jar  ${target}",
+       unless  => "test -f ${target}",
+       creates => $target,
+       path    => ["/bin","/usr/bin/"],
+       require => Hdp::Package['mysql-connector'],
+       notify  =>  Anchor['hdp-hive::mysql-connector::end'],
+   }
+
+   anchor { 'hdp-hive::mysql-connector::end':}
+
+}

+ 62 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/params.pp

@@ -0,0 +1,62 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hive::params() inherits hdp::params
+{
+
+  #TODO: will move to globals
+  $hive_metastore_user_name = hdp_default("hadoop/hive-site/hive_metastore_user_name","dbusername")
+  $hive_metastore_user_passwd = hdp_default("hadoop/hive-site/hive_metastore_user_passwd","dbpassword")
+
+  ### users
+  $hive_user = $hdp::params::hive_user 
+
+  ### common
+  $hive_metastore_port = hdp_default("hive_metastore_port",9083)
+  $hive_lib = hdp_default("hive_lib","/usr/lib/hive/lib/") #TODO: should I remove and just use hive_dbroot
+
+  ### hive-env
+  $hive_conf_dir = $hdp::params::hive_conf_dir
+
+  $hive_dbroot = hdp_default("hadoop/hive-env/hive_dbroot",$hive_lib)
+
+  $hive_log_dir = hdp_default("hadoop/hive-env/hive_log_dir","/var/log/hive")
+
+  $hive_pid_dir = hdp_default("hadoop/hive-env/hive_pid_dir","/var/run/hive")
+  
+  ### hive-site
+  $hive_database_name = hdp_default("hadoop/hive-site/hive_database_name","hive")
+
+  if ($hdp::params::security_enabled == true) {
+    $hive_metastore_sasl_enabled = "true"
+  } else {
+    $hive_metastore_sasl_enabled = "false"
+  }
+
+  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
+
+  #TODO: use hive_server_host from hdp::params instead
+  #$hive_metastore_server_host = hdp_default("hadoop/hive-site/hive_metastore_server_host")
+  
+  ###mysql connector
+  $download_url = $hdp::params::gpl_artifacts_download_url
+  $mysql_connector_url = "${download_url}/mysql-connector-java-5.1.18.zip"
+  $hive_aux_jars_path =  hdp_default("hive_aux_jars_path","/usr/lib/hcatalog/share/hcatalog/hcatalog-0.4.0.14.jar")
+}

+ 60 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/server.pp

@@ -0,0 +1,60 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hive::server(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits  hdp-hive::params
+{ 
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+
+    $hdp::params::service_exists['hdp-hive::server'] = true
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'hive_server_service_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${keytab_path}/hive.service.keytab",
+        keytabfile => 'hive.service.keytab',
+        owner => $hdp-hive::params::hive_user
+      }
+    }
+
+    #installs package, creates user, sets configuration
+    class{ 'hdp-hive' : 
+      service_state => $service_state,
+      server        => true
+    } 
+  
+    Hdp-Hive::Configfile<||>{hive_server_host => $hdp::params::host_address}
+
+    class { 'hdp-hive::service' :
+      ensure => $service_state
+    }
+  
+    #top level does not need anchors
+    Class['hdp-hive'] -> Class['hdp-hive::service']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}

+ 74 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/service.pp

@@ -0,0 +1,74 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hive::service(
+  $ensure
+)
+{
+  include hdp-hive::params
+  
+  $user = $hdp-hive::params::hive_user
+  $hadoop_home = $hdp::params::hadoop_home
+  $hive_log_dir = $hdp-hive::params::hive_log_dir
+  $cmd = "env HADOOP_HOME=${hadoop_home} nohup hive --service metastore > ${hive_log_dir}/hive.out 2> ${hive_log_dir}/hive.log &"
+  $pid_file = "${hdp-hive::params::hive_pid_dir}/hive.pid" 
+
+  if ($ensure == 'running') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} '"
+    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
+  } elsif ($ensure == 'stopped') {
+    #TODO: this needs to be fixed
+    $daemon_cmd = "ps aux | awk '{print \$1,\$2}' | grep \"^${user} \" | awk '{print \$2}' | xargs kill >/dev/null 2>&1"
+    $no_op_test = "ps aux | awk '{print \$1,\$2}' | grep \"^${user} \""
+  } else {
+    $daemon_cmd = undef
+  }
+
+  hdp-hive::service::directory { $hdp-hive::params::hive_pid_dir : }
+  hdp-hive::service::directory { $hdp-hive::params::hive_log_dir : }
+
+  anchor{'hdp-hive::service::begin':} -> Hdp-hive::Service::Directory<||> -> anchor{'hdp-hive::service::end':}
+  
+  if ($daemon_cmd != undef) {
+    if ($ensure == 'running') {
+      hdp::exec { $daemon_cmd:
+        command => $daemon_cmd,
+        unless  => $no_op_test
+      }
+    } elsif ($ensure == 'stopped') {
+      hdp::exec { $daemon_cmd:
+        command => $daemon_cmd,
+        onlyif  => $no_op_test
+      }
+    }
+    Hdp-hive::Service::Directory<||> -> Hdp::Exec[$daemon_cmd] -> Anchor['hdp-hive::service::end']
+  }
+}
+
+define hdp-hive::service::directory()
+{
+  hdp::directory_recursive_create { $name: 
+    owner => $hdp-hive::params::hive_user,
+    mode => '0755',
+    service_state => $ensure,
+    force => true
+  }
+}
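
Note: a minimal sketch of driving this class on its own; in the module it is declared by hdp-hive::server, which passes $service_state through as ensure:

    class { 'hdp-hive::service':
      ensure => 'running',
    }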
+

+ 54 - 0
ambari-agent/src/main/puppet/modules/hdp-hive/templates/hive-env.sh.erb

@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hive and Hadoop environment variables here. These variables can be used
+# to control the execution of Hive. It should be used by admins to configure
+# the Hive installation (so that users do not have to set environment variables
+# or set command line parameters to get correct behavior).
+#
+# The hive service being invoked (CLI/HWI etc.) is available via the environment
+# variable SERVICE
+
+# Hive Client memory usage can be an issue if a large number of clients
+# are running at the same time. The flags below have been useful in
+# reducing memory usage:
+#
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the JVM started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
+
+# A larger heap size may be required when running queries over a large number of files or
+# partitions. By default, hive shell scripts use a heap size of 256 MB. A larger heap size
+# would also be appropriate for the hive server (hwi etc.).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR=<%=scope.function_hdp_template_var("hive_conf_dir")%>
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+# export HIVE_AUX_JARS_PATH=
+export HIVE_AUX_JARS_PATH=<%=scope.function_hdp_template_var("hive_aux_jars_path")%>

+ 21 - 0
ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/facter/kadm_keytab.rb

@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'facter'
+Facter.add("kadm_keytab") do
+  setcode do
+     %x{[ -f /etc/kadm5.keytab ] && base64 </etc/kadm5.keytab 2>/dev/null} + "\n"
+  end
+end

+ 34 - 0
ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/puppet/parser/functions/kerberos_keytabs_input.rb

@@ -0,0 +1,34 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+module Puppet::Parser::Functions
+  newfunction(:kerberos_keytabs_input, :type => :rvalue) do |args|
+    fqdn,node_components,keytab_map = args 
+    ndx_ret = Hash.new
+    node_components.each do |cmp|
+      if info = keytab_map[cmp]
+        keytab = info["keytab"]
+        ndx_ret[keytab] ||= {"keytab" => keytab, "principals" => info["primaries"].map{|p|"#{p}/#{fqdn}"}}
+      end
+    end
+    ndx_ret.values
+  end
+end
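
Note: a worked illustration of the function's input and output shapes, with hypothetical arguments; keys of the keytab map are component names, and each "primaries" entry is qualified with the node's fqdn:

    # kerberos_keytabs_input('host1.example.com', ['namenode'],
    #   {'namenode' => {'keytab' => 'nn.service.keytab', 'primaries' => ['nn', 'host']}})
    # => [{'keytab' => 'nn.service.keytab',
    #      'principals' => ['nn/host1.example.com', 'host/host1.example.com']}]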

+ 140 - 0
ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/adminclient.pp

@@ -0,0 +1,140 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+class hdp-kerberos::adminclient(
+  $service_state = $hdp::params::cluster_service_state
+) inherits hdp-kerberos::params
+{
+  import 'hdp'
+
+  $kadmin_pw = "bla123"
+  $kadmin_admin = "kadmin/admin"
+  $realm = $kerberos_domain
+  $krb_realm = $kerberos_domain
+  $hdp::params::service_exists['hdp-kerberos::adminclient'] = true
+  $krbContext = {}
+  $krbContext['kadmin_pw'] = $kadmin_pw
+  $krbContext['kadmin_admin'] = $kadmin_admin
+  $krbContext['realm' ] = $kerberos_domain
+  $krbContext['local_or_remote'] = 'remote'
+  $krbContext['principals_to_create'] = $principals_to_create
+  $krbContext['keytabs_to_create'] = $keytabs_to_create
+  $krbContext['principals_in_keytabs'] = $principals_in_keytabs
+
+  $kdc_server = $kdc_host
+
+  package { $package_name_client:
+    ensure => installed,
+  }
+  if ($hdp::params::service_exists['hdp-kerberos::server'] != true) {
+    file { "/etc/krb5.conf":
+      content => template('hdp-kerberos/krb5.conf'),
+      owner => "root",
+      group => "root",
+      mode => "0644",
+      require => Package[$package_name_client],
+    }
+  }
+ 
+  if ($create_principals_keytabs == "yes") {
+    notice("Creating principals and keytabs..")
+    hdp-kerberos::principals_and_keytabs::services { 'alphabeta': 
+      krb_context => $krbContext
+    }
+  }
+}
+
+
+define hdp-kerberos::principals_and_keytabs::services(
+  $krb_context
+)
+{
+  include hdp-kerberos::params
+  $principals_to_create = $krb_context[principals_to_create]
+  $keytabs_to_create = $krb_context[keytabs_to_create]
+
+  hdp-kerberos::principal {$principals_to_create:
+    krb_context => $krb_context,
+  }
+  
+  hdp-kerberos::keytab { $keytabs_to_create :
+    krb_context => $krb_context,
+    require => Hdp-kerberos::Principal[$principals_to_create]
+  }
+}
+
+define hdp-kerberos::keytab(
+  $krb_context,
+  $keytable_file_owner = undef,
+  $keytable_file_mode  = undef
+)
+{
+  include hdp-kerberos::params
+  $keytab = $name
+  $realm = $krb_context['realm']
+  $local_or_remote = $krb_context['local_or_remote']
+  $kadmin_pw = $krb_context['kadmin_pw']
+  $kadmin_admin = $krb_context['kadmin_admin']
+  $kadmin_cmd = "kadmin -w ${kadmin_pw} -p ${kadmin_admin}"
+  if ($local_or_remote == 'local') {
+    $kadmin_cmd = 'kadmin.local'
+  }
+  $principals_in_keytabs = $krb_context['principals_in_keytabs']
+
+  $principals = $principals_in_keytabs[$keytab]
+  $principals_list = inline_template("<%= principals.join(' ')%>")
+  $keytab_filename = $keytab
+
+  exec { "xst ${keytab}":
+    command => "rm -rf ${keytab_filename}; ${kadmin_cmd} -q 'xst -k ${keytab_filename} ${principals_list}'; chown puppet:apache ${keytab_filename}",
+    unless  => "klist -kt ${keytab_filename} 2>/dev/null | grep -q ' ${principals[0]}'", #TODO: make this test more robust
+    path   => $hdp-kerberos::params::exec_path,
+  }
+
+  if (($keytable_file_owner != undef) or ($keytable_file_mode != undef)) {
+    file { $keytab_filename:
+      owner => $keytable_file_owner,
+      mode  => $keytable_file_mode,
+      require => Exec["xst ${keytab}"]
+    }
+  }
+}
+
+define hdp-kerberos::principal(
+  $krb_context
+)
+{
+  include hdp-kerberos::params
+  $realm = $krb_context['realm']
+  $local_or_remote = $krb_context['local_or_remote']
+  $kadmin_pw = $krb_context['kadmin_pw']
+  $kadmin_admin = $krb_context['kadmin_admin']
+  $kadmin_cmd =  "kadmin -w ${kadmin_pw} -p ${kadmin_admin}"
+  if ($local_or_remote == 'local') {
+    $kadmin_cmd = 'kadmin.local'
+  }
+  $principal = $name
+  exec { "addprinc ${principal}":
+    command => "${kadmin_cmd} -q 'addprinc -randkey ${principal}'",
+    unless => "${kadmin_cmd} -q listprincs | grep -q '^${principal}$'",
+    path => $hdp-kerberos::params::exec_path
+  }
+}
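
Note: a minimal usage sketch for the two defines above, reusing the $krbContext hash assembled by the adminclient class (the principal and keytab names are hypothetical):

    hdp-kerberos::principal { 'nn/host1.example.com':
      krb_context => $krbContext,
    }
    hdp-kerberos::keytab { 'nn.service.keytab':
      krb_context => $krbContext,
      require     => Hdp-kerberos::Principal['nn/host1.example.com'],
    }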

+ 217 - 0
ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/bigtop/init.pp

@@ -0,0 +1,217 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+class kerberos {
+  class site {
+    # The following is our interface to the world. This is what we allow
+    # users to tweak from the outside (see tests/init.pp for a complete
+    # example) before instantiating target classes.
+    # Once we migrate to Puppet 2.6 we can potentially start using 
+    # parametrized classes instead.
+    $domain     = $kerberos_domain     ? { '' => inline_template('<%= domain %>'),
+                                           default => $kerberos_domain }
+    $realm      = $kerberos_realm      ? { '' => inline_template('<%= domain.upcase %>'),
+                                           default => $kerberos_realm } 
+    $kdc_server = $kerberos_kdc_server ? { '' => 'localhost',
+                                           default => $kerberos_kdc_server }
+    $kdc_port   = $kerberos_kdc_port   ? { '' => '88', 
+                                           default => $kerberos_kdc_port } 
+    $admin_port = 749 /* BUG: linux daemon packaging doesn't let us tweak this */
+
+    $keytab_export_dir = "/var/lib/bigtop_keytabs"
+
+    case $operatingsystem {
+        'ubuntu': {
+            $package_name_kdc    = 'krb5-kdc'
+            $service_name_kdc    = 'krb5-kdc'
+            $package_name_admin  = 'krb5-admin-server'
+            $service_name_admin  = 'krb5-admin-server'
+            $package_name_client = 'krb5-user'
+            $exec_path           = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
+            $kdc_etc_path        = '/etc/krb5kdc'
+        }
+        # default assumes the CentOS / Red Hat 5 series (note how inconsistent the package and service names are)
+        default: {
+            $package_name_kdc    = 'krb5-server'
+            $service_name_kdc    = 'krb5kdc'
+            $package_name_admin  = 'krb5-libs'
+            $service_name_admin  = 'kadmin'
+            $package_name_client = 'krb5-workstation'
+            $exec_path           = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/kerberos/sbin:/usr/kerberos/bin'
+            $kdc_etc_path        = '/var/kerberos/krb5kdc'
+        }
+    }
+
+    file { "/etc/krb5.conf":
+      content => template('kerberos/krb5.conf'),
+      owner => "root",
+      group => "root",
+      mode => "0644",
+    }
+
+    @file { $keytab_export_dir:
+      ensure => directory,
+      owner  => "root",
+      group  => "root",
+    }
+
+    # Required for SPNEGO
+    @principal { "HTTP": }
+  }
+
+  class kdc inherits kerberos::site {
+    package { $package_name_kdc:
+      ensure => installed,
+    }
+
+    file { $kdc_etc_path:
+      ensure  => directory,
+      owner   => root,
+      group   => root,
+      mode    => "0700",
+      require => Package["$package_name_kdc"],
+    }
+    file { "${kdc_etc_path}/kdc.conf":
+      content => template('kerberos/kdc.conf'),
+      require => Package["$package_name_kdc"],
+      owner => "root",
+      group => "root",
+      mode => "0644",
+    }
+    file { "${kdc_etc_path}/kadm5.acl":
+      content => template('kerberos/kadm5.acl'),
+      require => Package["$package_name_kdc"],
+      owner => "root",
+      group => "root",
+      mode => "0644",
+    }
+
+    exec { "kdb5_util":
+      path => $exec_path,
+      command => "rm -f /etc/kadm5.keytab ; kdb5_util -P cthulhu -r ${realm} create -s && kadmin.local -q 'cpw -pw secure kadmin/admin'",
+      
+      creates => "${kdc_etc_path}/stash",
+
+      subscribe => File["${kdc_etc_path}/kdc.conf"],
+      # refreshonly => true, 
+
+      require => [Package["$package_name_kdc"], File["${kdc_etc_path}/kdc.conf"], File["/etc/krb5.conf"]],
+    }
+
+    service { $service_name_kdc:
+      ensure => running,
+      require => [Package["$package_name_kdc"], File["${kdc_etc_path}/kdc.conf"], Exec["kdb5_util"]],
+      subscribe => File["${kdc_etc_path}/kdc.conf"],
+      hasrestart => true,
+    }
+
+
+    class admin_server inherits kerberos::kdc {
+      $se_hack = "setsebool -P kadmind_disable_trans  1 ; setsebool -P krb5kdc_disable_trans 1"
+
+      package { "$package_name_admin":
+        ensure => installed,
+        require => Package["$package_name_kdc"],
+      } 
+  
+      service { "$service_name_admin":
+        ensure => running,
+        require => [Package["$package_name_admin"], Service["$service_name_kdc"]],
+        hasrestart => true,
+        restart => "${se_hack} ; service ${service_name_admin} restart",
+        start => "${se_hack} ; service ${service_name_admin} start",
+      }
+    }
+  }
+
+  class client inherits kerberos::site {
+    package { $package_name_client:
+      ensure => installed,
+    }
+  }
+
+  class server {
+    include kerberos::client
+
+    class { "kerberos::kdc": } 
+    ->
+    Class["kerberos::client"] 
+
+    class { "kerberos::kdc::admin_server": }
+    -> 
+    Class["kerberos::client"]
+  }
+
+  define principal {
+    require "kerberos::client"
+
+    realize(File[$kerberos::site::keytab_export_dir])
+
+    $principal = "$title/$::fqdn"
+    $keytab    = "$kerberos::site::keytab_export_dir/$title.keytab"
+
+    exec { "addprinc.$title":
+      path => $kerberos::site::exec_path,
+      command => "kadmin -w secure -p kadmin/admin -q 'addprinc -randkey $principal'",
+      unless => "kadmin -w secure -p kadmin/admin -q listprincs | grep -q $principal",
+      require => Package[$kerberos::site::package_name_client],
+    } 
+    ->
+    exec { "xst.$title":
+      path    => $kerberos::site::exec_path, 
+      command => "kadmin -w secure -p kadmin/admin -q 'xst -k $keytab $principal'",
+      unless  => "klist -kt $keytab 2>/dev/null | grep -q $principal",
+      require => File[$kerberos::site::keytab_export_dir],
+    }
+  }
+
+  define host_keytab($princs = undef, $spnego = disabled) {
+    $keytab = "/etc/$title.keytab"
+
+    $requested_princs = $princs ? { 
+      undef   => [ $title ],
+      default => $princs,
+    }
+
+    $internal_princs = $spnego ? {
+      /(true|enabled)/ => [ 'HTTP' ],
+      default          => [ ],
+    }
+    realize(Kerberos::Principal[$internal_princs])
+
+    $includes = inline_template("<%=
+      [requested_princs, internal_princs].flatten.map { |x|
+        \"rkt $kerberos::site::keytab_export_dir/#{x}.keytab\"
+      }.join(\"\n\")
+    %>")
+
+    kerberos::principal { $requested_princs:
+    }
+
+    exec { "ktinject.$title":
+      path     => $kerberos::site::exec_path,
+      command  => "/usr/bin/ktutil <<EOF
+        $includes
+        wkt $keytab
+EOF
+        chown $title $keytab",
+      creates => $keytab,
+      require => [ Kerberos::Principal[$requested_princs],
+                   Kerberos::Principal[$internal_princs] ],
+    }
+  }
+}
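Note: the keytab export directory and the HTTP principal above are declared as virtual resources ('@'); they enter the catalog only on nodes where realize() is later called, as the principal and host_keytab defines do. A minimal sketch of the idiom, with a hypothetical resource:

    @file { '/tmp/hypothetical.marker':        # declared virtually: not yet managed
      ensure => present,
    }
    realize(File['/tmp/hypothetical.marker'])  # from here on, the catalog manages it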

+ 50 - 0
ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/client.pp

@@ -0,0 +1,50 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+class hdp-kerberos::client(
+  $service_state = $hdp::params::cluster_service_state
+) inherits hdp-kerberos::params
+{
+  import 'hdp'
+
+  $hdp::params::service_exists['hdp-kerberos::client'] = true
+
+  $kdc_server = $kdc_host
+  $krb_realm = $kerberos_domain
+  $realm = $kerberos_domain
+
+  if ($hdp::params::service_exists['hdp-kerberos::adminclient'] != true)  {
+    package { $package_name_client:
+      ensure => installed,
+    }
+  }
+
+  if (($hdp::params::service_exists['hdp-kerberos::server'] != true) and
+      ($hdp::params::service_exists['hdp-kerberos::adminclient'] != true) ) {
+    file { "/etc/krb5.conf":
+      content => template('hdp-kerberos/krb5.conf'),
+      owner => "root",
+      group => "root",
+      mode => "0644",
+      require => Package[$package_name_client],
+    }
+  }
+}

+ 25 - 0
ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/init.pp

@@ -0,0 +1,25 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+class hdp-kerberos()
+{
+}
+

+ 70 - 0
ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/params.pp

@@ -0,0 +1,70 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+class hdp-kerberos::params(
+) inherits hdp::params
+{
+  $domain  = 'hadoop.com'
+  $realm = inline_template('<%= @domain.upcase %>')
+  $kdc_server = $::fqdn
+  $kdc_port = 88
+  $keytab_export_base_dir = '/etc/security'
+  $keytab_export_dir = "${keytab_export_base_dir}/keytabs"
+
+  $keytab_map = {
+    'hdp-hadoop::namenode' =>  
+      {keytab    => 'nn.service.keytab',
+       primaries => ['nn', 'host', 'HTTP']},
+    'hdp-hadoop::snamenode' =>  
+      {keytab    => 'nn.service.keytab',
+       primaries => ['nn', 'host', 'HTTP']},
+    'hdp-hadoop::datanode' =>  
+      {keytab    => 'dn.service.keytab',
+       primaries => ['dn']},
+    'hdp-hadoop::jobtracker' =>  
+      {keytab    => 'jt.service.keytab',
+       primaries => ['jt']},
+    'hdp-hadoop::tasktracker' =>  
+      {keytab    => 'tt.service.keytab',
+       primaries => ['tt']}
+  }
+
+  case $::operatingsystem {
+    'ubuntu': {
+      $package_name_kdc    = 'krb5-kdc'
+      $service_name_kdc    = 'krb5-kdc'
+      $package_name_admin  = 'krb5-admin-server'
+      $service_name_admin  = 'krb5-admin-server'
+      $package_name_client = 'krb5-user'
+      $exec_path           = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
+      $kdc_etc_path        = '/etc/krb5kdc'
+    }
+    default: {
+      $package_name_kdc    = 'krb5-server'
+      $service_name_kdc    = 'krb5kdc'
+      $package_name_admin  = 'krb5-libs'
+      $service_name_admin  = 'kadmin'
+      $package_name_client = 'krb5-workstation'
+      $exec_path           = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/kerberos/sbin:/usr/kerberos/bin'
+      $kdc_etc_path        = '/var/kerberos/krb5kdc'
+    }
+  }
+}
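Note: this patch only defines $keytab_map; as a sketch of how a consumer might read it, the NameNode entry above could be unpacked like this:

    $nn_spec   = $hdp-kerberos::params::keytab_map['hdp-hadoop::namenode']
    $keytab    = $nn_spec['keytab']      # => 'nn.service.keytab'
    $primaries = $nn_spec['primaries']   # => ['nn', 'host', 'HTTP']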

+ 116 - 0
ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/server.pp

@@ -0,0 +1,116 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+class hdp-kerberos::server(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-kerberos::params
+{ 
+  import 'hdp'
+
+  $hdp::params::service_exists['hdp-kerberos::server'] = true
+
+  $krb_realm = $kerberos_domain
+  $kadmin_pw = "bla123"
+  $kadmin_admin = "kadmin/admin"
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
+    # Install kdc server and client
+    package { $package_name_kdc:
+      ensure => installed
+    }
+
+    # set the realm
+    $realm = $krb_realm
+    # SUHAS: This should be set on all nodes, in addition to the KDC server
+    file { "/etc/krb5.conf":
+      content => template('hdp-kerberos/krb5.conf'),
+      owner   => "root",
+      group   => "root",
+      mode    => "0644",
+      require => Package[$package_name_kdc],
+    }
+
+    file { $kdc_etc_path:
+      ensure => directory,
+      owner => root,
+      group => root,
+      mode => "0700",
+      require => Package[$package_name_kdc],
+    }
+
+    file { "${kdc_etc_path}/kdc.conf":
+      content => template('hdp-kerberos/kdc.conf'),
+      require => Package["$package_name_kdc"],
+      owner => "root",
+      group => "root",
+      mode => "0644",
+    }
+
+    # SUHAS: the kadm5.acl file template is missing in gsInstaller
+    # SUHAS: gsInstaller runs stopIptables at this point (the ordering is not relevant here).
+    file { "${kdc_etc_path}/kadm5.acl":
+      content => template('hdp-kerberos/kadm5.acl'),
+      require => Package["$package_name_kdc"],
+      owner => "root",
+      group => "root",
+      mode => "0644",
+    }
+
+    exec { "kdb5_util":
+      path => $exec_path,
+      command => "rm -f ${kdc_etc_path}/kadm5.keytab; kdb5_util -P x86yzh12 -r ${realm} create -s && kadmin.local -q 'cpw -pw ${kadmin_pw} ${kadmin_admin}'",
+      creates => "${kdc_etc_path}/stash",
+      subscribe => File["${kdc_etc_path}/kdc.conf"],
+      require => [Package[$package_name_kdc], File["${kdc_etc_path}/kdc.conf"], File["/etc/krb5.conf"]]
+    }
+
+    # SUHAS: gsInstaller has a corresponding 'chkconfig on' step
+    exec { "chkconfig_krb5kdc_on":
+      path => $exec_path,
+      command => "chkconfig krb5kdc on",
+      require => [Package["$package_name_kdc"], File["${kdc_etc_path}/kdc.conf"], Exec["kdb5_util"]],
+    }
+    
+    # Start KDC Server
+    if ($service_state in ['running','stopped']) {
+      service { $service_name_kdc:
+        ensure => $service_state,
+        require => [Exec["chkconfig_krb5kdc_on"]],
+        subscribe => File["${kdc_etc_path}/kdc.conf"],
+        hasrestart => true,
+      }
+
+      # SUHAS: Is this to be done on the HMC rather than the KDC server??
+      $se_hack = "setsebool -P kadmind_disable_trans  1 ; setsebool -P krb5kdc_disable_trans 1"
+      service { $service_name_admin:
+        ensure => $service_state,
+        require => Service[$service_name_kdc],
+        hasrestart => true,
+        restart => "${se_hack} ; service ${service_name_admin} restart",
+        start => "${se_hack} ; service ${service_name_admin} start",
+      }
+    }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
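Note: a node is driven through the states this class accepts ('no_op', 'running', 'stopped', 'installed_and_configured') by its caller; a minimal sketch of such a declaration:

    class { 'hdp-kerberos::server':
      service_state => 'installed_and_configured',
    }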

+ 21 - 0
ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kadm5.acl

@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is the access control list for krb5 administration.
+# After editing this file, run /etc/init.d/krb5-admin-server restart to apply the changes.
+# One common way to set up Kerberos administration is to give any principal
+# ending in /admin full administrative rights.
+# The following line enables exactly that:
+*/admin *

+ 36 - 0
ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kdc.conf

@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+default_realm = <%= realm %>
+
+[kdcdefaults]
+    v4_mode = nopreauth
+    kdc_ports = 0
+    kdc_tcp_ports = 88 
+
+[realms]
+    <%= realm %> = {
+        acl_file = <%= kdc_etc_path %>/kadm5.acl
+        dict_file = /usr/share/dict/words
+        admin_keytab = <%= kdc_etc_path %>/kadm5.keytab
+        supported_enctypes = des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal des-cbc-crc:v4 des-cbc-crc:afs3
+        kdc_ports = <%= kdc_port %>
+        database_name = <%= kdc_etc_path %>/principal
+        key_stash_file = <%= kdc_etc_path %>/stash
+        max_life = 10h 0m 0s
+        max_renewable_life = 7d 0h 0m 0s
+        master_key_type = des3-hmac-sha1
+        default_principal_flags = +preauth
+    }
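Note: assuming the defaults from hdp-kerberos::params (domain 'hadoop.com' upcased to realm HADOOP.COM, port 88, and the Red Hat path /var/kerberos/krb5kdc), the [realms] stanza of this template renders roughly as the following abridged sketch:

    [realms]
        HADOOP.COM = {
            acl_file = /var/kerberos/krb5kdc/kadm5.acl
            admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
            kdc_ports = 88
            key_stash_file = /var/kerberos/krb5kdc/stash
        }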

+ 47 - 0
ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/krb5.conf

@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[libdefaults]
+    default_realm = <%= realm %>
+    dns_lookup_realm = false
+    dns_lookup_kdc = false
+    ticket_lifetime = 24h
+    forwardable = yes
+    udp_preference_limit = 1
+
+[realms]
+    <%= realm %> = {
+        kdc = <%= kdc_server %>:<%= kdc_port %>
+        admin_server = <%= kdc_server %>:749
+        default_domain = <%= domain %>
+    }
+
+[appdefaults] 
+    pam = {
+        debug = false 
+        ticket_lifetime = 36000 
+        renew_lifetime = 36000 
+        forwardable = true 
+        krb4_convert = false 
+    }
+
+[domain_realm]
+    .<%= domain %> = <%= realm %>
+    <%= domain %> = <%= realm %>
+
+[logging]
+    default = FILE:/var/log/krb5libs.log
+    kdc = FILE:/var/log/krb5kdc.log
+    admin_server = FILE:/var/log/kadmind.log

+ 31 - 0
ambari-agent/src/main/puppet/modules/hdp-kerberos/tests/init.pp

@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+$kerberos_domain = "krb.test.com"
+$kerberos_realm = "KRB.TEST.COM"
+$kerberos_kdc_server = "localhost"
+$kerberos_kdc_port = 88
+# the following turns a node into a fully functional KDC 
+include kerberos::kdc
+# the following opens up the KDC principal database for remote
+# administration (it really should be optional, but it is
+# required for now in order to make kerberos::host_keytab
+# work)
+include kerberos::kdc::admin_server
+
+# the following turns a node into a Kerberos client host with...
+include kerberos::client
+# ...an optional host_keytab for as many services as you want:
+kerberos::host_keytab { ["host", "hdfs", "mapred"]: }

+ 43 - 0
ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/init.pp

@@ -0,0 +1,43 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-monitor-webserver( 
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp::params 
+{
+  #TODO: does not install apache package
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
+    if ($service_state == 'running') {
+      #TODO: refine by using notify/subscribe
+      hdp::exec { 'monitor webserver start':
+        command => '/etc/init.d/httpd start',
+        unless => '/etc/init.d/httpd status'
+      } 
+    } elsif ($service_state == 'stopped') {
+      # a package resource cannot be 'stopped'; manage the daemon with a
+      # service resource instead
+      service { 'httpd':
+        ensure => stopped
+      }
+    }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
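Note: one way to address the notify/subscribe TODO above is to manage httpd as a service resource that restarts whenever its configuration changes; a sketch, with a hypothetical managed config file:

    file { '/etc/httpd/conf/httpd.conf':    # hypothetical: manage the config
      ensure => file,
    }
    service { 'httpd':
      ensure    => running,
      subscribe => File['/etc/httpd/conf/httpd.conf'],  # restart when the config changes
    }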

+ 30 - 0
ambari-agent/src/main/puppet/modules/hdp-mysql/files/startMysql.sh

@@ -0,0 +1,30 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldbuser=$1
+mysqldbpasswd=$2
+mysqldbhost=$3
+
+echo "Adding user $mysqldbuser@$mysqldbhost"
+echo "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';" | mysql -u root
+echo "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';" | mysql -u root
+echo "flush privileges;" | mysql -u root

+ 22 - 0
ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/init.pp

@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-mysql(){}
+

+ 26 - 0
ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/params.pp

@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-mysql::params() inherits hdp-hive::params
+{
+   $db_name = $hdp-hive::params::hive_database_name
+   $db_user = $hdp-hive::params::hive_metastore_user_name
+   $db_pw = $hdp-hive::params::hive_metastore_user_passwd
+}

+ 71 - 0
ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/server.pp

@@ -0,0 +1,71 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-mysql::server(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits  hdp-mysql::params
+{ 
+  if ($service_state in ['no_op','uninstalled']) {
+   } elsif ($service_state in ['running','stopped','installed_and_configured']) {
+   
+    $db_user = $hdp-mysql::params::db_user
+    $db_pw = $hdp-mysql::params::db_pw
+    $db_name = $hdp-mysql::params::db_name
+    $host = $hdp::params::hive_mysql_host 
+
+    anchor { 'hdp-mysql::server::begin':}
+
+    hdp::package { 'mysql' :
+      size   => 32,
+      require   => Anchor['hdp-mysql::server::begin']
+    }
+
+    hdp::exec { 'mysqld start':
+        command => '/etc/init.d/mysqld start',
+        unless  => '/etc/init.d/mysqld status',
+        require => Hdp::Package['mysql'],
+        notify  => File['/tmp/startMysql.sh']
+    }
+
+    file { '/tmp/startMysql.sh':
+      ensure => present,
+      source => "puppet:///modules/hdp-mysql/startMysql.sh",
+      mode => '0755',
+      require => Hdp::Exec['mysqld start'],
+      notify => Exec['/tmp/startMysql.sh']
+    }
+
+    exec { '/tmp/startMysql.sh':
+      command   => "sh /tmp/startMysql.sh ${db_user} ${db_pw} ${host}",
+      tries     => 3,
+      try_sleep => 5,
+      require   => File['/tmp/startMysql.sh'],
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      notify   => Anchor['hdp-mysql::server::end'],
+      logoutput => true
+    }
+
+    anchor { 'hdp-mysql::server::end':}
+
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}

+ 243 - 0
ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_aggregate.php

@@ -0,0 +1,243 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+  $options = getopt ("f:s:n:w:c:t:");
+  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
+    usage();
+    exit(3);
+  }
+  $status_file=$options['f'];
+  $status_code=$options['s'];
+  $type=$options['t'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  if ($type == "service" && !array_key_exists('n', $options)) {
+    echo "Service description not provided -n option\n";
+    exit(3);
+  }
+  if ($type == "service") {
+    $service_name=$options['n'];
+    /* echo "DESC: " . $service_name . "\n"; */
+  }
+
+  $result = array();
+  $status_file_content = file_get_contents($status_file);
+
+  $counts = array();
+  if ($type == "service") {
+    $counts=query_alert_count($status_file_content, $service_name, $status_code);
+  } else {
+    $counts=query_host_count($status_file_content, $status_code);
+  }
+
+  if ($counts['total'] == 0) {
+    $percent = 0;
+  } else {
+    $percent = ($counts['actual']/$counts['total'])*100;
+  }
+  if ($percent >= $crit) {
+    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (1);
+  }
+  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+  exit(0);
+
+
+  # Functions
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
+  }
+
+  /* Query host count */
+  function query_host_count ($status_file_content, $status_code) {
+    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $hostcounts_object = array ();
+    $total_hosts = 0;
+    $hosts = 0;
+    foreach ($matches[0] as $object) {
+      $total_hosts++;
+      if (getParameter($object, "current_state") == $status_code) {
+        $hosts++;
+      }
+    }
+    $hostcounts_object['total'] = $total_hosts;
+    $hostcounts_object['actual'] = $hosts;
+    return $hostcounts_object;
+  }
+
+  /* Query Alert counts */
+  function query_alert_count ($status_file_content, $service_name, $status_code) {
+    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $alertcounts_objects = array ();
+    $total_alerts=0;
+    $alerts=0;
+    foreach ($matches[0] as $object) {
+      if (getParameter($object, "service_description") == $service_name) {
+        $total_alerts++;
+        if (getParameter($object, "current_state") >= $status_code) {
+          $alerts++;
+        }
+      }
+    }
+    $alertcounts_objects['total'] = $total_alerts;
+    $alertcounts_objects['actual'] = $alerts;
+    return $alertcounts_objects;
+  }
+
+  function get_service_type($service_description)
+  {
+    $pieces = explode("::", $service_description);
+    switch ($pieces[0]) {
+      case "NAMENODE":
+        $pieces[0] = "HDFS";
+        break;
+      case "JOBTRACKER":
+        $pieces[0] = "MAPREDUCE";
+        break;
+      case "HBASEMASTER":
+        $pieces[0] = "HBASE";
+        break;
+      case "SYSTEM":
+      case "HDFS":
+      case "MAPREDUCE":
+      case "HBASE":
+        break;
+      default:
+        $pieces[0] = "UNKNOWN";
+    }
+    return $pieces[0];
+  }
+
+  function getParameter($object, $key)
+  {
+    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
+    $num_mat = preg_match($pattern, $object, $matches);
+    $value = "";
+    if ($num_mat) {
+      $value = $matches[1];
+    }
+    return $value;
+  }
+
+function indent($json) {
+
+    $result      = '';
+    $pos         = 0;
+    $strLen      = strlen($json);
+    $indentStr   = '  ';
+    $newLine     = "\n";
+    $prevChar    = '';
+    $outOfQuotes = true;
+
+    for ($i=0; $i<=$strLen; $i++) {
+
+        // Grab the next character in the string.
+        $char = substr($json, $i, 1);
+
+        // Are we inside a quoted string?
+        if ($char == '"' && $prevChar != '\\') {
+            $outOfQuotes = !$outOfQuotes;
+
+        // If this character is the end of an element,
+        // output a new line and indent the next line.
+        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
+            $result .= $newLine;
+            $pos --;
+            for ($j=0; $j<$pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        // Add the character to the result string.
+        $result .= $char;
+
+        // If the last character was the beginning of an element,
+        // output a new line and indent the next line.
+        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
+            $result .= $newLine;
+            if ($char == '{' || $char == '[') {
+                $pos ++;
+            }
+
+            for ($j = 0; $j < $pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        $prevChar = $char;
+    }
+
+    return $result;
+}
+
+/* JSON document format */
+/*
+{
+  "programstatus":{
+    "last_command_check":"1327385743"
+  },
+  "hostcounts":{
+    "up_nodes":"",
+    "down_nodes":""
+  },
+  "hoststatus":[
+    {
+      "host_name"="ip-10-242-191-48.ec2.internal",
+      "current_state":"0",
+      "last_hard_state":"0",
+      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+      "last_check":"1327385564",
+      "current_attempt":"1",
+      "last_hard_state_change":"1327362079",
+      "last_time_up":"1327385574",
+      "last_time_down":"0",
+      "last_time_unreachable":"0",
+      "is_flapping":"0",
+      "last_check":"1327385574",
+      "servicestatus":[
+      ]
+    }
+  ],
+  "servicestatus":[
+    {
+      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
+      "service_description":"HDFS Current Load",
+      "host_name"="ip-10-242-191-48.ec2.internal",
+      "current_attempt":"1",
+      "current_state":"0",
+      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+      "last_hard_state_change":"1327362079",
+      "last_time_ok":"1327385479",
+      "last_time_warning":"0",
+      "last_time_unknown":"0",
+      "last_time_critical":"0",
+      "last_check":"1327385574",
+      "is_flapping":"0"
+    }
+  ]
+}
+*/
+
+?>

+ 114 - 0
ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_cpu.pl

@@ -0,0 +1,114 @@
+#!/usr/bin/perl -w 
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+use strict;
+use Net::SNMP;
+use Getopt::Long;
+
+# Variable
+my $base_proc = "1.3.6.1.2.1.25.3.3.1";   # HOST-RESOURCES-MIB hrProcessorTable entry
+my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; # hrProcessorLoad: per-CPU busy percentage over the last minute
+my $o_host = 	undef;
+my $o_community = undef;
+my $o_warn=	undef;
+my $o_crit=	undef;
+my $o_timeout = 15;
+my $o_port = 161;
+
+sub Usage {
+    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
+}
+
+Getopt::Long::Configure ("bundling");
+GetOptions(
+  'H:s'   => \$o_host,	
+  'C:s'   => \$o_community,	
+  'c:s'   => \$o_crit,        
+  'w:s'   => \$o_warn
+          );
+if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
+  Usage();
+  exit 3;
+}
+$o_warn =~ s/\%//g; 
+$o_crit =~ s/\%//g;
+alarm ($o_timeout);
+$SIG{'ALRM'} = sub {
+ print "Unable to contact host: $o_host\n";
+ exit 3;
+};
+
+# Connect to host
+my ($session,$error);
+($session, $error) = Net::SNMP->session(
+		-hostname  => $o_host,
+		-community => $o_community,
+		-port      => $o_port,
+		-timeout   => $o_timeout
+	  );
+if (!defined($session)) {
+   printf("Error opening session: %s.\n", $error);
+   exit 3;
+}
+
+my $exit_val=undef;
+my $resultat =  (Net::SNMP->VERSION < 4) ?
+	  $session->get_table($base_proc)
+	: $session->get_table(Baseoid => $base_proc);
+
+if (!defined($resultat)) {
+   printf("ERROR: Description table : %s.\n", $session->error);
+   $session->close;
+   exit 3;
+}
+
+$session->close;
+
+my ($cpu_used,$ncpu)=(0,0);
+foreach my $key ( keys %$resultat) {
+  if ($key =~ /$proc_load/) {
+    $cpu_used += $$resultat{$key};
+    $ncpu++;
+  }
+}
+
+if ($ncpu==0) {
+  print "Can't find CPU usage information : UNKNOWN\n";
+  exit 3;
+}
+
+$cpu_used /= $ncpu;
+
+print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
+printf(" %.1f%%",$cpu_used);
+$exit_val=0;
+
+if ($cpu_used > $o_crit) {
+ print " > $o_crit% : CRITICAL\n";
+ $exit_val=2;
+} else {
+  if ($cpu_used > $o_warn) {
+   print " > $o_warn% : WARNING\n";
+   $exit_val=1;
+  }
+}
+print " < $o_warn% : OK\n" if ($exit_val eq 0);
+exit $exit_val;

+ 63 - 0
ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_datanode_storage.php

@@ -0,0 +1,63 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin fetches the jmx-json document from a datanode and checks
+ * the storage capacity remaining on that datanode's local storage
+ */
+
+  $options = getopt ("h:p:w:c:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+
+  /* Get the json document */
+  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-DS-*");
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-hadoop apps */
+  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
+  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
+
+  $out_msg = "Capacity:[" . $cap_total . 
+             "], Remaining Capacity:[" . $cap_remain . 
+             "], percent_full:[" . $percent_full  . "]";
+  
+  if ($percent_full > $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($percent_full > $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
+  }
+?>

Some files were not shown because too many files changed in this diff